hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72128c8178af94dd8a0a21b6e9bdb1ebcf3a076 | 2,575 | py | Python | graphs/karger.py | jenia90/Python | 696fb4a681ad9e4d84e0d2b894daf449a3e30b24 | [
"MIT"
] | 145,614 | 2016-07-21T05:40:05.000Z | 2022-03-31T22:17:22.000Z | graphs/karger.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 3,987 | 2016-07-28T17:31:25.000Z | 2022-03-30T23:07:46.000Z | graphs/karger.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 40,014 | 2016-07-26T15:14:41.000Z | 2022-03-31T22:23:03.000Z | """
An implementation of Karger's Algorithm for partitioning a graph.
"""
from __future__ import annotations
import random
# Adjacency list representation of this graph:
# https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
TEST_GRAPH = {
"1": ["2", "3", "4", "5"],
"2": ["1", "3", "4", "5"],
"3": ["1", "2", "4", "5", "10"],
"4": ["1", "2", "3", "5", "6"],
"5": ["1", "2", "3", "4", "7"],
"6": ["7", "8", "9", "10", "4"],
"7": ["6", "8", "9", "10", "5"],
"8": ["6", "7", "9", "10"],
"9": ["6", "7", "8", "10"],
"10": ["6", "7", "8", "9", "3"],
}
def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
"""
Partitions a graph using Karger's Algorithm. Implemented from
pseudocode found here:
https://en.wikipedia.org/wiki/Karger%27s_algorithm.
This function involves random choices, meaning it will not give
consistent outputs.
Args:
graph: A dictionary containing adacency lists for the graph.
Nodes must be strings.
Returns:
The cutset of the cut found by Karger's Algorithm.
>>> graph = {'0':['1'], '1':['0']}
>>> partition_graph(graph)
{('0', '1')}
"""
# Dict that maps contracted nodes to a list of all the nodes it "contains."
contracted_nodes = {node: {node} for node in graph}
graph_copy = {node: graph[node][:] for node in graph}
while len(graph_copy) > 2:
# Choose a random edge.
u = random.choice(list(graph_copy.keys()))
v = random.choice(graph_copy[u])
# Contract edge (u, v) to new node uv
uv = u + v
uv_neighbors = list(set(graph_copy[u] + graph_copy[v]))
uv_neighbors.remove(u)
uv_neighbors.remove(v)
graph_copy[uv] = uv_neighbors
for neighbor in uv_neighbors:
graph_copy[neighbor].append(uv)
contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v]))
# Remove nodes u and v.
del graph_copy[u]
del graph_copy[v]
for neighbor in uv_neighbors:
if u in graph_copy[neighbor]:
graph_copy[neighbor].remove(u)
if v in graph_copy[neighbor]:
graph_copy[neighbor].remove(v)
# Find cutset.
groups = [contracted_nodes[node] for node in graph_copy]
return {
(node, neighbor)
for node in groups[0]
for neighbor in graph[node]
if neighbor in groups[1]
}
if __name__ == "__main__":
print(partition_graph(TEST_GRAPH))
| 29.597701 | 88 | 0.572816 |
from __future__ import annotations
import random
TEST_GRAPH = {
"1": ["2", "3", "4", "5"],
"2": ["1", "3", "4", "5"],
"3": ["1", "2", "4", "5", "10"],
"4": ["1", "2", "3", "5", "6"],
"5": ["1", "2", "3", "4", "7"],
"6": ["7", "8", "9", "10", "4"],
"7": ["6", "8", "9", "10", "5"],
"8": ["6", "7", "9", "10"],
"9": ["6", "7", "8", "10"],
"10": ["6", "7", "8", "9", "3"],
}
def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
contracted_nodes = {node: {node} for node in graph}
graph_copy = {node: graph[node][:] for node in graph}
while len(graph_copy) > 2:
u = random.choice(list(graph_copy.keys()))
v = random.choice(graph_copy[u])
uv = u + v
uv_neighbors = list(set(graph_copy[u] + graph_copy[v]))
uv_neighbors.remove(u)
uv_neighbors.remove(v)
graph_copy[uv] = uv_neighbors
for neighbor in uv_neighbors:
graph_copy[neighbor].append(uv)
contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v]))
del graph_copy[u]
del graph_copy[v]
for neighbor in uv_neighbors:
if u in graph_copy[neighbor]:
graph_copy[neighbor].remove(u)
if v in graph_copy[neighbor]:
graph_copy[neighbor].remove(v)
groups = [contracted_nodes[node] for node in graph_copy]
return {
(node, neighbor)
for node in groups[0]
for neighbor in graph[node]
if neighbor in groups[1]
}
if __name__ == "__main__":
print(partition_graph(TEST_GRAPH))
| true | true |
f721291ec9a303b02a2cdf00dbe42788ad4a0a98 | 1,661 | py | Python | setup.py | chermed/assembly | 4c993d19bc9d33c1641323e03231e9ecad711b38 | [
"MIT"
] | 176 | 2019-11-16T19:44:08.000Z | 2021-09-10T22:16:04.000Z | setup.py | chermed/assembly | 4c993d19bc9d33c1641323e03231e9ecad711b38 | [
"MIT"
] | 12 | 2019-11-21T02:02:07.000Z | 2020-02-17T21:45:57.000Z | setup.py | chermed/assembly | 4c993d19bc9d33c1641323e03231e9ecad711b38 | [
"MIT"
] | 12 | 2019-11-20T08:07:11.000Z | 2021-02-27T09:52:06.000Z | """
Assembly
"""
import os
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)
__about__ = {}
with open(os.path.join(base_dir, "assembly", "about.py")) as f:
exec(f.read(), __about__)
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
with open("README.md", "r") as f:
long_description = f.read()
setup(
name=__about__["__title__"],
version=__about__["__version__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
description=__about__["__summary__"],
url=__about__["__uri__"],
long_description=long_description,
long_description_content_type="text/markdown",
py_modules=['assembly'],
entry_points=dict(console_scripts=[
'asm=assembly.scripts:cmd',
]),
include_package_data=True,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=install_requires,
keywords=['flask',
'assembly',
'templates',
'views',
'classy',
'framework',
"mvc",
"blueprint"],
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe=False
)
| 27.683333 | 81 | 0.615292 |
import os
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)
__about__ = {}
with open(os.path.join(base_dir, "assembly", "about.py")) as f:
exec(f.read(), __about__)
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
with open("README.md", "r") as f:
long_description = f.read()
setup(
name=__about__["__title__"],
version=__about__["__version__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
description=__about__["__summary__"],
url=__about__["__uri__"],
long_description=long_description,
long_description_content_type="text/markdown",
py_modules=['assembly'],
entry_points=dict(console_scripts=[
'asm=assembly.scripts:cmd',
]),
include_package_data=True,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=install_requires,
keywords=['flask',
'assembly',
'templates',
'views',
'classy',
'framework',
"mvc",
"blueprint"],
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe=False
)
| true | true |
f7212933bf16d8d621d170fadea4f1c611eeef47 | 2,438 | py | Python | .history/postImages/index_20201006214330.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | 2 | 2020-10-21T22:14:15.000Z | 2020-10-21T22:14:16.000Z | .history/postImages/index_20201006214330.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | .history/postImages/index_20201006214330.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
# url = https://b2ptc.herokuapp.com/bridges
finalCsv = df[1:]
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
finalObj = {}
for i in finCsv:
x = i.split(',')
id = x[6]
finalObj[id]= {}
if x[6] in obj:
finalObj[x[6]]['before_img'] = obj[x[6]]['before_img']
finalObj[x[6]]['after_img'] = obj[x[6]]['after_img'][0:-1]
# finalObj[x[6]]['district'] = x[1]
# finalObj[x[6]]['sector'] = x[2]
# finalObj[x[6]]['cell'] = x[3]
# finalObj[x[6]]['bridge_site'] = x[4]
# finalObj[x[6]]['stage'] = x[5]
# finalObj[x[6]]['id'] = int(x[6])
# finalObj[x[6]]['type'] = x[7]
# finalObj[x[6]]['latt'] = float(x[8])
# finalObj[x[6]]['long'] = float(x[9])
# try:
# serv = float(x[10])
# except:
# serv = x[10]
# sv = x[13].split(' ')[2]
# finalObj[x[6]]['served'] = serv
# finalObj[x[6]]['community_served'] = x[14]
# try:
# pId = int(x[15])
# except :
# pId = x[15]
# finalObj[x[6]]['provId'] = pId
# finalObj[x[6]]['districtId'] = int(x[16])
# finalObj[x[6]]['sectorId'] = int(x[17])
# finalObj[x[6]]['cellId'] = int(x[18][0:-1])
# print(id)
row = fin[0].split(',')
for i in range(len(row)):
key = row[i].replace(' ',"_")
key = key.strip()
if i == 8:
key = 'latitude'
if i == 9:
key = 'longitude'
if i == 11:
continue
try:
val = float(x[i])
except ValueError :
val = x[i]
else:
val = x[i]
finalObj[id][key.lower()] = val
print(finalObj['1013351'])
# for i in finalCsv:
# x = i.split(',')
# requests.put(url+x[0],data={before:x[2],after:x[3]})
# pull each id,before image and after from df
# for each data item do a put request with the id as the param id
# and then put the before and after image in an dict and place it as the data for the put request
| 30.475 | 97 | 0.454471 | import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
finalCsv = df[1:]
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
finalObj = {}
for i in finCsv:
x = i.split(',')
id = x[6]
finalObj[id]= {}
if x[6] in obj:
finalObj[x[6]]['before_img'] = obj[x[6]]['before_img']
finalObj[x[6]]['after_img'] = obj[x[6]]['after_img'][0:-1]
row = fin[0].split(',')
for i in range(len(row)):
key = row[i].replace(' ',"_")
key = key.strip()
if i == 8:
key = 'latitude'
if i == 9:
key = 'longitude'
if i == 11:
continue
try:
val = float(x[i])
except ValueError :
val = x[i]
else:
val = x[i]
finalObj[id][key.lower()] = val
print(finalObj['1013351'])
| true | true |
f7212939f549dcbfa9bf8b9ff6fb82924b643d38 | 1,241 | py | Python | harnesses/dict_conv.py | ujjwalsh/oniguruma | 604ea58f210b301d572b9fcf89d6e03fbb1df1f9 | [
"BSD-2-Clause"
] | 1,793 | 2015-07-20T14:14:18.000Z | 2022-03-29T13:00:16.000Z | harnesses/dict_conv.py | ujjwalsh/oniguruma | 604ea58f210b301d572b9fcf89d6e03fbb1df1f9 | [
"BSD-2-Clause"
] | 183 | 2015-09-04T14:00:57.000Z | 2022-03-19T15:52:13.000Z | harnesses/dict_conv.py | ujjwalsh/oniguruma | 604ea58f210b301d572b9fcf89d6e03fbb1df1f9 | [
"BSD-2-Clause"
] | 339 | 2015-09-03T11:13:46.000Z | 2022-03-20T08:21:15.000Z | # -*- coding: utf-8 -*-
# dict_conv.py (Python3 script)
import sys
ENC_UTF16_BE = 1
ENC_UTF16_LE = 2
def add_char(enc, s, c):
if enc == ENC_UTF16_BE:
s += "\\x00"
s += c
if enc == ENC_UTF16_LE:
s += "\\x00"
return s
def conv(enc, s):
n = len(s)
r = ""
i = 0
while i < n:
c = s[i]
if c == '\\':
c = s[i+1]
if c == '\\' or c == '"':
r = add_char(enc, r, "\\" + c)
i += 2
continue
else:
raise("Unknown escape {0}".format(s))
r = add_char(enc, r, c)
i += 1
return r
def main(enc):
print("# This file was generated by dict_conv.py.")
for line in sys.stdin:
s = line.strip()
if s[0] == '#':
print(s)
continue
if s[0] == '"' and s[-1] == '"':
s = conv(enc, s[1:-1])
print("\"{0}\"".format(s))
else:
raise("Invalid format {0}".format(s))
def usage(argv):
raise RuntimeError("Usage: python {0} utf16_be/utf16_le".format(argv[0]))
if __name__ == "__main__":
argv = sys.argv
argc = len(argv)
if argc >= 2:
s = argv[1]
if s == 'utf16_be':
enc = ENC_UTF16_BE
elif s == 'utf16_le':
enc = ENC_UTF16_LE
else:
usage(argv)
else:
usage(argv)
main(enc)
| 17 | 75 | 0.498791 |
import sys
ENC_UTF16_BE = 1
ENC_UTF16_LE = 2
def add_char(enc, s, c):
if enc == ENC_UTF16_BE:
s += "\\x00"
s += c
if enc == ENC_UTF16_LE:
s += "\\x00"
return s
def conv(enc, s):
n = len(s)
r = ""
i = 0
while i < n:
c = s[i]
if c == '\\':
c = s[i+1]
if c == '\\' or c == '"':
r = add_char(enc, r, "\\" + c)
i += 2
continue
else:
raise("Unknown escape {0}".format(s))
r = add_char(enc, r, c)
i += 1
return r
def main(enc):
print("
for line in sys.stdin:
s = line.strip()
if s[0] == '#':
print(s)
continue
if s[0] == '"' and s[-1] == '"':
s = conv(enc, s[1:-1])
print("\"{0}\"".format(s))
else:
raise("Invalid format {0}".format(s))
def usage(argv):
raise RuntimeError("Usage: python {0} utf16_be/utf16_le".format(argv[0]))
if __name__ == "__main__":
argv = sys.argv
argc = len(argv)
if argc >= 2:
s = argv[1]
if s == 'utf16_be':
enc = ENC_UTF16_BE
elif s == 'utf16_le':
enc = ENC_UTF16_LE
else:
usage(argv)
else:
usage(argv)
main(enc)
| true | true |
f721296a022523474df762cfe7ddb9431b342931 | 1,300 | py | Python | src/relaxed/infer/hypothesis_test.py | gradhep/smooth | 9ed6f1d622fb5346c46f1b9f62aed886b73fe09a | [
"BSD-3-Clause"
] | 4 | 2020-05-18T17:43:07.000Z | 2020-07-13T12:05:10.000Z | src/relaxed/infer/hypothesis_test.py | gradhep/relaxed | 9ed6f1d622fb5346c46f1b9f62aed886b73fe09a | [
"BSD-3-Clause"
] | 13 | 2021-05-13T20:59:55.000Z | 2022-03-25T12:04:44.000Z | src/relaxed/infer/hypothesis_test.py | gradhep/relaxed | 9ed6f1d622fb5346c46f1b9f62aed886b73fe09a | [
"BSD-3-Clause"
] | 3 | 2020-05-21T13:24:10.000Z | 2021-04-22T12:36:33.000Z | """Calculate expected CLs values with hypothesis tests."""
from __future__ import annotations
__all__ = ("hypotest",)
from functools import partial
import jax.numpy as jnp
import pyhf
from chex import Array
from jax import jit
from ..mle import fit, fixed_poi_fit
@partial(jit, static_argnames=["model", "return_mle_pars"]) # forward pass
def hypotest(
test_poi: float,
data: Array,
model: pyhf.Model,
lr: float,
return_mle_pars: bool = False,
) -> tuple[Array, Array] | Array:
# hard-code 1 as inits for now
# TODO: need to parse different inits for constrained and global fits
init_pars = jnp.ones_like(jnp.asarray(model.config.suggested_init()))
conditional_pars = fixed_poi_fit(
data, model, poi_condition=test_poi, init_pars=init_pars[:-1], lr=lr
)
mle_pars = fit(data, model, init_pars=init_pars, lr=lr)
profile_likelihood = -2 * (
model.logpdf(conditional_pars, data)[0] - model.logpdf(mle_pars, data)[0]
)
poi_hat = mle_pars[model.config.poi_index]
qmu = jnp.where(poi_hat < test_poi, profile_likelihood, 0.0)
CLsb = 1 - pyhf.tensorlib.normal_cdf(jnp.sqrt(qmu))
altval = 0.0
CLb = 1 - pyhf.tensorlib.normal_cdf(altval)
CLs = CLsb / CLb
return (CLs, mle_pars) if return_mle_pars else CLs
| 30.232558 | 81 | 0.698462 | from __future__ import annotations
__all__ = ("hypotest",)
from functools import partial
import jax.numpy as jnp
import pyhf
from chex import Array
from jax import jit
from ..mle import fit, fixed_poi_fit
@partial(jit, static_argnames=["model", "return_mle_pars"])
def hypotest(
test_poi: float,
data: Array,
model: pyhf.Model,
lr: float,
return_mle_pars: bool = False,
) -> tuple[Array, Array] | Array:
init_pars = jnp.ones_like(jnp.asarray(model.config.suggested_init()))
conditional_pars = fixed_poi_fit(
data, model, poi_condition=test_poi, init_pars=init_pars[:-1], lr=lr
)
mle_pars = fit(data, model, init_pars=init_pars, lr=lr)
profile_likelihood = -2 * (
model.logpdf(conditional_pars, data)[0] - model.logpdf(mle_pars, data)[0]
)
poi_hat = mle_pars[model.config.poi_index]
qmu = jnp.where(poi_hat < test_poi, profile_likelihood, 0.0)
CLsb = 1 - pyhf.tensorlib.normal_cdf(jnp.sqrt(qmu))
altval = 0.0
CLb = 1 - pyhf.tensorlib.normal_cdf(altval)
CLs = CLsb / CLb
return (CLs, mle_pars) if return_mle_pars else CLs
| true | true |
f7212c9ffcc95acbd3066fc3058f89d2b03ec98b | 1,750 | py | Python | cheeseprism/event.py | msabramo/CheesePrism | 3880528fb5a83fc650860d41e77729853081d404 | [
"BSD-2-Clause"
] | null | null | null | cheeseprism/event.py | msabramo/CheesePrism | 3880528fb5a83fc650860d41e77729853081d404 | [
"BSD-2-Clause"
] | null | null | null | cheeseprism/event.py | msabramo/CheesePrism | 3880528fb5a83fc650860d41e77729853081d404 | [
"BSD-2-Clause"
] | null | null | null | from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implements
class IIndexEvent(Interface):
"""
An lower level event involving the index
"""
class IIndexUpdate(Interface):
"""
An low level event involving the index
"""
class IPackageEvent(IIndexEvent):
"""
An event involving a package
"""
path = Attribute('Path to package')
class IPackageAdded(IPackageEvent):
"""
A package is added to the repository
"""
class IPackageRemoved(IPackageEvent):
"""
A package is removed to the repository
"""
class IndexEvent(object):
implements(IIndexEvent)
def __init__(self, datafile, index, pkgdatas=None):
self.index = index
self.datafile = datafile
self.pkgdatas = pkgdatas
class IndexUpdate(IndexEvent):
implements(IIndexUpdate)
class PackageEvent(object):
"""
Baseclass for pacakage events
"""
implements(IPackageEvent)
def __init__(self, index_manager, path=None, name=None, version=None):
self.name = name
self.version = version
self.im = index_manager
self.path = path
if self.name is None and self.path:
info = self.im.pkginfo_from_file(path, self.im.move_on_error)
self.name = info.name
self.version = info.version
class PackageAdded(PackageEvent):
implements(IPackageAdded)
def __init__(self, index_manager, path=None, name=None, version=None, rebuild_leaf=True):
super(PackageAdded, self).__init__(index_manager, path, name, version)
self.rebuild_leaf = rebuild_leaf
class PackageRemoved(PackageEvent):
implements(IPackageRemoved)
| 22.727273 | 93 | 0.668571 | from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implements
class IIndexEvent(Interface):
class IIndexUpdate(Interface):
class IPackageEvent(IIndexEvent):
path = Attribute('Path to package')
class IPackageAdded(IPackageEvent):
class IPackageRemoved(IPackageEvent):
class IndexEvent(object):
implements(IIndexEvent)
def __init__(self, datafile, index, pkgdatas=None):
self.index = index
self.datafile = datafile
self.pkgdatas = pkgdatas
class IndexUpdate(IndexEvent):
implements(IIndexUpdate)
class PackageEvent(object):
implements(IPackageEvent)
def __init__(self, index_manager, path=None, name=None, version=None):
self.name = name
self.version = version
self.im = index_manager
self.path = path
if self.name is None and self.path:
info = self.im.pkginfo_from_file(path, self.im.move_on_error)
self.name = info.name
self.version = info.version
class PackageAdded(PackageEvent):
implements(IPackageAdded)
def __init__(self, index_manager, path=None, name=None, version=None, rebuild_leaf=True):
super(PackageAdded, self).__init__(index_manager, path, name, version)
self.rebuild_leaf = rebuild_leaf
class PackageRemoved(PackageEvent):
implements(IPackageRemoved)
| true | true |
f7212d29fbadd9f4d8375d99f948230868b7821d | 520 | py | Python | tests/test_parallel.py | seznam/flexp | 84043150a80474809d066a06db02cbbd858f349e | [
"BSD-3-Clause"
] | 6 | 2018-05-30T10:41:56.000Z | 2020-08-05T16:47:54.000Z | tests/test_parallel.py | seznam/flexp | 84043150a80474809d066a06db02cbbd858f349e | [
"BSD-3-Clause"
] | 39 | 2018-07-11T14:44:01.000Z | 2019-08-06T12:27:43.000Z | tests/test_parallel.py | seznam/flexp | 84043150a80474809d066a06db02cbbd858f349e | [
"BSD-3-Clause"
] | 3 | 2018-07-11T14:54:39.000Z | 2019-04-07T04:47:29.000Z | from __future__ import print_function
import time
import unittest
from flexp.flow.parallel import parallelize
def add_two(x):
return x + 2
class TestParallel(unittest.TestCase):
def test_parallel(self):
count = 50
data = range(0, count)
start = time.clock()
res = list(parallelize(add_two, data, 25))
end = time.clock()
print("Time to process {}".format(end - start))
assert len(res) == count
assert sum(res) == (2 + count + 1) * count / 2
| 20.8 | 55 | 0.617308 | from __future__ import print_function
import time
import unittest
from flexp.flow.parallel import parallelize
def add_two(x):
return x + 2
class TestParallel(unittest.TestCase):
def test_parallel(self):
count = 50
data = range(0, count)
start = time.clock()
res = list(parallelize(add_two, data, 25))
end = time.clock()
print("Time to process {}".format(end - start))
assert len(res) == count
assert sum(res) == (2 + count + 1) * count / 2
| true | true |
f7212eed7b4fc035239879fd2bda4106c25fb513 | 2,399 | py | Python | CNN/extract.py | skywolf829/CSE5559_Final_Project | c7b29e6fc0cbfd81252edbadaa0d733a0c24bee7 | [
"MIT"
] | null | null | null | CNN/extract.py | skywolf829/CSE5559_Final_Project | c7b29e6fc0cbfd81252edbadaa0d733a0c24bee7 | [
"MIT"
] | null | null | null | CNN/extract.py | skywolf829/CSE5559_Final_Project | c7b29e6fc0cbfd81252edbadaa0d733a0c24bee7 | [
"MIT"
] | 1 | 2020-05-02T05:58:55.000Z | 2020-05-02T05:58:55.000Z | ## Basic Python libraries
import os
from PIL import Image
## Deep learning and array processing libraries
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
## Inner-project imports
from model import EncoderCNN, DecoderRNN
##### Code begins #####
# Path to config file
image_directory = './CNN/images/'
network_directory = './CNN/models/'
# Setting up other necessary paths
encoder_path = f'{network_directory}encoder-5-3000.pkl'
# Define the compute device (either GPU or CPU)
if torch.cuda.is_available():
compute_device = torch.device('cuda:0')
else:
compute_device = torch.device('cpu')
print(f'Using device: {compute_device}')
# Create the data transforms for evaluating
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Configure network
network = EncoderCNN(embed_size=256)
network = network.eval()
network.load_state_dict(torch.load(encoder_path, map_location='cpu'))
network = network.to(compute_device)
def get_visual_features(img):
"""
Extracts the visual features from an input image. Converts input
into PIL Image, normalizes the image, then feeds it through a CNN.
The features returned from the CNN are then pooled into a 1x512x1x1
and finally squeezed to produce our [512] array output.
Input
img :: 3D NumPy array
Takes a [x, y, 3] NumPy array to be converted into a PIL Image
Output
features :: 1D NumPy array
Returns a [512] NumPy array of the visual features from the CNN
"""
# Convert to PIL Image and perform transformation
img = Image.fromarray(img).convert('RGB')
img = img.resize([224, 224], Image.LANCZOS)
img = transform(img)
# Add a 4th dimension and send to compute device (GPU or CPU)
img = img.unsqueeze(0)
img = img.to(compute_device)
# Feed input through CNN
features = network(img)
# Squeeze into a [512] vector
features = features.squeeze()
# Convert to NumPy
features = features.cpu().detach().numpy()
return features
# Below is only there for testing, commented out for now
"""
if __name__ == '__main__':
# Inference
img = Image.open(f'{image_directory}input/1.png')
img = np.asarray(img)
features = get_visual_features(img)
print('End')
""" | 28.903614 | 132 | 0.709462 | rt Image
.nn.functional as F
import torchvision
import torchvision.transforms as transforms
derCNN, DecoderRNN
ork_directory}encoder-5-3000.pkl'
if torch.cuda.is_available():
compute_device = torch.device('cuda:0')
else:
compute_device = torch.device('cpu')
print(f'Using device: {compute_device}')
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
network = EncoderCNN(embed_size=256)
network = network.eval()
network.load_state_dict(torch.load(encoder_path, map_location='cpu'))
network = network.to(compute_device)
def get_visual_features(img):
img = Image.fromarray(img).convert('RGB')
img = img.resize([224, 224], Image.LANCZOS)
img = transform(img)
img = img.unsqueeze(0)
img = img.to(compute_device)
features = network(img)
features = features.squeeze()
features = features.cpu().detach().numpy()
return features
| true | true |
f72130741418bf63d0a3ae4aa5d952c8d92a5ac0 | 72 | py | Python | python/dcf-tools/dcf2dev/__init__.py | tshu/lely-core | fd0ceff2db726af6d2a766039a0b5a6d33d056e8 | [
"Apache-2.0"
] | 4 | 2020-12-27T11:31:57.000Z | 2022-02-09T11:32:08.000Z | python/dcf-tools/dcf2dev/__init__.py | DroidDrive/lely-core | 2ec4560f513264a53d2afaedecdae4a49a39023c | [
"Apache-2.0"
] | null | null | null | python/dcf-tools/dcf2dev/__init__.py | DroidDrive/lely-core | 2ec4560f513264a53d2afaedecdae4a49a39023c | [
"Apache-2.0"
] | 1 | 2022-01-03T01:41:59.000Z | 2022-01-03T01:41:59.000Z | from .cdevice import CDevice, CObject, CSubObject, CValue # noqa: F401
| 36 | 71 | 0.763889 | from .cdevice import CDevice, CObject, CSubObject, CValue
| true | true |
f72130c7c2fc80d8513e06cfe21833534c1bec7a | 1,022 | py | Python | tools/grafana/daemon/lib/parse/sen_bin_parse.py | Flowm/move-on-helium-sensors | 3794d38671e1976c9801bcb8a9639465cdddb731 | [
"Apache-2.0"
] | 1 | 2021-11-11T01:49:28.000Z | 2021-11-11T01:49:28.000Z | tools/grafana/daemon/lib/parse/sen_bin_parse.py | Flowm/move-on-helium-sensors | 3794d38671e1976c9801bcb8a9639465cdddb731 | [
"Apache-2.0"
] | null | null | null | tools/grafana/daemon/lib/parse/sen_bin_parse.py | Flowm/move-on-helium-sensors | 3794d38671e1976c9801bcb8a9639465cdddb731 | [
"Apache-2.0"
] | null | null | null | import logging
from subprocess import Popen, PIPE, STDOUT
from lib.parse.sen_ascii_parse import SenAsciiParse
class SenBinParse:
def __init__(self):
self.ascii_parser = SenAsciiParse()
def parse_packet(self, packet, with_header=True):
if len(packet) < 10:
return
length = len(packet)
status = 0
data = packet
invalid_chunks = 0
if with_header:
length, status = packet[:2]
data = packet[2:-1]
invalid_chunks = packet[-1]
logging.debug("BIN IN: CHK=%d" % invalid_chunks)
#if invalid_chunks != 0:
# return
parser = Popen(['moveon-sen-parser', str(invalid_chunks)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
stdout = parser.communicate(input=data)[0]
logging.debug("BIN OUT: %s" % stdout.strip().decode())
for line in stdout.decode().splitlines():
for data in self.ascii_parser.parse_packet(line, "com"):
yield data
| 28.388889 | 106 | 0.596869 | import logging
from subprocess import Popen, PIPE, STDOUT
from lib.parse.sen_ascii_parse import SenAsciiParse
class SenBinParse:
def __init__(self):
self.ascii_parser = SenAsciiParse()
def parse_packet(self, packet, with_header=True):
if len(packet) < 10:
return
length = len(packet)
status = 0
data = packet
invalid_chunks = 0
if with_header:
length, status = packet[:2]
data = packet[2:-1]
invalid_chunks = packet[-1]
logging.debug("BIN IN: CHK=%d" % invalid_chunks)
parser = Popen(['moveon-sen-parser', str(invalid_chunks)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
stdout = parser.communicate(input=data)[0]
logging.debug("BIN OUT: %s" % stdout.strip().decode())
for line in stdout.decode().splitlines():
for data in self.ascii_parser.parse_packet(line, "com"):
yield data
| true | true |
f72130ca917b168f52b2f7177e043fcb23096456 | 1,897 | py | Python | tools/concatlibs.py | ZECTBynmo/tacnode | 344d0a10b3766c47538e4917e0ef4d59e07f9b9e | [
"BSD-2-Clause"
] | 28 | 2015-01-28T11:17:04.000Z | 2022-02-07T12:48:22.000Z | tools/concatlibs.py | ZECTBynmo/tacnode | 344d0a10b3766c47538e4917e0ef4d59e07f9b9e | [
"BSD-2-Clause"
] | null | null | null | tools/concatlibs.py | ZECTBynmo/tacnode | 344d0a10b3766c47538e4917e0ef4d59e07f9b9e | [
"BSD-2-Clause"
] | 5 | 2015-01-29T19:34:45.000Z | 2019-03-17T11:15:26.000Z | import os
import sys
##############
# NOTE: You will need to build boost
# On windows, use the following command from the visual studio command
# prompt (after running boostrap.bat)
#
# bjam --build-dir=c:\boost --build-type=complete --toolset=msvc-9.0 address-model=64 architecture=x86 --with-system
##############
currentPath = os.getcwd()
config = "Debug"
boostLocation = "C:/boost"
boostLibString = "libboost_system-vc90-s-1_52.lib" if config == "Release" else "libboost_system-vc90-sgd-1_52.lib";
# Main list of libraries to be concatinated into the final library
# NOTE: For non-windows environments, the .lib is replaced with .o below
#
# Remove any libries from this list that you would rather link manually (separately)
inputLibs = [
# Main node.js library
currentPath+'/'+config+"/lib/node.lib",
# v8
currentPath+'/build/'+config+"/lib/v8_base.lib",
currentPath+'/build/'+config+"/lib/v8_nosnapshot.lib",
currentPath+'/build/'+config+"/lib/v8_snapshot.lib",
# libuv
currentPath+'/'+config+"/lib/libuv.lib",
# Other direct dependencies of node
currentPath+'/'+config+"/lib/cares.lib",
currentPath+'/'+config+"/lib/http_parser.lib",
currentPath+'/'+config+"/lib/openssl.lib",
currentPath+'/'+config+"/lib/zlib.lib",
# Boost
#boostLocation+"/boost/bin.v2/libs/system/build/msvc-9.0/"+config+"/address-model-64/architecture-x86/link-static/runtime-link-static/" + boostLibString
]
inputLibString = ""
inputOString = ""
# Build our list of input libraries for windows (.lib)
for lib in inputLibs:
inputLibString += lib + " "
# Build an equivelant list for non-windows (.o)
for lib in inputLibs:
lib.replace(".lib", ".o")
inputOString += lib + " "
# Concatinate!
if sys.platform == 'win32':
os.system('lib.exe /OUT:'+currentPath+'/'+config+'/node.lib ' + inputLibString)
else:
os.system('ar rcs '+currentPath+'/'+config+'/node.a ' + inputOString)
| 30.596774 | 153 | 0.698471 | import os
import sys
gd-1_52.lib";
inputLibs = [
currentPath+'/'+config+"/lib/node.lib",
currentPath+'/build/'+config+"/lib/v8_base.lib",
currentPath+'/build/'+config+"/lib/v8_nosnapshot.lib",
currentPath+'/build/'+config+"/lib/v8_snapshot.lib",
currentPath+'/'+config+"/lib/libuv.lib",
currentPath+'/'+config+"/lib/cares.lib",
currentPath+'/'+config+"/lib/http_parser.lib",
currentPath+'/'+config+"/lib/openssl.lib",
currentPath+'/'+config+"/lib/zlib.lib",
]
inputLibString = ""
inputOString = ""
for lib in inputLibs:
inputLibString += lib + " "
for lib in inputLibs:
lib.replace(".lib", ".o")
inputOString += lib + " "
if sys.platform == 'win32':
os.system('lib.exe /OUT:'+currentPath+'/'+config+'/node.lib ' + inputLibString)
else:
os.system('ar rcs '+currentPath+'/'+config+'/node.a ' + inputOString)
| true | true |
f72130ecddae709848f93c0001cf6167db8fb692 | 3,950 | py | Python | google/ads/google_ads/v4/proto/enums/mobile_app_vendor_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/enums/mobile_app_vendor_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/enums/mobile_app_vendor_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\024MobileAppVendorProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\n;google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"q\n\x13MobileAppVendorEnum\"Z\n\x0fMobileAppVendor\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x13\n\x0f\x41PPLE_APP_STORE\x10\x02\x12\x14\n\x10GOOGLE_APP_STORE\x10\x03\x42\xe9\x01\n!com.google.ads.googleads.v4.enumsB\x14MobileAppVendorProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR = _descriptor.EnumDescriptor(
name='MobileAppVendor',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum.MobileAppVendor',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APPLE_APP_STORE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_APP_STORE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=147,
serialized_end=237,
)
_sym_db.RegisterEnumDescriptor(_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR)
_MOBILEAPPVENDORENUM = _descriptor.Descriptor(
name='MobileAppVendorEnum',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=237,
)
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR.containing_type = _MOBILEAPPVENDORENUM
DESCRIPTOR.message_types_by_name['MobileAppVendorEnum'] = _MOBILEAPPVENDORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MobileAppVendorEnum = _reflection.GeneratedProtocolMessageType('MobileAppVendorEnum', (_message.Message,), dict(
DESCRIPTOR = _MOBILEAPPVENDORENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.mobile_app_vendor_pb2'
,
__doc__ = """Container for enum describing different types of mobile app vendors.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.enums.MobileAppVendorEnum)
))
_sym_db.RegisterMessage(MobileAppVendorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.72549 | 649 | 0.786076 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\024MobileAppVendorProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\n;google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"q\n\x13MobileAppVendorEnum\"Z\n\x0fMobileAppVendor\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x13\n\x0f\x41PPLE_APP_STORE\x10\x02\x12\x14\n\x10GOOGLE_APP_STORE\x10\x03\x42\xe9\x01\n!com.google.ads.googleads.v4.enumsB\x14MobileAppVendorProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR = _descriptor.EnumDescriptor(
name='MobileAppVendor',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum.MobileAppVendor',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APPLE_APP_STORE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_APP_STORE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=147,
serialized_end=237,
)
_sym_db.RegisterEnumDescriptor(_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR)
_MOBILEAPPVENDORENUM = _descriptor.Descriptor(
name='MobileAppVendorEnum',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=237,
)
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR.containing_type = _MOBILEAPPVENDORENUM
DESCRIPTOR.message_types_by_name['MobileAppVendorEnum'] = _MOBILEAPPVENDORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MobileAppVendorEnum = _reflection.GeneratedProtocolMessageType('MobileAppVendorEnum', (_message.Message,), dict(
DESCRIPTOR = _MOBILEAPPVENDORENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.mobile_app_vendor_pb2'
,
__doc__ = """Container for enum describing different types of mobile app vendors.
""",
))
_sym_db.RegisterMessage(MobileAppVendorEnum)
DESCRIPTOR._options = None
| true | true |
f72131ada34d81d06455b0c8be1ca2dd4d1e24ee | 5,159 | py | Python | checkpoints/sum/train/hotel_mask/batch_size_16-notes_new_subword/code_snapshot/generate_from_lm.py | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | checkpoints/sum/train/hotel_mask/batch_size_16-notes_new_subword/code_snapshot/generate_from_lm.py | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | checkpoints/sum/train/hotel_mask/batch_size_16-notes_new_subword/code_snapshot/generate_from_lm.py | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | # generate_from_lm.py
"""
Load a trained language model and generate text
Example usage:
PYTHONPATH=. python generate_from_lm.py \
--init="Although the food" --tau=0.5 \
--sample_method=gumbel --g_eps=1e-5 \
--load_model='checkpoints/lm/mlstm/hotel/batch_size_64/lm_e9_2.93.pt' \
--dataset='hotel' --cpu=1 --sample_method=greedy
"""
import pdb
import torch
import torch.nn as nn
from models.custom_parallel import DataParallelModel
from models.mlstm import StackedLSTMEncoderDecoder
from models.nn_utils import move_to_cuda, setup_gpus, logits_to_prob, prob_to_vocab_id
from project_settings import HParams, PAD_ID, DatasetConfig
from utils import load_file, create_argparse_and_update_hp
#######################################
#
# Setup
#
#######################################
hp = HParams()
hp, run_name, parser = create_argparse_and_update_hp(hp)
parser.add_argument('--dataset', default='yelp',
help='yelp,amazon; will determine which subwordenc to use')
parser.add_argument('--init', default='The meaning of life is ',
help="Initial text ")
parser.add_argument('--load_model', default=None,
help="Path to model to load")
parser.add_argument('--seq_len', type=int, default=50,
help="Maximum sequence length")
parser.add_argument('--softmax_method', type=str, default='softmax',
help="softmax or gumbel")
parser.add_argument('--sample_method', type=str, default='sample',
help="sample or greedy")
parser.add_argument('--gumbel_hard', type=bool, default=False,
help="whether to produce one-hot from Gumbel softmax")
parser.add_argument('--beam_size', type=int, default=1,
help="Width for beam search")
parser.add_argument('--len_norm_factor', type=float, default=0.0,
help="Normalization factor")
parser.add_argument('--len_norm_const', type=float, default=5.0,
help="Normalization constant")
parser.add_argument('--gpus', default='0',
help="CUDA visible devices, e.g. 2,3")
parser.add_argument('--cpu', default=False,
help="if want to run on cpu, set --cpu=True")
opt = parser.parse_args()
setup_gpus(opt.gpus, hp.seed)
ds_conf = DatasetConfig(opt.dataset)
if opt.load_model is None:
opt.load_model = ds_conf.lm_path
#######################################
#
# Run
#
#######################################
def batchify(data, batch_size):
"""
Args:
data: 1D Tensor
batch_size: int
Returns:
data: reshaped Tensor of size (batch_size, -1)
Example where data is non-negative integers and batch_size = 4
[[0 1 2 3 4 5 6 ]
[7 8 9 10 11 12 13]
[14 15 16 17 18 19 20]
[21 22 23 24 25 26 27]]
Note: not currently using this anymore. Was used when reading in data from text fileW
"""
nbatch = data.size(0) // batch_size
data = data.narrow(0, 0, nbatch * batch_size) # same as slice
data = data.view(batch_size, -1).contiguous()
return data
#
# Prepare initial input text
#
subwordenc = load_file(ds_conf.subwordenc_path)
init_texts = [init for init in opt.init.split('|')]
init_tokens = [subwordenc.encode(init) for init in init_texts]
init_lens = [len(init) for init in init_tokens]
max_len = max(init_lens)
init_tokens_padded = [tokens + [PAD_ID for _ in range(max_len - len(tokens))] for tokens in init_tokens]
init_tensor = [batchify(torch.LongTensor(init), 1) for init in init_tokens_padded]
init_tensor = torch.cat(init_tensor, dim=0) # [batch, lens
init_tensor = move_to_cuda(init_tensor)
batch_size = init_tensor.size(0)
#
# Load and set up model
#
if opt.cpu:
checkpoint = torch.load(opt.load_model, map_location='cpu')
elif torch.cuda.is_available():
checkpoint = torch.load(opt.load_model) # StackedLSTMEncoder
model = checkpoint['model']
if isinstance(model, nn.DataParallel):
model = model.module
ngpus = 1 if len(opt.gpus) == 1 else len(opt.gpus.split(','))
#
# Generate
# #
if 'mlstm' in opt.load_model:
# Set up encoder decoder
embed, rnn = model.embed, model.rnn
enc_dec = StackedLSTMEncoderDecoder(embed, rnn)
if torch.cuda.is_available():
enc_dec.cuda()
enc_dec = DataParallelModel(enc_dec) if ngpus > 1 else enc_dec
enc_dec.eval()
# Generate
result = enc_dec(init_tensor,
dec_kwargs={'seq_len': opt.seq_len,
'softmax_method': opt.softmax_method,
'sample_method': opt.sample_method,
'tau': hp.tau,
'gumbel_hard': opt.gumbel_hard,
'k': opt.beam_size,
'subwordenc': subwordenc})
probs, ids, texts, extra = zip(*result) if ngpus > 1 else result
if ngpus > 1: # flatten: each gpu returns lists of texts
texts = [batch_text for gpu_texts in texts for batch_text in gpu_texts]
for i in range(batch_size):
print(init_texts[i] + texts[i])
print('-' * 100)
| 33.070513 | 104 | 0.629192 |
import pdb
import torch
import torch.nn as nn
from models.custom_parallel import DataParallelModel
from models.mlstm import StackedLSTMEncoderDecoder
from models.nn_utils import move_to_cuda, setup_gpus, logits_to_prob, prob_to_vocab_id
from project_settings import HParams, PAD_ID, DatasetConfig
from utils import load_file, create_argparse_and_update_hp
.gpus, hp.seed)
ds_conf = DatasetConfig(opt.dataset)
if opt.load_model is None:
opt.load_model = ds_conf.lm_path
opt.softmax_method,
'sample_method': opt.sample_method,
'tau': hp.tau,
'gumbel_hard': opt.gumbel_hard,
'k': opt.beam_size,
'subwordenc': subwordenc})
probs, ids, texts, extra = zip(*result) if ngpus > 1 else result
if ngpus > 1:
texts = [batch_text for gpu_texts in texts for batch_text in gpu_texts]
for i in range(batch_size):
print(init_texts[i] + texts[i])
print('-' * 100)
| true | true |
f721321d98af2205ce169d3d88af0b431e7731ea | 1,235 | py | Python | sysinv/sysinv/sysinv/sysinv/objects/interface_base.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/objects/interface_base.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/objects/interface_base.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | 1 | 2021-01-05T16:24:58.000Z | 2021-01-05T16:24:58.000Z | #
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
def _get_interface_name_list(field, db_object):
ifnames = []
for i in db_object[field]:
ifnames.append(i['ifname'])
return ifnames
class InterfaceBase(base.SysinvObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'forihostid': utils.int_or_none,
'iftype': utils.str_or_none,
'ifname': utils.str_or_none,
'networktype': utils.str_or_none,
'ifcapabilities': utils.dict_or_none,
'farend': utils.dict_or_none,
'uses': utils.list_of_strings_or_none,
'used_by': utils.list_of_strings_or_none,
'sriov_numvfs': utils.int_or_none
}
_foreign_fields = {
'uses': _get_interface_name_list,
'used_by': _get_interface_name_list,
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.interface_get(uuid)
| 25.204082 | 53 | 0.637247 |
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
def _get_interface_name_list(field, db_object):
ifnames = []
for i in db_object[field]:
ifnames.append(i['ifname'])
return ifnames
class InterfaceBase(base.SysinvObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'forihostid': utils.int_or_none,
'iftype': utils.str_or_none,
'ifname': utils.str_or_none,
'networktype': utils.str_or_none,
'ifcapabilities': utils.dict_or_none,
'farend': utils.dict_or_none,
'uses': utils.list_of_strings_or_none,
'used_by': utils.list_of_strings_or_none,
'sriov_numvfs': utils.int_or_none
}
_foreign_fields = {
'uses': _get_interface_name_list,
'used_by': _get_interface_name_list,
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.interface_get(uuid)
| true | true |
f72133aff214d90410fb19b8ccb50eafa1390f3b | 12,732 | py | Python | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | 1 | 2021-06-11T19:54:19.000Z | 2021-06-11T19:54:19.000Z | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | 1 | 2019-08-30T14:45:33.000Z | 2019-08-30T14:45:33.000Z | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for customremotes archives providing dl+archive URLs handling"""
from unittest.mock import patch
import os
import os.path as op
import sys
import re
import logging
import glob
from time import sleep
from ..archives import (
ArchiveAnnexCustomRemote,
link_file_load,
)
from ..base import AnnexExchangeProtocol
from ...support.annexrepo import AnnexRepo
from ...consts import ARCHIVES_SPECIAL_REMOTE
from .test_base import (
BASE_INTERACTION_SCENARIOS,
check_interaction_scenario,
)
from ...tests.utils import (
abspath,
assert_equal,
assert_false,
assert_is_instance,
assert_not_in,
assert_true,
chpwd,
eq_,
get_most_obscure_supported_name,
in_,
known_failure_githubci_win,
ok_,
ok_file_has_content,
serve_path_via_http,
swallow_logs,
swallow_outputs,
with_tempfile,
with_tree,
)
from ...cmd import Runner, GitRunner
from ...utils import (
_path_,
on_linux,
on_osx,
unlink,
)
from . import _get_custom_runner
from ...tests.test_archives import (
fn_archive_obscure,
fn_archive_obscure_ext,
fn_in_archive_obscure,
)
#import line_profiler
#prof = line_profiler.LineProfiler()
# TODO: with_tree ATM for archives creates this nested top directory
# matching archive name, so it will be a/d/test.dat ... we don't want that probably
@known_failure_githubci_win
@with_tree(
tree=(('a.tar.gz', {'d': {fn_in_archive_obscure: '123'}}),
('simple.txt', '123'),
(fn_archive_obscure_ext, (('d', ((fn_in_archive_obscure, '123'),)),)),
(fn_archive_obscure, '123')))
@with_tempfile()
def test_basic_scenario(d, d2):
fn_archive, fn_extracted = fn_archive_obscure_ext, fn_archive_obscure
annex = AnnexRepo(d, runner=_get_custom_runner(d))
annex.init_remote(
ARCHIVES_SPECIAL_REMOTE,
['encryption=none', 'type=external', 'externaltype=%s' % ARCHIVES_SPECIAL_REMOTE,
'autoenable=true'
])
assert annex.is_special_annex_remote(ARCHIVES_SPECIAL_REMOTE)
# We want two maximally obscure names, which are also different
assert(fn_extracted != fn_in_archive_obscure)
annex.add(fn_archive)
annex.commit(msg="Added tarball")
annex.add(fn_extracted)
annex.commit(msg="Added the load file")
# Operations with archive remote URL
annexcr = ArchiveAnnexCustomRemote(path=d)
# few quick tests for get_file_url
eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat"), "dl+archive:xyz#path=a.dat")
eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat", size=999), "dl+archive:xyz#path=a.dat&size=999")
# see https://github.com/datalad/datalad/issues/441#issuecomment-223376906
# old style
eq_(annexcr._parse_url("dl+archive:xyz/a.dat#size=999"), ("xyz", "a.dat", {'size': 999}))
eq_(annexcr._parse_url("dl+archive:xyz/a.dat"), ("xyz", "a.dat", {})) # old format without size
# new style
eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat&size=999"), ("xyz", "a.dat", {'size': 999}))
eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat"), ("xyz", "a.dat", {})) # old format without size
file_url = annexcr.get_file_url(
archive_file=fn_archive,
file=fn_archive.replace('.tar.gz', '') + '/d/' + fn_in_archive_obscure)
annex.add_url_to_file(fn_extracted, file_url, ['--relaxed'])
annex.drop(fn_extracted)
list_of_remotes = annex.whereis(fn_extracted, output='descriptions')
in_('[%s]' % ARCHIVES_SPECIAL_REMOTE, list_of_remotes)
assert_false(annex.file_has_content(fn_extracted))
annex.get(fn_extracted)
assert_true(annex.file_has_content(fn_extracted))
annex.rm_url(fn_extracted, file_url)
assert_false(annex.drop(fn_extracted)['success'])
annex.add_url_to_file(fn_extracted, file_url)
annex.drop(fn_extracted)
annex.get(fn_extracted)
annex.drop(fn_extracted) # so we don't get from this one next
# Let's create a clone and verify chain of getting file through the tarball
cloned_annex = AnnexRepo.clone(d, d2, runner=_get_custom_runner(d2))
# we still need to enable manually atm that special remote for archives
# cloned_annex.enable_remote('annexed-archives')
assert_false(cloned_annex.file_has_content(fn_archive))
assert_false(cloned_annex.file_has_content(fn_extracted))
cloned_annex.get(fn_extracted)
assert_true(cloned_annex.file_has_content(fn_extracted))
# as a result it would also fetch tarball
assert_true(cloned_annex.file_has_content(fn_archive))
# Check if protocol was collected
if os.environ.get('DATALAD_TESTS_PROTOCOLREMOTE'):
assert_is_instance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol)
protocol_file = _path_(annex.path,
'.git/bin/git-annex-remote-datalad-archive')
ok_file_has_content(protocol_file, "VERSION 1", re_=True, match=False)
ok_file_has_content(protocol_file, "GETAVAILABILITY", re_=True, match=False)
ok_file_has_content(protocol_file, "#!/bin/bash", re_=True, match=False)
else:
assert_false(isinstance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol))
# verify that we can drop if original archive gets dropped but available online:
# -- done as part of the test_add_archive_content.py
# verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)
@known_failure_githubci_win
@with_tree(
tree={'a.tar.gz': {'d': {fn_in_archive_obscure: '123'}}}
)
def test_annex_get_from_subdir(topdir):
from datalad.api import add_archive_content
annex = AnnexRepo(topdir, init=True)
annex.add('a.tar.gz')
annex.commit()
add_archive_content('a.tar.gz', annex=annex, delete=True)
fpath = op.join(topdir, 'a', 'd', fn_in_archive_obscure)
with chpwd(op.join(topdir, 'a', 'd')):
runner = Runner()
runner(['git', 'annex', 'drop', '--', fn_in_archive_obscure]) # run git annex drop
assert_false(annex.file_has_content(fpath)) # and verify if file deleted from directory
runner(['git', 'annex', 'get', '--', fn_in_archive_obscure]) # run git annex get
assert_true(annex.file_has_content(fpath)) # and verify if file got into directory
@known_failure_githubci_win
def test_get_git_environ_adjusted():
gitrunner = GitRunner()
env = {"GIT_DIR": "../../.git", "GIT_WORK_TREE": "../../", "TEST_VAR": "Exists"}
# test conversion of relevant env vars from relative_path to correct absolute_path
adj_env = gitrunner.get_git_environ_adjusted(env)
assert_equal(adj_env["GIT_DIR"], abspath(env["GIT_DIR"]))
assert_equal(adj_env["GIT_WORK_TREE"], abspath(env["GIT_WORK_TREE"]))
# test if other environment variables passed to function returned unaltered
assert_equal(adj_env["TEST_VAR"], env["TEST_VAR"])
# test import of sys_env if no environment passed to function
sys_env = gitrunner.get_git_environ_adjusted()
assert_equal(sys_env["PWD"], os.environ.get("PWD"))
def test_no_rdflib_loaded():
# rely on rdflib polluting stdout to see that it is not loaded whenever we load this remote
# since that adds 300ms delay for no immediate use
from ...cmd import Runner
runner = Runner()
with swallow_outputs() as cmo:
runner.run(
[sys.executable,
'-c',
'import datalad.customremotes.archives, sys; '
'print([k for k in sys.modules if k.startswith("rdflib")])'],
log_stdout=False,
log_stderr=False)
# print cmo.out
assert_not_in("rdflib", cmo.out)
assert_not_in("rdflib", cmo.err)
@with_tree(tree={'archive.tar.gz': {'f1.txt': 'content'}})
def test_interactions(tdir):
# Just a placeholder since constructor expects a repo
repo = AnnexRepo(tdir, create=True, init=True)
repo.add('archive.tar.gz')
repo.commit('added')
for scenario in BASE_INTERACTION_SCENARIOS + [
[
('GETCOST', 'COST %d' % ArchiveAnnexCustomRemote.COST),
],
[
# by default we do not require any fancy init
# no urls supported by default
('CLAIMURL http://example.com', 'CLAIMURL-FAILURE'),
# we know that is just a single option, url, is expected so full
# one would be passed
('CLAIMURL http://example.com roguearg', 'CLAIMURL-FAILURE'),
],
# basic interaction failing to fetch content from archive
[
('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
('VALUE dl+archive://somekey2#path', None),
('VALUE dl+archive://somekey3#path', None),
('VALUE',
re.compile(
'TRANSFER-FAILURE RETRIEVE somekey Failed to fetch any '
'archive containing somekey. Tried: \[\]')
)
],
# # incorrect response received from annex -- something isn't right but ... later
# [
# ('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
# # We reply with UNSUPPORTED-REQUEST in these cases
# ('GETCOST', 'UNSUPPORTED-REQUEST'),
# ],
]:
check_interaction_scenario(ArchiveAnnexCustomRemote, tdir, scenario)
@with_tree(tree=
{'1.tar.gz':
{
'bu.dat': '52055957098986598349795121365535' * 10000,
'bu3.dat': '8236397048205454767887168342849275422' * 10000
},
'2.tar.gz':
{
'bu2.dat': '17470674346319559612580175475351973007892815102' * 10000
},
}
)
@serve_path_via_http()
@with_tempfile
def check_observe_tqdm(topdir, topurl, outdir):
# just a helper to enable/use when want quickly to get some
# repository with archives and observe tqdm
from datalad.api import add_archive_content
from datalad.api import create
ds = create(outdir)
for f in '1.tar.gz', '2.tar.gz':
with chpwd(outdir):
ds.repo.add_url_to_file(f, topurl + f)
ds.save(f)
add_archive_content(f, delete=True, drop_after=True)
files = glob.glob(op.join(outdir, '*'))
ds.drop(files) # will not drop tarballs
ds.repo.drop([], options=['--all', '--fast'])
ds.get(files)
ds.repo.drop([], options=['--all', '--fast'])
# now loop so we could play with it outside
print(outdir)
# import pdb; pdb.set_trace()
while True:
sleep(0.1)
@known_failure_githubci_win
@with_tempfile
def test_link_file_load(tempfile):
tempfile2 = tempfile + '_'
with open(tempfile, 'w') as f:
f.write("LOAD")
link_file_load(tempfile, tempfile2) # this should work in general
ok_(os.path.exists(tempfile2))
with open(tempfile2, 'r') as f:
assert_equal(f.read(), "LOAD")
def inode(fname):
with open(fname) as fd:
return os.fstat(fd.fileno()).st_ino
def stats(fname, times=True):
"""Return stats on the file which should have been preserved"""
with open(fname) as fd:
st = os.fstat(fd.fileno())
stats = (st.st_mode, st.st_uid, st.st_gid, st.st_size)
if times:
return stats + (st.st_atime, st.st_mtime)
else:
return stats
# despite copystat mtime is not copied. TODO
# st.st_mtime)
if on_linux or on_osx:
# above call should result in the hardlink
assert_equal(inode(tempfile), inode(tempfile2))
assert_equal(stats(tempfile), stats(tempfile2))
# and if we mock absence of .link
def raise_AttributeError(*args):
raise AttributeError("TEST")
with patch('os.link', raise_AttributeError):
with swallow_logs(logging.WARNING) as cm:
link_file_load(tempfile, tempfile2) # should still work
ok_("failed (TEST), copying file" in cm.out)
# should be a copy (either originally for windows, or after mocked call)
ok_(inode(tempfile) != inode(tempfile2))
with open(tempfile2, 'r') as f:
assert_equal(f.read(), "LOAD")
assert_equal(stats(tempfile, times=False), stats(tempfile2, times=False))
unlink(tempfile2) # TODO: next two with_tempfile
| 37.011628 | 116 | 0.654807 |
"VERSION 1", re_=True, match=False)
ok_file_has_content(protocol_file, "GETAVAILABILITY", re_=True, match=False)
ok_file_has_content(protocol_file, "#!/bin/bash", re_=True, match=False)
else:
assert_false(isinstance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol))
# verify that we can drop if original archive gets dropped but available online:
# -- done as part of the test_add_archive_content.py
# verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)
@known_failure_githubci_win
@with_tree(
tree={'a.tar.gz': {'d': {fn_in_archive_obscure: '123'}}}
)
def test_annex_get_from_subdir(topdir):
from datalad.api import add_archive_content
annex = AnnexRepo(topdir, init=True)
annex.add('a.tar.gz')
annex.commit()
add_archive_content('a.tar.gz', annex=annex, delete=True)
fpath = op.join(topdir, 'a', 'd', fn_in_archive_obscure)
with chpwd(op.join(topdir, 'a', 'd')):
runner = Runner()
runner(['git', 'annex', 'drop', '--', fn_in_archive_obscure])
assert_false(annex.file_has_content(fpath))
runner(['git', 'annex', 'get', '--', fn_in_archive_obscure])
assert_true(annex.file_has_content(fpath))
@known_failure_githubci_win
def test_get_git_environ_adjusted():
gitrunner = GitRunner()
env = {"GIT_DIR": "../../.git", "GIT_WORK_TREE": "../../", "TEST_VAR": "Exists"}
adj_env = gitrunner.get_git_environ_adjusted(env)
assert_equal(adj_env["GIT_DIR"], abspath(env["GIT_DIR"]))
assert_equal(adj_env["GIT_WORK_TREE"], abspath(env["GIT_WORK_TREE"]))
assert_equal(adj_env["TEST_VAR"], env["TEST_VAR"])
sys_env = gitrunner.get_git_environ_adjusted()
assert_equal(sys_env["PWD"], os.environ.get("PWD"))
def test_no_rdflib_loaded():
from ...cmd import Runner
runner = Runner()
with swallow_outputs() as cmo:
runner.run(
[sys.executable,
'-c',
'import datalad.customremotes.archives, sys; '
'print([k for k in sys.modules if k.startswith("rdflib")])'],
log_stdout=False,
log_stderr=False)
assert_not_in("rdflib", cmo.out)
assert_not_in("rdflib", cmo.err)
@with_tree(tree={'archive.tar.gz': {'f1.txt': 'content'}})
def test_interactions(tdir):
repo = AnnexRepo(tdir, create=True, init=True)
repo.add('archive.tar.gz')
repo.commit('added')
for scenario in BASE_INTERACTION_SCENARIOS + [
[
('GETCOST', 'COST %d' % ArchiveAnnexCustomRemote.COST),
],
[
('CLAIMURL http://example.com', 'CLAIMURL-FAILURE'),
('CLAIMURL http://example.com roguearg', 'CLAIMURL-FAILURE'),
],
[
('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
('VALUE dl+archive://somekey2#path', None),
('VALUE dl+archive://somekey3#path', None),
('VALUE',
re.compile(
'TRANSFER-FAILURE RETRIEVE somekey Failed to fetch any '
'archive containing somekey. Tried: \[\]')
)
],
key dl+archive:'),
# # We reply with UNSUPPORTED-REQUEST in these cases
# ('GETCOST', 'UNSUPPORTED-REQUEST'),
# ],
]:
check_interaction_scenario(ArchiveAnnexCustomRemote, tdir, scenario)
@with_tree(tree=
{'1.tar.gz':
{
'bu.dat': '52055957098986598349795121365535' * 10000,
'bu3.dat': '8236397048205454767887168342849275422' * 10000
},
'2.tar.gz':
{
'bu2.dat': '17470674346319559612580175475351973007892815102' * 10000
},
}
)
@serve_path_via_http()
@with_tempfile
def check_observe_tqdm(topdir, topurl, outdir):
# just a helper to enable/use when want quickly to get some
# repository with archives and observe tqdm
from datalad.api import add_archive_content
from datalad.api import create
ds = create(outdir)
for f in '1.tar.gz', '2.tar.gz':
with chpwd(outdir):
ds.repo.add_url_to_file(f, topurl + f)
ds.save(f)
add_archive_content(f, delete=True, drop_after=True)
files = glob.glob(op.join(outdir, '*'))
ds.drop(files) # will not drop tarballs
ds.repo.drop([], options=['--all', '--fast'])
ds.get(files)
ds.repo.drop([], options=['--all', '--fast'])
# now loop so we could play with it outside
print(outdir)
# import pdb; pdb.set_trace()
while True:
sleep(0.1)
@known_failure_githubci_win
@with_tempfile
def test_link_file_load(tempfile):
    """Verify link_file_load: hardlink where possible, faithful copy otherwise.

    On Linux/OSX the created target must share an inode with the source;
    when ``os.link`` is unavailable (mocked below, or natively on Windows)
    the result must instead be a regular copy with identical content and
    identical mode/uid/gid/size.
    """
    tempfile2 = tempfile + '_'

    with open(tempfile, 'w') as f:
        f.write("LOAD")

    link_file_load(tempfile, tempfile2)  # this should work in general

    ok_(os.path.exists(tempfile2))

    with open(tempfile2, 'r') as f:
        assert_equal(f.read(), "LOAD")

    def inode(fname):
        # inode number -- equal inodes mean the two paths are hardlinked
        with open(fname) as fd:
            return os.fstat(fd.fileno()).st_ino

    def stats(fname, times=True):
        # (mode, uid, gid, size[, atime, mtime]) for equality comparison;
        # mtime is not preserved by the copy fallback (copystat), hence the
        # times=False variant used after the mocked run below
        with open(fname) as fd:
            st = os.fstat(fd.fileno())
            stats = (st.st_mode, st.st_uid, st.st_gid, st.st_size)
            if times:
                return stats + (st.st_atime, st.st_mtime)
            else:
                return stats

    if on_linux or on_osx:
        # above call should result in the hardlink
        assert_equal(inode(tempfile), inode(tempfile2))
        assert_equal(stats(tempfile), stats(tempfile2))

        # and if we mock absence of .link -- must fall back to copying
        def raise_AttributeError(*args):
            raise AttributeError("TEST")

        with patch('os.link', raise_AttributeError):
            with swallow_logs(logging.WARNING) as cm:
                link_file_load(tempfile, tempfile2)  # should still work
                ok_("failed (TEST), copying file" in cm.out)

    # should be a copy (either originally for windows, or after mocked call)
    ok_(inode(tempfile) != inode(tempfile2))
    with open(tempfile2, 'r') as f:
        assert_equal(f.read(), "LOAD")
    assert_equal(stats(tempfile, times=False), stats(tempfile2, times=False))
    unlink(tempfile2)  # TODO: next two with_tempfile
| true | true |
f7213680738375f96a2084c20c14a5024e5d194e | 5,679 | py | Python | salt/states/postgres_extension.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | salt/states/postgres_extension.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | salt/states/postgres_extension.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Management of PostgreSQL extensions (e.g.: postgis)
===================================================
The postgres_extensions module is used to create and manage Postgres extensions.
.. code-block:: yaml
adminpack:
postgres_extension.present
'''
# Import Python libs
import logging
# Import salt libs
from salt.modules import postgres
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this state module when the postgres execution module
    (specifically ``postgres.create_extension``) is available.
    '''
    if 'postgres.create_extension' in __salt__:
        return True
    return False
def present(name,
            if_not_exists=None,
            schema=None,
            ext_version=None,
            from_version=None,
            user=None,
            maintenance_db=None,
            db_password=None,
            db_host=None,
            db_port=None,
            db_user=None):
    '''
    Ensure that the named extension is present with the specified privileges

    name
        The name of the extension to manage

    if_not_exists
        Add a if_not_exists switch to the ddl statement

    schema
        Schema to install the extension into

    from_version
        Old extension version if already installed

    ext_version
        version to install

    user
        System user all operations should be performed on behalf of

    maintenance_db
        Database to act on

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           # default comment, used when nothing needs to be done
           # (fixed typo: previously read "Extention")
           'comment': 'Extension {0} is already present'.format(name)}
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # Ask the execution module what state the extension is in: absent,
    # installed, needing a schema move, or needing a version upgrade.
    mode = 'create'
    mtdata = __salt__['postgres.create_metadata'](
        name,
        schema=schema,
        ext_version=ext_version,
        **db_args)

    # The extension is not present, install it!
    toinstall = postgres._EXTENSION_NOT_INSTALLED in mtdata
    if toinstall:
        mode = 'install'
    toupgrade = False
    if postgres._EXTENSION_INSTALLED in mtdata:
        for flag in [
            postgres._EXTENSION_TO_MOVE,
            postgres._EXTENSION_TO_UPGRADE
        ]:
            if flag in mtdata:
                toupgrade = True
                mode = 'upgrade'

    if __opts__['test']:
        # Dry-run: report the planned action without performing it.
        # ``mode`` is always one of 'create'/'install'/'upgrade' here, so
        # the previous ``if mode:`` guard was always true and is dropped.
        # .replace('eed', 'ed') fixes "createed"/"upgradeed" grammar.
        ret['result'] = None
        ret['comment'] = 'Extension {0} is set to be {1}ed'.format(
            name, mode).replace('eed', 'ed')
        return ret
    cret = None
    if toinstall or toupgrade:
        cret = __salt__['postgres.create_extension'](
            name=name,
            if_not_exists=if_not_exists,
            schema=schema,
            ext_version=ext_version,
            from_version=from_version,
            **db_args)
    if cret:
        ret['comment'] = 'The extension {0} has been {1}ed'.format(name, mode)
    elif cret is not None:
        # create_extension ran but reported failure
        ret['comment'] = 'Failed to {1} extension {0}'.format(name, mode)
        ret['result'] = False
    else:
        # nothing had to be done; keep the default "already present" comment
        ret['result'] = True

    return ret
def absent(name,
           if_exists=None,
           restrict=None,
           cascade=None,
           user=None,
           maintenance_db=None,
           db_password=None,
           db_host=None,
           db_port=None,
           db_user=None):
    '''
    Ensure that the named extension is absent

    name
        Extension name of the extension to remove

    cascade
        Drop on cascade

    if_exists
        Add if exist slug

    restrict
        Add restrict slug

    maintenance_db
        Database to act on

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # Guard clause: nothing to do when the extension is not installed.
    if not __salt__['postgres.is_installed_extension'](name, **db_args):
        ret['comment'] = 'Extension {0} is not present, so it cannot ' \
                         'be removed'.format(name)
        return ret
    # Dry-run: report the planned removal without performing it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Extension {0} is set to be removed'.format(name)
        return ret
    dropped = __salt__['postgres.drop_extension'](name,
                                                  if_exists=if_exists,
                                                  restrict=restrict,
                                                  cascade=cascade,
                                                  **db_args)
    if dropped:
        ret['comment'] = 'Extension {0} has been removed'.format(name)
        ret['changes'][name] = 'Absent'
    else:
        ret['result'] = False
        ret['comment'] = 'Extension {0} failed to be removed'.format(name)
    return ret
| 26.537383 | 80 | 0.559606 |
import logging
from salt.modules import postgres
log = logging.getLogger(__name__)
def __virtual__():
return 'postgres.create_extension' in __salt__
def present(name,
if_not_exists=None,
schema=None,
ext_version=None,
from_version=None,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Extention {0} is already present'.format(name)}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
mode = 'create'
mtdata = __salt__['postgres.create_metadata'](
name,
schema=schema,
ext_version=ext_version,
**db_args)
toinstall = postgres._EXTENSION_NOT_INSTALLED in mtdata
if toinstall:
mode = 'install'
toupgrade = False
if postgres._EXTENSION_INSTALLED in mtdata:
for flag in [
postgres._EXTENSION_TO_MOVE,
postgres._EXTENSION_TO_UPGRADE
]:
if flag in mtdata:
toupgrade = True
mode = 'upgrade'
if __opts__['test']:
ret['result'] = None
if mode:
ret['comment'] = 'Extension {0} is set to be {1}ed'.format(
name, mode).replace('eed', 'ed')
return ret
cret = None
if toinstall or toupgrade:
cret = __salt__['postgres.create_extension'](
name=name,
if_not_exists=if_not_exists,
schema=schema,
ext_version=ext_version,
from_version=from_version,
**db_args)
if cret:
ret['comment'] = 'The extension {0} has been {1}ed'.format(name, mode)
elif cret is not None:
ret['comment'] = 'Failed to {1} extension {0}'.format(name, mode)
ret['result'] = False
else:
ret['result'] = True
return ret
def absent(name,
if_exists=None,
restrict=None,
cascade=None,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
exists = __salt__['postgres.is_installed_extension'](name, **db_args)
if exists:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Extension {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.drop_extension'](name,
if_exists=if_exists,
restrict=restrict,
cascade=cascade,
**db_args):
ret['comment'] = 'Extension {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['result'] = False
ret['comment'] = 'Extension {0} failed to be removed'.format(name)
return ret
else:
ret['comment'] = 'Extension {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret
| true | true |
f72136d4758f64cffd134ac0eafb88595992711c | 10,305 | py | Python | fairseq/progress_bar.py | jxhe/fairseq | 214e3fed5619733efa4f1f82c61db58e5ce08ad8 | [
"MIT"
] | null | null | null | fairseq/progress_bar.py | jxhe/fairseq | 214e3fed5619733efa4f1f82c61db58e5ce08ad8 | [
"MIT"
] | null | null | null | fairseq/progress_bar.py | jxhe/fairseq | 214e3fed5619733efa4f1f82c61db58e5ce08ad8 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
from collections import OrderedDict
from contextlib import contextmanager
import json
import logging
from numbers import Number
import os
import sys
import torch
from fairseq import distributed_utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
    """Instantiate the progress bar requested by ``args.log_format``,
    optionally wrapped with a tensorboard logger on the master process."""
    if args.log_format is None:
        args.log_format = no_progress_bar if args.no_progress_bar else default

    # tqdm renders poorly when stderr is not a terminal (e.g. piped logs)
    if args.log_format == 'tqdm' and not sys.stderr.isatty():
        args.log_format = 'simple'

    fmt = args.log_format
    if fmt == 'json':
        bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)
    elif fmt == 'simple':
        bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval)
    elif fmt == 'tqdm':
        bar = tqdm_progress_bar(iterator, epoch, prefix)
    elif fmt == 'none':
        bar = noop_progress_bar(iterator, epoch, prefix)
    else:
        raise ValueError('Unknown log format: {}'.format(fmt))

    if args.tensorboard_logdir and distributed_utils.is_master(args):
        try:
            # [FB only] custom wrapper for TensorBoard
            import palaas  # noqa
            from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper
            bar = fb_tbmf_wrapper(bar, args, args.log_interval)
        except ImportError:
            bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)

    return bar
def format_stat(stat):
    """Render a single statistic (number, meter, or tensor) as a
    plain printable value; anything unrecognized passes through."""
    if isinstance(stat, Number):
        return '{:g}'.format(stat)
    if isinstance(stat, AverageMeter):
        return '{:.3f}'.format(stat.avg)
    if isinstance(stat, TimeMeter):
        return '{:g}'.format(round(stat.avg))
    if isinstance(stat, StopwatchMeter):
        return '{:g}'.format(round(stat.sum))
    if torch.is_tensor(stat):
        return stat.tolist()
    return stat
class progress_bar(object):
    """Abstract base class for progress bars.

    Subclasses implement ``__iter__``, ``log`` (intermediate stats) and
    ``print`` (end-of-epoch stats).  This base handles the shared prefix
    label, context-manager protocol, and stats formatting helpers.
    """

    def __init__(self, iterable, epoch=None, prefix=None):
        self.iterable = iterable
        self.offset = getattr(iterable, 'offset', 0)
        self.epoch = epoch
        label = ''
        if epoch is not None:
            label = label + 'epoch {:03d}'.format(epoch)
        if prefix is not None:
            label = label + ' | {}'.format(prefix)
        self.prefix = label

    def __len__(self):
        return len(self.iterable)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        return False

    def __iter__(self):
        raise NotImplementedError

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats according to log_interval."""
        raise NotImplementedError

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats."""
        raise NotImplementedError

    def _str_commas(self, stats):
        # "key=value, key=value" rendering of already-formatted stats
        return ', '.join('{}={}'.format(key, stats[key].strip())
                         for key in stats.keys())

    def _str_pipes(self, stats):
        # "key value | key value" rendering of already-formatted stats
        return ' | '.join('{} {}'.format(key, stats[key].strip())
                          for key in stats.keys())

    def _format_stats(self, stats):
        # preserve insertion order; stringify each stat per its datatype
        return OrderedDict((key, str(format_stat(value)))
                           for key, value in stats.items())
@contextmanager
def rename_logger(logger, new_name):
    """Temporarily rename *logger* to *new_name* (no-op when ``None``).

    The original name is restored in a ``finally`` block, so it is
    recovered even if the body raises — the original implementation
    leaked the temporary name on exceptions.
    """
    old_name = logger.name
    if new_name is not None:
        logger.name = new_name
    try:
        yield logger
    finally:
        logger.name = old_name
class json_progress_bar(progress_bar):
    """Emit progress statistics as JSON lines via the module logger."""

    def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
        super().__init__(iterable, epoch, prefix)
        self.log_interval = log_interval
        self.stats = None
        self.tag = None

    def __iter__(self):
        total = float(len(self.iterable))
        for step, item in enumerate(self.iterable, start=self.offset):
            yield item
            due = (
                self.stats is not None
                and step > 0
                and self.log_interval is not None
                and (step + 1) % self.log_interval == 0
            )
            if due:
                # fractional epoch progress, e.g. 2.731 within epoch 3
                if self.epoch is not None:
                    update = self.epoch - 1 + float(step / total)
                else:
                    update = None
                formatted = self._format_stats(self.stats, epoch=self.epoch, update=update)
                with rename_logger(logger, self.tag):
                    logger.info(json.dumps(formatted))

    def log(self, stats, tag=None, step=None):
        """Record intermediate stats; they are emitted from ``__iter__``."""
        self.stats = stats
        self.tag = tag

    def print(self, stats, tag=None, step=None):
        """Emit end-of-epoch stats as a single JSON line."""
        self.stats = stats
        if tag is not None:
            # namespace the keys with the tag, e.g. "valid_loss"
            self.stats = OrderedDict(
                (tag + '_' + key, value) for key, value in self.stats.items()
            )
        formatted = self._format_stats(self.stats, epoch=self.epoch)
        with rename_logger(logger, tag):
            logger.info(json.dumps(formatted))

    def _format_stats(self, stats, epoch=None, update=None):
        formatted = OrderedDict()
        if epoch is not None:
            formatted['epoch'] = epoch
        if update is not None:
            formatted['update'] = round(update, 3)
        # Preprocess stats according to datatype
        for key, value in stats.items():
            formatted[key] = format_stat(value)
        return formatted
class noop_progress_bar(progress_bar):
    """Progress bar that produces no output at all."""

    def __init__(self, iterable, epoch=None, prefix=None):
        super().__init__(iterable, epoch, prefix)

    def __iter__(self):
        yield from self.iterable

    def log(self, stats, tag=None, step=None):
        """Intentionally a no-op."""

    def print(self, stats, tag=None, step=None):
        """Intentionally a no-op."""
class simple_progress_bar(progress_bar):
    """A minimal line-per-interval logger for non-TTY environments."""

    def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
        super().__init__(iterable, epoch, prefix)
        self.log_interval = log_interval
        self.stats = None
        self.tag = None

    def __iter__(self):
        total = len(self.iterable)
        for step, item in enumerate(self.iterable, start=self.offset):
            yield item
            due = (
                self.stats is not None
                and step > 0
                and self.log_interval is not None
                and (step + 1) % self.log_interval == 0
            )
            if due:
                line = self._str_commas(self.stats)
                with rename_logger(logger, self.tag):
                    logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, step, total, line))

    def log(self, stats, tag=None, step=None):
        """Format and store intermediate stats; emitted from ``__iter__``."""
        self.stats = self._format_stats(stats)
        self.tag = tag

    def print(self, stats, tag=None, step=None):
        """Log end-of-epoch stats as a single pipe-separated line."""
        line = self._str_pipes(self._format_stats(stats))
        with rename_logger(logger, tag):
            logger.info('{} | {}'.format(self.prefix, line))
class tqdm_progress_bar(progress_bar):
    """Delegate progress rendering to a tqdm bar."""

    def __init__(self, iterable, epoch=None, prefix=None):
        super().__init__(iterable, epoch, prefix)
        # imported lazily so tqdm is only required when this bar is used
        from tqdm import tqdm
        self.tqdm = tqdm(iterable, self.prefix, leave=False)

    def __iter__(self):
        return iter(self.tqdm)

    def log(self, stats, tag=None, step=None):
        """Attach intermediate stats as the tqdm postfix (no redraw)."""
        self.tqdm.set_postfix(self._format_stats(stats), refresh=False)

    def print(self, stats, tag=None, step=None):
        """Write end-of-epoch stats through tqdm without clobbering the bar."""
        formatted = self._str_pipes(self._format_stats(stats))
        self.tqdm.write('{} | {}'.format(self.tqdm.desc, formatted))
# Registry of SummaryWriter instances keyed by log sub-directory.  Bound
# unconditionally: the original only defined it inside the ``try`` block,
# leaving the module-level name unbound whenever tensorboardX was missing.
_tensorboard_writers = {}

try:
    from tensorboardX import SummaryWriter
except ImportError:
    # tensorboardX is optional; tensorboard_log_wrapper degrades to a no-op
    SummaryWriter = None
class tensorboard_log_wrapper(progress_bar):
    """Wrap another progress bar and mirror its stats to tensorboard."""

    def __init__(self, wrapped_bar, tensorboard_logdir, args):
        self.wrapped_bar = wrapped_bar
        self.tensorboard_logdir = tensorboard_logdir
        self.args = args

        if SummaryWriter is None:
            logger.warning(
                "tensorboard or required dependencies not found, please see README "
                "for using tensorboard. (e.g. pip install tensorboardX)"
            )

    def _writer(self, key):
        # lazily create (and cache) one SummaryWriter per tag/sub-directory
        if SummaryWriter is None:
            return None
        writers = _tensorboard_writers
        if key not in writers:
            writer = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
            writers[key] = writer
            writer.add_text('args', str(vars(self.args)))
            writer.add_text('sys.argv', " ".join(sys.argv))
        return writers[key]

    def __iter__(self):
        return iter(self.wrapped_bar)

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats to tensorboard, then to the wrapped bar."""
        self._log_to_tensorboard(stats, tag, step)
        self.wrapped_bar.log(stats, tag=tag, step=step)

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats to tensorboard, then to the wrapped bar."""
        self._log_to_tensorboard(stats, tag, step)
        self.wrapped_bar.print(stats, tag=tag, step=step)

    def _log_to_tensorboard(self, stats, tag=None, step=None):
        writer = self._writer(tag or '')
        if writer is None:
            return
        if step is None:
            step = stats['num_updates']
        # everything except the step counter itself is a candidate scalar
        for key in stats.keys() - {'num_updates'}:
            value = stats[key]
            if isinstance(value, AverageMeter):
                writer.add_scalar(key, value.val, step)
            elif isinstance(value, Number):
                writer.add_scalar(key, value, step)
| 33.028846 | 104 | 0.612033 |
from collections import OrderedDict
from contextlib import contextmanager
import json
import logging
from numbers import Number
import os
import sys
import torch
from fairseq import distributed_utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
if args.log_format is None:
args.log_format = no_progress_bar if args.no_progress_bar else default
if args.log_format == 'tqdm' and not sys.stderr.isatty():
args.log_format = 'simple'
if args.log_format == 'json':
bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)
elif args.log_format == 'none':
bar = noop_progress_bar(iterator, epoch, prefix)
elif args.log_format == 'simple':
bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval)
elif args.log_format == 'tqdm':
bar = tqdm_progress_bar(iterator, epoch, prefix)
else:
raise ValueError('Unknown log format: {}'.format(args.log_format))
if args.tensorboard_logdir and distributed_utils.is_master(args):
try:
import palaas
from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper
bar = fb_tbmf_wrapper(bar, args, args.log_interval)
except ImportError:
bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)
return bar
def format_stat(stat):
if isinstance(stat, Number):
stat = '{:g}'.format(stat)
elif isinstance(stat, AverageMeter):
stat = '{:.3f}'.format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = '{:g}'.format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = '{:g}'.format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
return stat
class progress_bar(object):
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.offset = getattr(iterable, 'offset', 0)
self.epoch = epoch
self.prefix = ''
if epoch is not None:
self.prefix += 'epoch {:03d}'.format(epoch)
if prefix is not None:
self.prefix += ' | {}'.format(prefix)
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
raise NotImplementedError
def print(self, stats, tag=None, step=None):
raise NotImplementedError
def _str_commas(self, stats):
return ', '.join(key + '=' + stats[key].strip()
for key in stats.keys())
def _str_pipes(self, stats):
return ' | '.join(key + ' ' + stats[key].strip()
for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
@contextmanager
def rename_logger(logger, new_name):
old_name = logger.name
if new_name is not None:
logger.name = new_name
yield logger
logger.name = old_name
class json_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
self.tag = None
def __iter__(self):
size = float(len(self.iterable))
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if (
self.stats is not None
and i > 0
and self.log_interval is not None
and (i + 1) % self.log_interval == 0
):
update = (
self.epoch - 1 + float(i / size)
if self.epoch is not None
else None
)
stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
with rename_logger(logger, self.tag):
logger.info(json.dumps(stats))
def log(self, stats, tag=None, step=None):
self.stats = stats
self.tag = tag
def print(self, stats, tag=None, step=None):
self.stats = stats
if tag is not None:
self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix['epoch'] = epoch
if update is not None:
postfix['update'] = round(update, 3)
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class noop_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag=None, step=None):
pass
def print(self, stats, tag=None, step=None):
pass
class simple_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
self.tag = None
def __iter__(self):
size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if (
self.stats is not None
and i > 0
and self.log_interval is not None
and (i + 1) % self.log_interval == 0
):
postfix = self._str_commas(self.stats)
with rename_logger(logger, self.tag):
logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix))
def log(self, stats, tag=None, step=None):
self.stats = self._format_stats(stats)
self.tag = tag
def print(self, stats, tag=None, step=None):
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info('{} | {}'.format(self.prefix, postfix))
class tqdm_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(iterable, self.prefix, leave=False)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag=None, step=None):
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag=None, step=None):
postfix = self._str_pipes(self._format_stats(stats))
self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
try:
from tensorboardX import SummaryWriter
_tensorboard_writers = {}
except ImportError:
SummaryWriter = None
class tensorboard_log_wrapper(progress_bar):
def __init__(self, wrapped_bar, tensorboard_logdir, args):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
self.args = args
if SummaryWriter is None:
logger.warning(
"tensorboard or required dependencies not found, please see README "
"for using tensorboard. (e.g. pip install tensorboardX)"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
_writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
_writers[key].add_text('args', str(vars(self.args)))
_writers[key].add_text('sys.argv', " ".join(sys.argv))
return _writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or '')
if writer is None:
return
if step is None:
step = stats['num_updates']
for key in stats.keys() - {'num_updates'}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
| true | true |
f72137bf78f559bfe97edbab30e7988e13d94b58 | 327 | py | Python | truck_microservice/truck_microservice/urls.py | getnosleep/VirtualUnjam | bae08eec9756c963dab409c6e4e7397ef019cc8a | [
"MIT"
] | null | null | null | truck_microservice/truck_microservice/urls.py | getnosleep/VirtualUnjam | bae08eec9756c963dab409c6e4e7397ef019cc8a | [
"MIT"
] | null | null | null | truck_microservice/truck_microservice/urls.py | getnosleep/VirtualUnjam | bae08eec9756c963dab409c6e4e7397ef019cc8a | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
router = routers.DefaultRouter()
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls')),
path('admin/', admin.site.urls),
path('api/', include('truck.urls')),
]
| 27.25 | 54 | 0.700306 | from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
router = routers.DefaultRouter()
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls')),
path('admin/', admin.site.urls),
path('api/', include('truck.urls')),
]
| true | true |
f72137f9085fd5053b6df852024426c114db9e8c | 7,174 | py | Python | cogdl/models/emb/netsmf.py | wsyCUHK/cogdl | 7a0e36326fc653d85378e3845ec14ebd9425a9b6 | [
"MIT"
] | 1 | 2021-12-16T11:53:20.000Z | 2021-12-16T11:53:20.000Z | cogdl/models/emb/netsmf.py | wsyCUHK/cogdl | 7a0e36326fc653d85378e3845ec14ebd9425a9b6 | [
"MIT"
] | null | null | null | cogdl/models/emb/netsmf.py | wsyCUHK/cogdl | 7a0e36326fc653d85378e3845ec14ebd9425a9b6 | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
import scipy.sparse as sp
from sklearn import preprocessing
from sklearn.utils.extmath import randomized_svd
from multiprocessing import Pool
from tqdm import tqdm
import time
from cogdl.utils import alias_draw, alias_setup
from .. import BaseModel
class NetSMF(BaseModel):
    r"""The NetSMF model from the `"NetSMF: Large-Scale Network Embedding as Sparse Matrix Factorization"
    <http://arxiv.org/abs/1710.02971>`_ paper.

    Args:
        hidden_size (int) : The dimension of node representation.
        window_size (int) : The actual context size which is considered in language model.
        negative (int) : The number of negative samples in negative sampling.
        num_round (int) : The number of rounds in NetSMF.
        worker (int) : The number of workers for NetSMF.
    """

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument("--window-size", type=int, default=10,
                            help="Window size of approximate matrix. Default is 10.")
        parser.add_argument("--negative", type=int, default=1,
                            help="Number of negative node in sampling. Default is 1.")
        parser.add_argument("--num-round", type=int, default=100,
                            help="Number of round in NetSMF. Default is 100.")
        parser.add_argument("--worker", type=int, default=10,
                            help="Number of parallel workers. Default is 10.")
        parser.add_argument("--hidden-size", type=int, default=128)
        # fmt: on

    @classmethod
    def build_model_from_args(cls, args):
        """Alternate constructor: build a NetSMF instance from parsed CLI args."""
        return cls(
            args.hidden_size,
            args.window_size,
            args.negative,
            args.num_round,
            args.worker,
        )

    def __init__(self, dimension, window_size, negative, num_round, worker):
        super(NetSMF, self).__init__()
        self.dimension = dimension      # embedding size
        self.window_size = window_size  # max context distance r
        self.negative = negative        # negative-sampling count b
        self.worker = worker            # parallel sampling processes
        self.num_round = num_round      # sampling rounds per edge

    def train(self, graph, return_dict=False):
        # training here is a single (non-gradient) factorization pass
        return self.forward(graph, return_dict)

    def forward(self, graph, return_dict=False):
        """Compute NetSMF embeddings for *graph*.

        Returns either a ``{node: embedding}`` dict (``return_dict=True``)
        or a ``(num_nodes, dimension)`` numpy array indexed by node id.
        """
        self.G = graph.to_networkx()
        node2id = dict([(node, vid) for vid, node in enumerate(self.G.nodes())])
        self.is_directed = nx.is_directed(self.G)
        self.num_node = self.G.number_of_nodes()
        self.num_edge = self.G.number_of_edges()
        self.edges = [[node2id[e[0]], node2id[e[1]]] for e in self.G.edges()]

        id2node = dict(zip(node2id.values(), node2id.keys()))

        self.num_neigh = np.asarray([len(list(self.G.neighbors(id2node[i]))) for i in range(self.num_node)])
        self.neighbors = [[node2id[v] for v in self.G.neighbors(id2node[i])] for i in range(self.num_node)]
        s = time.time()
        # Build per-node alias tables for O(1) weighted neighbor sampling,
        # and keep the raw (unnormalized) edge weights for path weighting.
        self.alias_nodes = {}
        self.node_weight = {}
        for i in range(self.num_node):
            unnormalized_probs = [self.G[id2node[i]][nbr].get("weight", 1.0) for nbr in self.G.neighbors(id2node[i])]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]
            self.alias_nodes[i] = alias_setup(normalized_probs)
            self.node_weight[i] = dict(
                zip(
                    [node2id[nbr] for nbr in self.G.neighbors(id2node[i])],
                    unnormalized_probs,
                )
            )
        t = time.time()
        print("alias_nodes", t - s)

        # run netsmf algorithm with multiprocessing and apply randomized svd
        print("number of sample edges ", self.num_round * self.num_edge * self.window_size)
        print("random walk start...")
        t0 = time.time()
        results = []
        pool = Pool(processes=self.worker)
        for i in range(self.worker):
            # each worker samples its share of paths (seeded by worker id)
            results.append(pool.apply_async(func=self._random_walk_matrix, args=(i,)))
        pool.close()
        pool.join()
        print("random walk time", time.time() - t0)

        matrix = sp.csr_matrix((self.num_node, self.num_node))
        A = sp.csr_matrix(nx.adjacency_matrix(self.G))
        degree = sp.diags(np.array(A.sum(axis=0))[0], format="csr")
        degree_inv = degree.power(-1)

        t1 = time.time()
        # sum the sparse co-occurrence estimates from all workers
        for res in results:
            matrix += res.get()
        t2 = time.time()
        print("construct random walk matrix time", time.time() - t1)

        # Sparsified PMI-style matrix: D^-1 (D - L) D^-1 scaled by vol(G)/b,
        # then truncated-log (values <= 1 clamped so log is non-negative).
        L = sp.csgraph.laplacian(matrix, normed=False, return_diag=False)
        M = degree_inv.dot(degree - L).dot(degree_inv)
        M = M * A.sum() / self.negative
        M.data[M.data <= 1] = 1
        M.data = np.log(M.data)
        M.eliminate_zeros()
        print("number of nzz", M.nnz)
        print("construct matrix sparsifier time", time.time() - t2)

        embeddings = self._get_embedding_rand(M)
        if return_dict:
            features_matrix = dict()
            for vid, node in enumerate(self.G.nodes()):
                features_matrix[node] = embeddings[vid]
        else:
            # NOTE(review): assumes networkx node labels are usable as row
            # indices into a (num_nodes, dim) array -- i.e. integer ids in
            # [0, num_nodes); verify against the cogdl Graph contract.
            features_matrix = np.zeros((graph.num_nodes, embeddings.shape[1]))
            nx_nodes = self.G.nodes()
            features_matrix[nx_nodes] = embeddings[np.arange(graph.num_nodes)]
        return features_matrix

    def _get_embedding_rand(self, matrix):
        # Sparse randomized truncated SVD for fast embedding;
        # rows are L2-normalized U * sqrt(Sigma).
        t1 = time.time()
        l = matrix.shape[0]  # noqa E741
        smat = sp.csc_matrix(matrix)
        print("svd sparse", smat.data.shape[0] * 1.0 / l ** 2)
        U, Sigma, VT = randomized_svd(smat, n_components=self.dimension, n_iter=5, random_state=None)
        U = U * np.sqrt(Sigma)
        U = preprocessing.normalize(U, "l2")
        print("sparsesvd time", time.time() - t1)
        return U

    def _path_sampling(self, u, v, r):
        # Sample an r-length path passing through edge (u, v): split r into
        # k-1 steps extending from u and r-k steps extending from v, and
        # return the two path endpoints plus zp, the accumulated
        # sum of 2/weight along the path (the estimator's normalization).
        k = np.random.randint(r) + 1
        zp, rand_u, rand_v = 2.0 / self.node_weight[u][v], k - 1, r - k
        for i in range(rand_u):
            new_u = self.neighbors[u][alias_draw(self.alias_nodes[u][0], self.alias_nodes[u][1])]
            zp += 2.0 / self.node_weight[u][new_u]
            u = new_u
        for j in range(rand_v):
            new_v = self.neighbors[v][alias_draw(self.alias_nodes[v][0], self.alias_nodes[v][1])]
            zp += 2.0 / self.node_weight[v][new_v]
            v = new_v
        return u, v, zp

    def _random_walk_matrix(self, pid):
        # Worker pid's share of the sparse co-occurrence matrix, built from
        # random path samples; seeded by pid so workers draw distinct paths.
        np.random.seed(pid)
        matrix = sp.lil_matrix((self.num_node, self.num_node))
        for i in tqdm(range(self.num_edge * self.num_round // self.worker)):
            u, v = self.edges[i % self.num_edge]
            # for undirected graphs, randomize the edge orientation
            if not self.is_directed and np.random.rand() > 0.5:
                v, u = u, v
            for r in range(1, self.window_size + 1):
                u_, v_, zp = self._path_sampling(u, v, r)
                matrix[u_, v_] += 2 * r / self.window_size / self.num_round / zp
        return matrix.tocsr()
| 41.229885 | 117 | 0.598411 | import numpy as np
import networkx as nx
import scipy.sparse as sp
from sklearn import preprocessing
from sklearn.utils.extmath import randomized_svd
from multiprocessing import Pool
from tqdm import tqdm
import time
from cogdl.utils import alias_draw, alias_setup
from .. import BaseModel
class NetSMF(BaseModel):
@staticmethod
def add_args(parser):
parser.add_argument("--window-size", type=int, default=10,
help="Window size of approximate matrix. Default is 10.")
parser.add_argument("--negative", type=int, default=1,
help="Number of negative node in sampling. Default is 1.")
parser.add_argument("--num-round", type=int, default=100,
help="Number of round in NetSMF. Default is 100.")
parser.add_argument("--worker", type=int, default=10,
help="Number of parallel workers. Default is 10.")
parser.add_argument("--hidden-size", type=int, default=128)
@classmethod
def build_model_from_args(cls, args):
return cls(
args.hidden_size,
args.window_size,
args.negative,
args.num_round,
args.worker,
)
def __init__(self, dimension, window_size, negative, num_round, worker):
super(NetSMF, self).__init__()
self.dimension = dimension
self.window_size = window_size
self.negative = negative
self.worker = worker
self.num_round = num_round
def train(self, graph, return_dict=False):
return self.forward(graph, return_dict)
def forward(self, graph, return_dict=False):
self.G = graph.to_networkx()
node2id = dict([(node, vid) for vid, node in enumerate(self.G.nodes())])
self.is_directed = nx.is_directed(self.G)
self.num_node = self.G.number_of_nodes()
self.num_edge = self.G.number_of_edges()
self.edges = [[node2id[e[0]], node2id[e[1]]] for e in self.G.edges()]
id2node = dict(zip(node2id.values(), node2id.keys()))
self.num_neigh = np.asarray([len(list(self.G.neighbors(id2node[i]))) for i in range(self.num_node)])
self.neighbors = [[node2id[v] for v in self.G.neighbors(id2node[i])] for i in range(self.num_node)]
s = time.time()
self.alias_nodes = {}
self.node_weight = {}
for i in range(self.num_node):
unnormalized_probs = [self.G[id2node[i]][nbr].get("weight", 1.0) for nbr in self.G.neighbors(id2node[i])]
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]
self.alias_nodes[i] = alias_setup(normalized_probs)
self.node_weight[i] = dict(
zip(
[node2id[nbr] for nbr in self.G.neighbors(id2node[i])],
unnormalized_probs,
)
)
t = time.time()
print("alias_nodes", t - s)
print("number of sample edges ", self.num_round * self.num_edge * self.window_size)
print("random walk start...")
t0 = time.time()
results = []
pool = Pool(processes=self.worker)
for i in range(self.worker):
results.append(pool.apply_async(func=self._random_walk_matrix, args=(i,)))
pool.close()
pool.join()
print("random walk time", time.time() - t0)
matrix = sp.csr_matrix((self.num_node, self.num_node))
A = sp.csr_matrix(nx.adjacency_matrix(self.G))
degree = sp.diags(np.array(A.sum(axis=0))[0], format="csr")
degree_inv = degree.power(-1)
t1 = time.time()
for res in results:
matrix += res.get()
t2 = time.time()
print("construct random walk matrix time", time.time() - t1)
L = sp.csgraph.laplacian(matrix, normed=False, return_diag=False)
M = degree_inv.dot(degree - L).dot(degree_inv)
M = M * A.sum() / self.negative
M.data[M.data <= 1] = 1
M.data = np.log(M.data)
M.eliminate_zeros()
print("number of nzz", M.nnz)
print("construct matrix sparsifier time", time.time() - t2)
embeddings = self._get_embedding_rand(M)
if return_dict:
features_matrix = dict()
for vid, node in enumerate(self.G.nodes()):
features_matrix[node] = embeddings[vid]
else:
features_matrix = np.zeros((graph.num_nodes, embeddings.shape[1]))
nx_nodes = self.G.nodes()
features_matrix[nx_nodes] = embeddings[np.arange(graph.num_nodes)]
return features_matrix
def _get_embedding_rand(self, matrix):
t1 = time.time()
l = matrix.shape[0]
smat = sp.csc_matrix(matrix)
print("svd sparse", smat.data.shape[0] * 1.0 / l ** 2)
U, Sigma, VT = randomized_svd(smat, n_components=self.dimension, n_iter=5, random_state=None)
U = U * np.sqrt(Sigma)
U = preprocessing.normalize(U, "l2")
print("sparsesvd time", time.time() - t1)
return U
def _path_sampling(self, u, v, r):
k = np.random.randint(r) + 1
zp, rand_u, rand_v = 2.0 / self.node_weight[u][v], k - 1, r - k
for i in range(rand_u):
new_u = self.neighbors[u][alias_draw(self.alias_nodes[u][0], self.alias_nodes[u][1])]
zp += 2.0 / self.node_weight[u][new_u]
u = new_u
for j in range(rand_v):
new_v = self.neighbors[v][alias_draw(self.alias_nodes[v][0], self.alias_nodes[v][1])]
zp += 2.0 / self.node_weight[v][new_v]
v = new_v
return u, v, zp
def _random_walk_matrix(self, pid):
np.random.seed(pid)
matrix = sp.lil_matrix((self.num_node, self.num_node))
for i in tqdm(range(self.num_edge * self.num_round // self.worker)):
u, v = self.edges[i % self.num_edge]
if not self.is_directed and np.random.rand() > 0.5:
v, u = u, v
for r in range(1, self.window_size + 1):
u_, v_, zp = self._path_sampling(u, v, r)
matrix[u_, v_] += 2 * r / self.window_size / self.num_round / zp
return matrix.tocsr()
| true | true |
f7213978ccd0f01659e3efbbcfb25973a4e526b4 | 877 | py | Python | CTFd/forms/challenges.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | CTFd/forms/challenges.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | CTFd/forms/challenges.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | from wtforms import MultipleFileField, SelectField, StringField
from wtforms.validators import InputRequired
from CTFd.forms import BaseForm
from CTFd.forms.fields import SubmitField
class ChallengeSearchForm(BaseForm):
field = SelectField(
"Search Field",
choices=[
("name", "Name"),
("id", "ID"),
("category", "Category"),
("type", "Type"),
],
default="name",
validators=[InputRequired()],
)
q = StringField("Parameter", validators=[InputRequired()])
submit = SubmitField("Search")
class ChallengeFilesUploadForm(BaseForm):
file = MultipleFileField(
"Upload Files",
description="Attach multiple files using Control+Click or Cmd+Click.",
validators=[InputRequired()],
)
submit = SubmitField("Upload")
| 28.290323 | 79 | 0.608894 | from wtforms import MultipleFileField, SelectField, StringField
from wtforms.validators import InputRequired
from CTFd.forms import BaseForm
from CTFd.forms.fields import SubmitField
class ChallengeSearchForm(BaseForm):
field = SelectField(
"Search Field",
choices=[
("name", "Name"),
("id", "ID"),
("category", "Category"),
("type", "Type"),
],
default="name",
validators=[InputRequired()],
)
q = StringField("Parameter", validators=[InputRequired()])
submit = SubmitField("Search")
class ChallengeFilesUploadForm(BaseForm):
file = MultipleFileField(
"Upload Files",
description="Attach multiple files using Control+Click or Cmd+Click.",
validators=[InputRequired()],
)
submit = SubmitField("Upload")
| true | true |
f72139a7d81181702d6d08b0921ed75b1e2aa778 | 2,422 | py | Python | staicoin/consensus/block_rewards.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | 1 | 2021-12-03T02:39:29.000Z | 2021-12-03T02:39:29.000Z | staicoin/consensus/block_rewards.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | null | null | null | staicoin/consensus/block_rewards.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | null | null | null | from staicoin.util.ints import uint32, uint64
# 1 stai coin = 1,000,000,000 = 1 billion mojo.
_mojo_per_staicoin = 1000000000
_blocks_per_year = 1681920 # 32 * 6 * 24 * 365
def calculate_pool_reward(height: uint32) -> uint64:
"""
Returns the pool reward at a certain block height. The pool earns 4/5 of the reward in each block. If the farmer
is solo farming, they act as the pool, and therefore earn the entire block reward.
These halving events will not be hit at the exact times
(3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
rates increase continuously.
"""
if height == 0:
return uint64(int((992 / 1000) * 55882000 * _mojo_per_staicoin))
elif height < 1 * _blocks_per_year:
return uint64(int((4 / 5) * 5 * _mojo_per_staicoin))
elif height < 2 * _blocks_per_year:
return uint64(int((2 / 2.5) * 2.5 * _mojo_per_staicoin))
else:
return uint64(int((1 / 1.25) * 1.25 * _mojo_per_staicoin))
def calculate_base_farmer_reward(height: uint32) -> uint64:
"""
Returns the base farmer reward at a certain block height.
The base fee reward is 1/5 of total block reward
Returns the coinbase reward at a certain block height. These halving events will not be hit at the exact times
(3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
rates increase continuously. Bonus to the dev who contributed starting the blockchain !
"""
if height == 0:
return uint64(int((8 / 1000) * 55882000 * _mojo_per_staicoin))
elif height < 1 * _blocks_per_year:
return uint64(int((1 / 5) * 5 * _mojo_per_staicoin))
elif height < 2 * _blocks_per_year:
return uint64(int((1 / 2.5) * 2.5 * _mojo_per_staicoin))
else:
return uint64(int((1 / 1.25) * 1.25 * _mojo_per_staicoin))
def calculate_base_officialwallets_reward(height: uint32) -> uint64:
"""
Community Rewards: 1 stai every block at stage 1 & 2 & 3
"""
if height == 0:
return uint64(int((1 / 6) * 0 * _mojo_per_staicoin))
elif height < 1 * _blocks_per_year:
return uint64(int((1 / 6) * 6 * _mojo_per_staicoin))
elif height < 2 * _blocks_per_year:
return uint64(int((1 / 3) * 3 * _mojo_per_staicoin))
else:
return uint64(int((1 / 2) * 2 * _mojo_per_staicoin))
| 42.491228 | 116 | 0.67052 | from staicoin.util.ints import uint32, uint64
_mojo_per_staicoin = 1000000000
_blocks_per_year = 1681920
def calculate_pool_reward(height: uint32) -> uint64:
if height == 0:
return uint64(int((992 / 1000) * 55882000 * _mojo_per_staicoin))
elif height < 1 * _blocks_per_year:
return uint64(int((4 / 5) * 5 * _mojo_per_staicoin))
elif height < 2 * _blocks_per_year:
return uint64(int((2 / 2.5) * 2.5 * _mojo_per_staicoin))
else:
return uint64(int((1 / 1.25) * 1.25 * _mojo_per_staicoin))
def calculate_base_farmer_reward(height: uint32) -> uint64:
if height == 0:
return uint64(int((8 / 1000) * 55882000 * _mojo_per_staicoin))
elif height < 1 * _blocks_per_year:
return uint64(int((1 / 5) * 5 * _mojo_per_staicoin))
elif height < 2 * _blocks_per_year:
return uint64(int((1 / 2.5) * 2.5 * _mojo_per_staicoin))
else:
return uint64(int((1 / 1.25) * 1.25 * _mojo_per_staicoin))
def calculate_base_officialwallets_reward(height: uint32) -> uint64:
if height == 0:
return uint64(int((1 / 6) * 0 * _mojo_per_staicoin))
elif height < 1 * _blocks_per_year:
return uint64(int((1 / 6) * 6 * _mojo_per_staicoin))
elif height < 2 * _blocks_per_year:
return uint64(int((1 / 3) * 3 * _mojo_per_staicoin))
else:
return uint64(int((1 / 2) * 2 * _mojo_per_staicoin))
| true | true |
f7213d02b27c785ef91695bcb230eaaa02989fb9 | 6,910 | py | Python | nova/objects/migrate_data.py | JohnGarbutt/nova | 21f6f7b63af920aa3a5501603c3debbcd5ec5bc5 | [
"Apache-2.0"
] | null | null | null | nova/objects/migrate_data.py | JohnGarbutt/nova | 21f6f7b63af920aa3a5501603c3debbcd5ec5bc5 | [
"Apache-2.0"
] | null | null | null | nova/objects/migrate_data.py | JohnGarbutt/nova | 21f6f7b63af920aa3a5501603c3debbcd5ec5bc5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
LOG = log.getLogger(__name__)
@obj_base.NovaObjectRegistry.register_if(False)
class LiveMigrateData(obj_base.NovaObject):
fields = {
'is_volume_backed': fields.BooleanField(),
'migration': fields.ObjectField('Migration'),
}
def to_legacy_dict(self, pre_migration_result=False):
legacy = {}
if self.obj_attr_is_set('is_volume_backed'):
legacy['is_volume_backed'] = self.is_volume_backed
if self.obj_attr_is_set('migration'):
legacy['migration'] = self.migration
if pre_migration_result:
legacy['pre_live_migration_result'] = {}
return legacy
def from_legacy_dict(self, legacy):
if 'is_volume_backed' in legacy:
self.is_volume_backed = legacy['is_volume_backed']
if 'migration' in legacy:
self.migration = legacy['migration']
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
VERSION = '1.0'
fields = {
# FIXME(danms): some of these can be enums?
'serial': fields.StringField(),
'bus': fields.StringField(),
'dev': fields.StringField(),
'type': fields.StringField(),
'format': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'connection_info_json': fields.StringField(),
}
# NOTE(danms): We don't have a connection_info object right
# now, and instead mostly store/pass it as JSON that we're
# careful with. When we get a connection_info object in the
# future, we should use it here, so make this easy to convert
# for later.
@property
def connection_info(self):
return jsonutils.loads(self.connection_info_json)
@connection_info.setter
def connection_info(self, info):
self.connection_info_json = jsonutils.dumps(info)
def as_disk_info(self):
info_dict = {
'dev': self.dev,
'bus': self.bus,
'type': self.type,
}
if self.obj_attr_is_set('format') and self.format:
info_dict['format'] = self.format
if self.obj_attr_is_set('boot_index') and self.boot_index is not None:
info_dict['boot_index'] = str(self.boot_index)
return info_dict
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateData(LiveMigrateData):
VERSION = '1.0'
fields = {
'filename': fields.StringField(),
# FIXME: image_type should be enum?
'image_type': fields.StringField(),
'block_migration': fields.BooleanField(),
'disk_over_commit': fields.BooleanField(),
'disk_available_mb': fields.IntegerField(nullable=True),
'is_shared_instance_path': fields.BooleanField(),
'is_shared_block_storage': fields.BooleanField(),
'instance_relative_path': fields.StringField(),
'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
'serial_listen_addr': fields.StringField(),
'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
}
def _bdms_to_legacy(self, legacy):
if not self.obj_attr_is_set('bdms'):
return
legacy['volume'] = {}
for bdmi in self.bdms:
legacy['volume'][bdmi.serial] = {
'disk_info': bdmi.as_disk_info(),
'connection_info': bdmi.connection_info}
def _bdms_from_legacy(self, legacy_pre_result):
self.bdms = []
volume = legacy_pre_result.get('volume', {})
for serial in volume:
vol = volume[serial]
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
bdmi.connection_info = vol['connection_info']
bdmi.bus = vol['disk_info']['bus']
bdmi.dev = vol['disk_info']['dev']
bdmi.type = vol['disk_info']['type']
if 'format' in vol:
bdmi.format = vol['disk_info']['format']
if 'boot_index' in vol:
bdmi.boot_index = int(vol['disk_info']['boot_index'])
self.bdms.append(bdmi)
def to_legacy_dict(self, pre_migration_result=False):
LOG.debug('Converting to legacy: %s' % self)
legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
keys = (set(self.fields.keys()) -
set(LiveMigrateData.fields.keys()) - {'bdms'})
legacy.update({k: getattr(self, k) for k in keys
if self.obj_attr_is_set(k)})
graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
live_result = {
'graphics_listen_addrs': {
'vnc': graphics_vnc and str(graphics_vnc),
'spice': graphics_spice and str(graphics_spice),
},
'serial_listen_addr': legacy.pop('serial_listen_addr', None),
}
if pre_migration_result:
legacy['pre_live_migration_result'] = live_result
self._bdms_to_legacy(live_result)
LOG.debug('Legacy result: %s' % legacy)
return legacy
def from_legacy_dict(self, legacy):
LOG.debug('Converting legacy dict to obj: %s' % legacy)
super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
for k in keys - {'bdms'}:
if k in legacy:
setattr(self, k, legacy[k])
if 'pre_live_migration_result' in legacy:
pre_result = legacy['pre_live_migration_result']
self.graphics_listen_addr_vnc = \
pre_result['graphics_listen_addrs'].get('vnc')
self.graphics_listen_addr_spice = \
pre_result['graphics_listen_addrs'].get('spice')
if 'serial_listen_addr' in pre_result:
self.serial_listen_addr = pre_result['serial_listen_addr']
self._bdms_from_legacy(pre_result)
LOG.debug('Converted object: %s' % self)
| 38.603352 | 78 | 0.639363 |
from oslo_log import log
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
LOG = log.getLogger(__name__)
@obj_base.NovaObjectRegistry.register_if(False)
class LiveMigrateData(obj_base.NovaObject):
fields = {
'is_volume_backed': fields.BooleanField(),
'migration': fields.ObjectField('Migration'),
}
def to_legacy_dict(self, pre_migration_result=False):
legacy = {}
if self.obj_attr_is_set('is_volume_backed'):
legacy['is_volume_backed'] = self.is_volume_backed
if self.obj_attr_is_set('migration'):
legacy['migration'] = self.migration
if pre_migration_result:
legacy['pre_live_migration_result'] = {}
return legacy
def from_legacy_dict(self, legacy):
if 'is_volume_backed' in legacy:
self.is_volume_backed = legacy['is_volume_backed']
if 'migration' in legacy:
self.migration = legacy['migration']
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
VERSION = '1.0'
fields = {
'serial': fields.StringField(),
'bus': fields.StringField(),
'dev': fields.StringField(),
'type': fields.StringField(),
'format': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'connection_info_json': fields.StringField(),
}
# now, and instead mostly store/pass it as JSON that we're
@property
def connection_info(self):
return jsonutils.loads(self.connection_info_json)
@connection_info.setter
def connection_info(self, info):
self.connection_info_json = jsonutils.dumps(info)
def as_disk_info(self):
info_dict = {
'dev': self.dev,
'bus': self.bus,
'type': self.type,
}
if self.obj_attr_is_set('format') and self.format:
info_dict['format'] = self.format
if self.obj_attr_is_set('boot_index') and self.boot_index is not None:
info_dict['boot_index'] = str(self.boot_index)
return info_dict
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateData(LiveMigrateData):
VERSION = '1.0'
fields = {
'filename': fields.StringField(),
'image_type': fields.StringField(),
'block_migration': fields.BooleanField(),
'disk_over_commit': fields.BooleanField(),
'disk_available_mb': fields.IntegerField(nullable=True),
'is_shared_instance_path': fields.BooleanField(),
'is_shared_block_storage': fields.BooleanField(),
'instance_relative_path': fields.StringField(),
'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
'serial_listen_addr': fields.StringField(),
'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
}
def _bdms_to_legacy(self, legacy):
if not self.obj_attr_is_set('bdms'):
return
legacy['volume'] = {}
for bdmi in self.bdms:
legacy['volume'][bdmi.serial] = {
'disk_info': bdmi.as_disk_info(),
'connection_info': bdmi.connection_info}
def _bdms_from_legacy(self, legacy_pre_result):
self.bdms = []
volume = legacy_pre_result.get('volume', {})
for serial in volume:
vol = volume[serial]
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
bdmi.connection_info = vol['connection_info']
bdmi.bus = vol['disk_info']['bus']
bdmi.dev = vol['disk_info']['dev']
bdmi.type = vol['disk_info']['type']
if 'format' in vol:
bdmi.format = vol['disk_info']['format']
if 'boot_index' in vol:
bdmi.boot_index = int(vol['disk_info']['boot_index'])
self.bdms.append(bdmi)
def to_legacy_dict(self, pre_migration_result=False):
LOG.debug('Converting to legacy: %s' % self)
legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
keys = (set(self.fields.keys()) -
set(LiveMigrateData.fields.keys()) - {'bdms'})
legacy.update({k: getattr(self, k) for k in keys
if self.obj_attr_is_set(k)})
graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
live_result = {
'graphics_listen_addrs': {
'vnc': graphics_vnc and str(graphics_vnc),
'spice': graphics_spice and str(graphics_spice),
},
'serial_listen_addr': legacy.pop('serial_listen_addr', None),
}
if pre_migration_result:
legacy['pre_live_migration_result'] = live_result
self._bdms_to_legacy(live_result)
LOG.debug('Legacy result: %s' % legacy)
return legacy
def from_legacy_dict(self, legacy):
LOG.debug('Converting legacy dict to obj: %s' % legacy)
super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
for k in keys - {'bdms'}:
if k in legacy:
setattr(self, k, legacy[k])
if 'pre_live_migration_result' in legacy:
pre_result = legacy['pre_live_migration_result']
self.graphics_listen_addr_vnc = \
pre_result['graphics_listen_addrs'].get('vnc')
self.graphics_listen_addr_spice = \
pre_result['graphics_listen_addrs'].get('spice')
if 'serial_listen_addr' in pre_result:
self.serial_listen_addr = pre_result['serial_listen_addr']
self._bdms_from_legacy(pre_result)
LOG.debug('Converted object: %s' % self)
| true | true |
f72141757d175109c5dcb045ecabaf1763ea12c7 | 1,249 | py | Python | backend/verify/migrations/0001_initial.py | dabonthatbih/kkr-rest-api | e469183a99bd650c2ab979c4e420c3673b9ec049 | [
"Apache-2.0"
] | 1 | 2019-10-07T11:14:33.000Z | 2019-10-07T11:14:33.000Z | backend/verify/migrations/0001_initial.py | dabonthatbih/kkr-rest-api | e469183a99bd650c2ab979c4e420c3673b9ec049 | [
"Apache-2.0"
] | 15 | 2019-10-07T10:57:58.000Z | 2019-10-13T12:35:19.000Z | backend/verify/migrations/0001_initial.py | dabonthatbih/kkr-rest-api | e469183a99bd650c2ab979c4e420c3673b9ec049 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-06 13:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=100)),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=200)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('country', django_countries.fields.CountryField(max_length=2)),
('adress', models.CharField(max_length=300)),
('zip_code', models.CharField(max_length=20)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 37.848485 | 122 | 0.604484 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=100)),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=200)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('country', django_countries.fields.CountryField(max_length=2)),
('adress', models.CharField(max_length=300)),
('zip_code', models.CharField(max_length=20)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7214236590167f0e9c078503c47ef27d6da679f | 19,636 | py | Python | tests/models/dsettlement/test_acceptance.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 4 | 2021-10-29T21:30:47.000Z | 2022-03-18T13:15:17.000Z | tests/models/dsettlement/test_acceptance.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 3 | 2021-11-05T07:56:16.000Z | 2022-03-27T13:27:05.000Z | tests/models/dsettlement/test_acceptance.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 4 | 2021-10-29T21:30:51.000Z | 2022-01-17T13:20:40.000Z | import logging
import os
import pathlib
from datetime import timedelta
from pathlib import Path
from typing import List
from warnings import warn
import pydantic
import pytest
from pydantic.color import Color
from teamcity import is_running_under_teamcity
import geolib.models.dsettlement.loads as loads
import geolib.soils as soil_external
from geolib.geometry.one import Point
from geolib.models import BaseModel
from geolib.models.dsettlement.dsettlement_model import DSettlementModel
from geolib.models.dsettlement.internal import (
Bool,
Boundaries,
Boundary,
ConsolidationModel,
Curve,
Curves,
Dimension,
DispersionConditionLayerBoundary,
DSeriePoint,
DSettlementStructure,
GeometryData,
Layer,
Layers,
Model,
Points,
PreconPressureWithinLayer,
SoilModel,
StrainType,
StressDistributionLoads,
StressDistributionSoil,
Version,
)
from geolib.models.dsettlement.loads import RectangularLoad
from geolib.soils import (
IsotacheParameters,
Soil,
SoilClassificationParameters,
SoilWeightParameters,
StateType,
)
from tests.utils import TestUtils, only_teamcity
class TestDSettlementAcceptance:
def setup_class(self):
self.soils = [
Soil(
name="Sand",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=19.0, unsaturated_weight=17.0
),
),
Soil(
name="Peat",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=10.0, unsaturated_weight=10.0
),
),
Soil(
name="Clay",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=14.0, unsaturated_weight=14.0
),
),
Soil(
name="Embankement",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=16.0, unsaturated_weight=16.0
),
),
]
self.points = [
Point(x=-50, z=0.0), # 0 top layer
Point(x=-10, z=0.0), # 1
Point(x=0, z=2), # 2
Point(x=10, z=2), # 3
Point(x=30, z=0.0), # 4
Point(x=50, z=0.0), # 5
Point(x=-50, z=-5), # 6 second layer
Point(x=50, z=-5), # 7
Point(x=-50, z=-10), # 8 third layer
Point(x=50, z=-10), # 9
Point(x=-50, z=-20), # 10 fourth layer
Point(x=50, z=-20), # 11
Point(x=-50, z=-2), # 12 phreatic line
Point(x=50, z=-2), # 13
Point(x=-50, z=1), # 14 headline 1
Point(x=50, z=1), # 15
]
dm = DSettlementModel()
self.outputdir = Path(
TestUtils.get_output_test_data_dir("dsettlement/acceptancetest/")
)
self.inputfile = Path(
TestUtils.get_test_data_dir("test_data/dsettlement", "2dgeom_with10.sld")
)
@pytest.mark.systemtest
def test_dsettlement_empty(self):
dm = DSettlementModel()
path = self.outputdir / "test_empty.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_soils(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
path = self.outputdir / "test_add_soils.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_soil_koppejan(self):
dm = DSettlementModel()
# TODO adding soils is too complex
# should be something like
# Soil(
# soilcp = 100.,
# soilcp1 = 10.,
# etc.
# )
soil_input = Soil(name="MyNewSoil")
soil_input.soil_classification_parameters = SoilClassificationParameters()
soil_input.soil_weight_parameters = soil_external.SoilWeightParameters()
soil_input.soil_weight_parameters.saturated_weight = (
soil_external.StochasticParameter(mean=20)
)
soil_input.soil_weight_parameters.unsaturated_weight = (
soil_external.StochasticParameter(mean=30)
)
soil_input.soil_classification_parameters.initial_void_ratio = (
soil_external.StochasticParameter(mean=0.1)
)
soil_input.koppejan_parameters = soil_external.KoppejanParameters(
precon_koppejan_type=StateType.YIELD_STRESS
)
soil_input.soil_state = soil_external.SoilState(
use_equivalent_age=True, equivalent_age=2
)
soil_input.koppejan_parameters.preconsolidation_pressure = (
soil_external.StochasticParameter(mean=10)
)
dm.add_soil(soil_input)
path = self.outputdir / "test_add_soil_koppejan.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_simple_geometry(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_simple_geometry.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_headlines(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
hl_id = dm.add_head_line(
points=[self.points[14], self.points[15]], is_phreatic=False
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=hl_id,
head_line_bottom=hl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=99,
head_line_bottom=hl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=99,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_headlines.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_load(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.add_non_uniform_load(
"traffic",
points=[self.points[2], Point(x=1, z=3), Point(x=9, z=3), self.points[3]],
gamma_wet=25.0,
gamma_dry=25.0,
time_start=timedelta(days=0),
time_end=timedelta(days=1000),
)
path = self.outputdir / "test_add_load.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_verticals(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
path = self.outputdir / "test_set_verticals.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_set_model(self):
# koppejan, natural strain, darcy, vertical drains
dm = DSettlementModel()
dm.set_model(
constitutive_model=SoilModel.NEN_KOPPEJAN,
consolidation_model=ConsolidationModel.DARCY,
is_vertical_drain=True,
strain_type=StrainType.NATURAL,
is_two_dimensional=True,
is_fit_for_settlement_plate=False,
is_probabilistic=False,
is_horizontal_displacements=False,
is_secondary_swelling=True, # TODO document this parameter
is_waspan=False, # TODO document this parameter
)
path = self.outputdir / "test_set_model.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_set_residualtimes(self):
# koppejan, natural strain, darcy, vertical drains
dm = DSettlementModel()
dm.set_calculation_times(
time_steps=[timedelta(days=d) for d in [10, 100, 1000, 2000, 3000, 4000]]
)
path = self.outputdir / "test_set_residualtimes.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_layerload(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_layerload.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_other_load(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.add_other_load(
name="rectangle",
time=timedelta(days=100),
point=Point(x=5.0, y=10.0, z=2.0),
other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
)
path = self.outputdir / "test_other_load.sli"
dm.serialize(path)
@pytest.mark.acceptance
@pytest.mark.xfail # Wrong soils for now
@only_teamcity
def test_sorting_vertical_layer_boundaries(self):
"""
Test sorting boundaries with 2 vertical layer boundaries
Returns:
"""
points = [
Point(x=-50, z=-10), # 0
Point(x=50, z=-10), # 1
Point(x=-50, z=0.0), # 2
Point(x=0, z=0.0), # 3
Point(x=0.0, z=-10.0), # 4
Point(x=-50, z=-20), # 5
Point(x=50, z=-20), # 6
Point(x=50, z=0.0), # 7
]
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(points=[points[0], points[1]], is_phreatic=True)
b1 = dm.add_boundary(points=[points[0], points[4], points[1]])
b2 = dm.add_boundary(points=[points[2], points[3], points[7]])
b3 = dm.add_boundary(points=[points[0], points[4], points[3], points[7]])
b4 = dm.add_boundary(points=[points[5], points[6]])
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b1,
boundary_bottom=b4,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b1,
)
l3 = dm.add_layer(
material_name="peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b3,
)
dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
dm.add_other_load(
name="rectangle",
time=timedelta(days=100),
point=Point(x=-5.0, y=-5.0, z=0.0),
other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
)
# For manual checks
path = self.outputdir / "test_sort_vertical_layer_boundaries.sli"
dm.serialize(path)
# Verify geometry is correct and we can parse output
dm.execute() # will raise on execution error
assert dm.datastructure
| 30.443411 | 86 | 0.544052 | import logging
import os
import pathlib
from datetime import timedelta
from pathlib import Path
from typing import List
from warnings import warn
import pydantic
import pytest
from pydantic.color import Color
from teamcity import is_running_under_teamcity
import geolib.models.dsettlement.loads as loads
import geolib.soils as soil_external
from geolib.geometry.one import Point
from geolib.models import BaseModel
from geolib.models.dsettlement.dsettlement_model import DSettlementModel
from geolib.models.dsettlement.internal import (
Bool,
Boundaries,
Boundary,
ConsolidationModel,
Curve,
Curves,
Dimension,
DispersionConditionLayerBoundary,
DSeriePoint,
DSettlementStructure,
GeometryData,
Layer,
Layers,
Model,
Points,
PreconPressureWithinLayer,
SoilModel,
StrainType,
StressDistributionLoads,
StressDistributionSoil,
Version,
)
from geolib.models.dsettlement.loads import RectangularLoad
from geolib.soils import (
IsotacheParameters,
Soil,
SoilClassificationParameters,
SoilWeightParameters,
StateType,
)
from tests.utils import TestUtils, only_teamcity
class TestDSettlementAcceptance:
def setup_class(self):
self.soils = [
Soil(
name="Sand",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=19.0, unsaturated_weight=17.0
),
),
Soil(
name="Peat",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=10.0, unsaturated_weight=10.0
),
),
Soil(
name="Clay",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=14.0, unsaturated_weight=14.0
),
),
Soil(
name="Embankement",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=16.0, unsaturated_weight=16.0
),
),
]
self.points = [
Point(x=-50, z=0.0),
Point(x=-10, z=0.0),
Point(x=0, z=2),
Point(x=10, z=2),
Point(x=30, z=0.0),
Point(x=50, z=0.0),
Point(x=-50, z=-5),
Point(x=50, z=-5),
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=-50, z=-20),
Point(x=50, z=-20),
Point(x=-50, z=-2),
Point(x=50, z=-2),
Point(x=-50, z=1),
Point(x=50, z=1),
]
dm = DSettlementModel()
self.outputdir = Path(
TestUtils.get_output_test_data_dir("dsettlement/acceptancetest/")
)
self.inputfile = Path(
TestUtils.get_test_data_dir("test_data/dsettlement", "2dgeom_with10.sld")
)
@pytest.mark.systemtest
def test_dsettlement_empty(self):
dm = DSettlementModel()
path = self.outputdir / "test_empty.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_soils(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
path = self.outputdir / "test_add_soils.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_soil_koppejan(self):
dm = DSettlementModel()
soil_input = Soil(name="MyNewSoil")
soil_input.soil_classification_parameters = SoilClassificationParameters()
soil_input.soil_weight_parameters = soil_external.SoilWeightParameters()
soil_input.soil_weight_parameters.saturated_weight = (
soil_external.StochasticParameter(mean=20)
)
soil_input.soil_weight_parameters.unsaturated_weight = (
soil_external.StochasticParameter(mean=30)
)
soil_input.soil_classification_parameters.initial_void_ratio = (
soil_external.StochasticParameter(mean=0.1)
)
soil_input.koppejan_parameters = soil_external.KoppejanParameters(
precon_koppejan_type=StateType.YIELD_STRESS
)
soil_input.soil_state = soil_external.SoilState(
use_equivalent_age=True, equivalent_age=2
)
soil_input.koppejan_parameters.preconsolidation_pressure = (
soil_external.StochasticParameter(mean=10)
)
dm.add_soil(soil_input)
path = self.outputdir / "test_add_soil_koppejan.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_simple_geometry(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_simple_geometry.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_headlines(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
hl_id = dm.add_head_line(
points=[self.points[14], self.points[15]], is_phreatic=False
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=hl_id,
head_line_bottom=hl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=99,
head_line_bottom=hl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=99,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_headlines.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_load(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.add_non_uniform_load(
"traffic",
points=[self.points[2], Point(x=1, z=3), Point(x=9, z=3), self.points[3]],
gamma_wet=25.0,
gamma_dry=25.0,
time_start=timedelta(days=0),
time_end=timedelta(days=1000),
)
path = self.outputdir / "test_add_load.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_verticals(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
path = self.outputdir / "test_set_verticals.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_set_model(self):
dm = DSettlementModel()
dm.set_model(
constitutive_model=SoilModel.NEN_KOPPEJAN,
consolidation_model=ConsolidationModel.DARCY,
is_vertical_drain=True,
strain_type=StrainType.NATURAL,
is_two_dimensional=True,
is_fit_for_settlement_plate=False,
is_probabilistic=False,
is_horizontal_displacements=False,
is_secondary_swelling=True,
is_waspan=False,
)
path = self.outputdir / "test_set_model.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_set_residualtimes(self):
dm = DSettlementModel()
dm.set_calculation_times(
time_steps=[timedelta(days=d) for d in [10, 100, 1000, 2000, 3000, 4000]]
)
path = self.outputdir / "test_set_residualtimes.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_layerload(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_layerload.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_other_load(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.add_other_load(
name="rectangle",
time=timedelta(days=100),
point=Point(x=5.0, y=10.0, z=2.0),
other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
)
path = self.outputdir / "test_other_load.sli"
dm.serialize(path)
@pytest.mark.acceptance
@pytest.mark.xfail
@only_teamcity
def test_sorting_vertical_layer_boundaries(self):
points = [
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=-50, z=0.0),
Point(x=0, z=0.0),
Point(x=0.0, z=-10.0),
Point(x=-50, z=-20),
Point(x=50, z=-20),
Point(x=50, z=0.0),
]
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(points=[points[0], points[1]], is_phreatic=True)
b1 = dm.add_boundary(points=[points[0], points[4], points[1]])
b2 = dm.add_boundary(points=[points[2], points[3], points[7]])
b3 = dm.add_boundary(points=[points[0], points[4], points[3], points[7]])
b4 = dm.add_boundary(points=[points[5], points[6]])
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b1,
boundary_bottom=b4,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b1,
)
l3 = dm.add_layer(
material_name="peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b3,
)
dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
dm.add_other_load(
name="rectangle",
time=timedelta(days=100),
point=Point(x=-5.0, y=-5.0, z=0.0),
other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
)
path = self.outputdir / "test_sort_vertical_layer_boundaries.sli"
dm.serialize(path)
dm.execute()
assert dm.datastructure
| true | true |
f7214283b1a050b951ccaeb5b99108ab85e04e6d | 48,023 | py | Python | authortitle/views.py | MLGB3/mysite | 433f245918cfc85f3d42b51e7405ae101160d3cd | [
"Apache-2.0"
] | null | null | null | authortitle/views.py | MLGB3/mysite | 433f245918cfc85f3d42b51e7405ae101160d3cd | [
"Apache-2.0"
] | null | null | null | authortitle/views.py | MLGB3/mysite | 433f245918cfc85f3d42b51e7405ae101160d3cd | [
"Apache-2.0"
] | null | null | null | """
# Setup script for index based on Richard Sharpe's List of Identifications
"""
#--------------------------------------------------------------------------------
import math
from django.template import Context, loader
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.db import connection
from urllib import quote, unquote
from mysite.config import *
from mysite.MLGBsolr import *
import mysite.mlgb.views as mv
#--------------------------------------------------------------------------------
solr_query = '' # for debugging
printing = False
editable = False
baseurl="/authortitle"
medieval_catalogues_url = "/authortitle/medieval_catalogues"
mlgb_book_url = '/mlgb/book'
default_order_by = "solr_id_sort"
catalogue_provenance_sort_list = [ "s_library_type asc",
"s_library_loc asc",
"s_document_code_sort asc",
"s_seqno_in_doc_sort asc",
"s_copy_code asc",
"solr_id_sort asc" ]
catalogue_date_sort_list = [ "d_document_start asc",
"d_document_end asc",
"s_library_type asc",
"s_library_loc asc",
"s_document_code_sort asc",
"s_seqno_in_doc_sort asc",
"s_copy_code asc",
"solr_id_sort asc" ]
searchable_fields = [
{ "fieldname": "text", "label": "All fields", "info": "", "value": "" },
{ "fieldname": "t_author", "label": "Author", "info": "", "value": "" },
{ "fieldname": "t_title", "label": "Title of book", "info": "", "value": "" },
{ "fieldname": "t_bibliography", "label": "Bibliographical details", "info": "", "value": "" },
{ "fieldname": "t_library", "label": "Catalogue provenance",
"info": "E.g. 'Benedictines Peterborough' or 'Henry de Kirkestede'", "value": "" },
{ "fieldname": "t_document", "label": "Description of document", "value": "", "info":
"E.g. 'Books read in the refectory, 13th cent'." \
+ " Description includes either an indication of document date or the word 'undated'." },
{ "fieldname": "s_document_type", "label": "Type of document", "value": "", "info": "" },
# The next 2 fields do not map directly to ones in the Solr index.
# We'll use them to query on s_document_start/end_year
{ "fieldname": "q_earliest_year", "label": "Start of required date range", "value": "",
"info": "Enter the earliest year that you are interested in, e.g. 1400." },
{ "fieldname": "q_latest_year", "label": "End of required date range", "value": "",
"info": "Enter the latest year that you are interested in, e.g. 1499." },
]
facet = False
newline = '\n'
carriage_return = '\r'
right_arrow = '→'
biblio_block_line_length = 100
#================= Top-level functions, called directly from URL ================
#--------------------------------------------------------------------------------
# The function browse() allows browsing of the index by author/title
def browse( request, letter = '', pagename = 'index', called_by_editable_page = False ): #{
global searchable_fields # this is used in advanced search
for field in searchable_fields:
field[ "value" ] = "" # re-initialise every field to blank so that old searches don't hang around
# The call to 'enable edit' is just so that they can (eventually) navigate from the main site to
# the index and then back again, without losing their 'editability' setting on the way.
if called_by_editable_page: enable_edit()
else: disable_edit()
global printing # are we about to print this page, or view it in onscreen mode?
printing = False
printing = mv.get_value_from_GET( request, "printing", False )
if letter != '' and not letter.isalpha(): letter = 'A'
letter = letter.upper()
# Get a list of document types for a dropdown list
doctypes = get_doctype_dropdown_options()
# For now, just for testing, let's use a hard-coded block of HTML, generated by writeHTML.py.
# This may need changing later.
t = loader.get_template('authortitle/index%s.html' % letter )
c = Context( {
'pagename' : pagename,
'editable' : editable,
'letter' : letter,
'printing' : printing,
'print_link' : mv.get_link_for_print_button( request ),
'called_by_collapsible_page': True,
'default_rows_per_page': mv.default_rows_per_page,
'advanced_search_fields': searchable_fields,
'doctype_dropdown_options': doctypes,
} )
return HttpResponse( t.render( c ) )
#}
# end function browse()
#--------------------------------------------------------------------------------
def browse_e( request, letter = '', pagename = 'index' ): #{
return browse( request, letter, pagename, True )
#}
#--------------------------------------------------------------------------------
# The function medieval_catalogues() allows browsing of the index by medieval document
def medieval_catalogues( request, cat = '', pagename = 'cats', called_by_editable_page = False ): #{
global searchable_fields # this is used in advanced search
for field in searchable_fields:
field[ "value" ] = "" # re-initialise every field to blank so that old searches don't hang around
# The call to 'enable edit' is just so that they can (eventually) navigate from the main site to
# the index and then back again, without losing their 'editability' setting on the way.
if called_by_editable_page: enable_edit()
else: disable_edit()
global printing # are we about to print this page, or view it in onscreen mode?
printing = False
printing = mv.get_value_from_GET( request, "printing", False )
sort_by_date = False
display_decodes = False
if not cat.isalnum(): #{
cat = ''
#}
elif cat == 'bydate': #{
cat = ''
sort_by_date = True
#}
elif cat == 'decode': #{
cat = ''
display_decodes = True
#}
else:
cat = cat.upper()
called_by_collapsible_page = False
# Get a list of document types for a dropdown list
doctypes = get_doctype_dropdown_options()
# For now, just for testing, let's use a hard-coded block of HTML,
# generated by cataloguesHTML.py. This may need changing later.
if cat:
t = loader.get_template('authortitle/catalogue%s.html' % cat )
elif sort_by_date:
t = loader.get_template('authortitle/cataloguelistbydate.html' )
elif display_decodes:
t = loader.get_template('authortitle/decode.html' )
else: #{
called_by_collapsible_page = True
t = loader.get_template('authortitle/cataloguelist.html' )
#}
c = Context( {
'pagename' : pagename,
'editable' : editable,
'cat' : cat,
'printing' : printing,
'print_link': mv.get_link_for_print_button( request ),
'called_by_collapsible_page': called_by_collapsible_page,
'default_rows_per_page': mv.default_rows_per_page,
'advanced_search_fields': searchable_fields,
'doctype_dropdown_options': doctypes,
} )
return HttpResponse( t.render( c ) )
#}
# end function medieval_catalogues()
#--------------------------------------------------------------------------------
def medieval_catalogues_e( request, cat = '', pagename = 'cats' ): #{
return medieval_catalogues( request, cat, pagename, True )
#}
#--------------------------------------------------------------------------------
# The function cat_source() allows browsing of the index by source of medieval catalogue.
# The primary source is the type of institution (document group type), e.g. A for Augustinian Canons.
# You can also browse one location within an institution type (document group type/document group ID),
# e.g. /A/15/ for the Augustinian location 'Lanthony', which has document group ID 15.
def cat_source( request, source = '', loc = '', pagename = 'cats', called_by_editable_page = False ): #{
global searchable_fields # this is used in advanced search
for field in searchable_fields:
field[ "value" ] = "" # re-initialise every field to blank so that old searches don't hang around
if called_by_editable_page: enable_edit()
else: disable_edit()
global printing # are we about to print this page, or view it in onscreen mode?
printing = False
printing = mv.get_value_from_GET( request, "printing", False )
if not source.isalpha(): #{
source = ''
loc = ''
#}
else:
source = source.upper()
if not loc.isalnum(): loc = ''
full_source = source
if loc: full_source += '-%s' % loc.lower()
# Get a list of document types for a dropdown list
doctypes = get_doctype_dropdown_options()
# For now, just for testing, let's use a hard-coded block of HTML,
# generated by cataloguesHTML.py. This may need changing later.
t = loader.get_template('authortitle/cataloguelist%s.html' % full_source )
c = Context( {
'pagename' : pagename,
'editable' : editable,
'source' : source,
'location' : loc,
'printing' : printing,
'print_link': mv.get_link_for_print_button( request ),
'default_rows_per_page': mv.default_rows_per_page,
'advanced_search_fields': searchable_fields,
'doctype_dropdown_options': doctypes,
} )
return HttpResponse( t.render( c ) )
#}
# end function cat_source()
#--------------------------------------------------------------------------------
def cat_source_e( request, source = '', loc = '', pagename = 'cats' ): #{
return cat_source( request, source, loc, pagename, True )
#}
#--------------------------------------------------------------------------------
# The function results() is called either from Quick Search or from Advanced Search
def results( request, pagename = 'results', called_by_editable_page = False ): #{
# Set editability status
if called_by_editable_page: enable_edit()
else: disable_edit()
# Set printing status
global printing
printing = False
printing = mv.get_value_from_GET( request, "printing", False )
# See if you are doing quick or advanced search
search_type = mv.get_value_from_GET( request, "search_type", "quick" )
# Run the Solr query
(resultsets, number_of_records, search_term, \
solr_start, solr_rows, page_size ) = run_solr_query( request )
mv.printing = printing
pag = mv.pagination( rows_found = number_of_records, \
current_row = solr_start, \
rows_per_page = solr_rows, \
link_for_print_button = mv.get_link_for_print_button( request ),
link_for_download_button = mv.get_link_for_download_button( request ) )
# Format the results into an HTML string ready for display
order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
result_string = get_result_string( resultsets, order_by )
result_string = pag + newline + '<p></p>' + newline + result_string
if number_of_records > solr_rows: # repeat pagination at the bottom
result_string += newline + '<p></p>' + newline + pag
# Get a list of document types for a dropdown list
doctypes = get_doctype_dropdown_options()
# Pass HTML string and other data to the template for display
t = loader.get_template( 'authortitle/results.html' )
c = Context( {
'pagename' : pagename,
'editable' : editable,
'results' : result_string,
'order_by' : order_by,
'printing' : printing,
'print_link' : mv.get_link_for_print_button( request ),
'default_rows_per_page': mv.default_rows_per_page,
'number_of_records': number_of_records,
'search_type': search_type,
'search_term': search_term,
'advanced_search_fields': searchable_fields,
'doctype_dropdown_options': doctypes,
'solr_query': solr_query,
} )
return HttpResponse( t.render( c ) )
#}
# end function results()
#--------------------------------------------------------------------------------
def results_e( request, pagename = 'results' ): #{
return results( request, pagename, True )
#}
#--------------------------------------------------------------------------------
#================ End top-level functions called directly from URL ==============
#--------------------------------------------------------------------------------
## This changes links to exclude the 'editable' part of the URL
def disable_edit(): #{
global editable
editable = False
global baseurl
baseurl = '/authortitle'
#}
#--------------------------------------------------------------------------------
## This changes links to include the 'editable' part of the URL
def enable_edit(): #{
global editable
editable = True
global baseurl
baseurl = '/e/authortitle'
#}
#--------------------------------------------------------------------------------
# Either run a basic Solr query (i.e. on a single search term) against default field of 'catalogues' core
# Or run an *advanced* Solr query (i.e. on a multiple search terms)
def run_solr_query( request ): #{
    """Build and run a Solr query against the 'catalogues' core from request.GET.

    Supports two search types: 'quick' (a single term searched against the
    default 'text' field) and 'advanced' (one clause per populated field in
    searchable_fields, joined with AND).  Paging and ordering parameters are
    also read from GET.  Side effects: fills in the 'value' entry of each
    dict in the module-global searchable_fields (so the search form can be
    redisplayed with the user's terms) and stores the query string in the
    module-global solr_query (for debugging).

    Returns a 6-tuple:
        (resultsets, number_of_records, search_term,
         solr_start, solr_rows, page_size)
    NOTE(review): with an empty GET, solr_start / solr_rows / page_size come
    back as '' rather than integers; the caller compares number_of_records
    against solr_rows directly (Python 2 mixed-type comparison) -- confirm
    before tightening these types.
    """
    global solr_query # for debugging
    global searchable_fields # this is used in advanced search
    for field in searchable_fields:
        field[ "value" ] = "" # initialise every field value to blank
    resultsets = []
    number_of_records = 0
    search_type = ""
    search_term = solr_start = page_size = solr_query = solr_rows = ""
    if request.GET: #{ # was a search term found in GET?
        #=====================================================================
        # Get search type, records per page, start row and "order by" from GET
        #=====================================================================
        # Set search type (quick or advanced)
        search_type = mv.get_value_from_GET( request, 'search_type', 'quick' )
        if search_type not in [ 'quick', 'advanced' ]: search_type = 'quick'
        # Set page size
        page_size = mv.get_value_from_GET( request, "page_size", str( mv.default_rows_per_page ) )
        if page_size.isdigit():
            solr_rows = int( page_size )
        else: # non-numeric page size: fall back to the configured default
            solr_rows = mv.default_rows_per_page
        # Set start page
        solr_start = mv.get_value_from_GET( request, "start", 0 )
        # Set "order by" -- the two catalogue orderings use multi-field sort
        # lists defined at module level; anything else gets the default sort
        order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
        if order_by == default_order_by:
            solr_sort = order_by + " asc"
        elif order_by == "catalogue_provenance":
            solr_sort = ",".join( catalogue_provenance_sort_list )
        elif order_by == "catalogue_date":
            solr_sort = ",".join( catalogue_date_sort_list )
        else:
            solr_sort = default_order_by + " asc"
        #=====================
        # Construct Solr query
        #=====================
        if search_type == 'quick': #{ # search on all fields via the single form field 'search_term'
            search_term = mv.get_value_from_GET( request, 'search_term' )
            if not search_term: search_term = '*'
            solr_query = mv.escape_for_solr( search_term )
            if ' ' in solr_query:
                solr_query = '(%s)' % solr_query # multi-word term: group it
            if search_term=='*' or search_term=='':
                solr_query='*:*' # match everything
            else: #{
                solr_query = "text:%s" % solr_query
                for field in searchable_fields: #{ # store the search term in the 'text' field
                    fieldname = field[ "fieldname" ]
                    if fieldname == "text": #{
                        field[ "value" ] = search_term
                        break
                    #}
                #}
            #}
        #}
        else: #{ # advanced search on any combination of multiple searchable fields
            fields_searched = []
            for field in searchable_fields: #{
                fieldname = field[ "fieldname" ]
                fieldval = mv.get_value_from_GET( request, fieldname, "" )
                if fieldval == '*': fieldval = '' # bare wildcard means 'not filtered'
                field[ "value" ] = fieldval
                if fieldval: #{ # they entered a query on this field
                    if fieldname in [ "q_earliest_year", "q_latest_year" ]: #{
                        if fieldval.isdigit(): #{
                            query_clause = get_date_range_query( fieldname, fieldval )
                            if query_clause: fields_searched.append( query_clause )
                        #}
                        else: # non-numeric year, can't be queried on
                            field[ "value" ] = ""
                    #}
                    else: #{
                        fieldval = mv.escape_for_solr( fieldval )
                        if ' ' in fieldval: #{
                            if fieldname == 's_document_type': # string not text
                                fieldval = '"%s"' % fieldval
                            else:
                                fieldval = '(%s)' % fieldval
                        #}
                        fields_searched.append( "%s:%s" % (fieldname, fieldval))
                    #}
                #}
            #}
            if len( fields_searched ) > 0:
                solr_query = " AND ".join( fields_searched )
            else: #{ # no field filled in at all: match everything
                solr_query='*:*'
                for field in searchable_fields: #{
                    fieldname = field[ "fieldname" ]
                    if fieldname == 'text': #{
                        field[ "value" ] = "*"
                        break
                    #}
                #}
            #}
        #}
        #===================
        # Run the Solr query
        #===================
        s_para={'q' : solr_query,
                'wt' : s_wt, # 's_wt', i.e. 'writer type' is set in config.py, defaults to "json"
                'start': solr_start,
                'rows' : solr_rows,
                'sort' : solr_sort}
        r = MLGBsolr()
        r.solrresults( s_para, facet, 'catalogues' )
        if r.connstatus and r.s_result: #{ #did we retrieve a result?
            resultsets = r.s_result.get( 'docs' )
            number_of_records = r.s_result.get( 'numFound' )
        #}
    #} # end of check on whether a search term was found in GET
    #===================
    # Return the results
    #===================
    return ( resultsets, number_of_records,
             search_term, solr_start, solr_rows, page_size )
#}
# end function run_solr_query()
#--------------------------------------------------------------------------------
def get_date_range_query( fieldname, fieldval ): #{
    ## Translate one date-range form field into a Solr range clause.
    ## A requested range of, say, 1420-1460 matches any document whose own
    ## date range overlaps it: the document must START no later than 1460
    ## (q_latest_year) and END no earlier than 1420 (q_earliest_year).
    ## Returns '' for any other field name.
    year = fieldval.rjust( 4, '0' ) # zero-pad short years, e.g. '99' -> '0099'
    clause_templates = {
        'q_earliest_year': 's_document_end_year:["%s" TO *]',   # required start
        'q_latest_year'  : 's_document_start_year:[* TO "%s"]', # required end
    }
    template = clause_templates.get( fieldname )
    if template is None: return ''
    return template % year
#}
# end function get_date_range_query()
#--------------------------------------------------------------------------------
def extract_from_result( record ): #{
    ## Pull every display field out of one Solr result document, supplying
    ## '' (or [] for the multi-valued MLGB book ID) when a field is absent.
    ## The tuple is produced in the fixed order the formatting functions
    ## unpack it in; 'id' and 'solr_id_sort' are mandatory in every record.
    # Keys whose values default to '' -- listed in return-tuple order.
    defaulted_keys = (
        "sql_entry_id", "sql_entry_book_count", "sql_copy_count",
        # from the 'entries' table
        "s_entry_name", "s_entry_xref_name", "s_author_name",
        "s_entry_biblio_line", "s_entry_biblio_block",
        # from the 'books' table
        "s_title_of_book", "s_xref_title_of_book", "s_role_in_book",
        "s_problem", "s_book_biblio_line",
        # from the 'copies' table
        "s_copy_code", "s_copy_notes", "s_printed_yn", "s_survives_yn",
        "s_uncertain_yn", "s_duplicate_title_yn",
        # from the 'documents' table and the 'document groups' /
        # 'document group types' tables (institution location and type)
        "s_document_code", "s_document_code_sort", "s_seqno_in_document",
        "s_seqno_in_doc_sort", "s_document_name",
        # d_* fields are for SORTING on, e.g. '12th century' has a start
        # year of 1198 and 'late 12th century' has a start year of 1199
        "d_document_start", "d_document_end",
        "s_document_type",
        "s_library_type",      # doc_group_type_name
        "s_library_loc",       # doc_group_name
        "s_library_type_code", # doc_group_type_code
        "s_library_loc_id",    # doc_group_id
    )
    # these fields are for SEARCHING on or displaying
    trailing_keys = ( "s_entry_letter", "s_document_start_year",
                      "s_document_end_year", "s_document_date_in_words" )
    values = [ record[ "id" ], record[ "solr_id_sort" ] ]
    for key in defaulted_keys:
        values.append( record.get( key, "" ) )
    # from the 'MLGB links' table: MLGB book ID is multi-valued, just in case
    values.append( record.get( "s_mlgb_book_id", [] ) )
    for key in trailing_keys:
        values.append( record.get( key, "" ) )
    return tuple( values )
#}
# end function extract_from_result()
#--------------------------------------------------------------------------------
def get_result_string( results, order_by ): #{
    ## Choose the HTML formatter matching the requested sort order; an
    ## empty result set yields just an empty paragraph.
    if not results: return '<p></p>' + newline
    formatter_by_order = {
        'catalogue_provenance': get_result_string_by_catalogue_provenance,
        'catalogue_date':       get_result_string_by_catalogue_date,
    }
    formatter = formatter_by_order.get( order_by, get_result_string_by_author_title )
    return formatter( results )
#}
# end function get_result_string()
#--------------------------------------------------------------------------------
def get_result_string_by_author_title( results ): #{
    """Format Solr results (pre-sorted by author/title) as nested HTML lists.

    Builds a three-level structure -- author/title entry > book > catalogue
    copy -- by streaming through the rows and comparing each row's entry id,
    book count and copy code against the previous row's.  The grouping
    therefore relies entirely on the incoming Solr sort order.
    """
    html = '<ul><!-- start list of author/title entries -->' + newline
    # Trackers for detecting where a new entry / book / copy group starts
    prev_entry_id = ''
    prev_entry_book_count = ''
    prev_title_of_book = ''
    prev_copy_code = ''
    for row in results: #{
        new_entry = False
        new_book = False
        (solr_id, solr_id_sort,
        sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
        s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
        s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
        s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
        s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
        s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
        s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
        s_document_date_in_words) = extract_from_result( row )
        # A change of entry id opens a new entry (and a new book within it);
        # the same entry with a different book count opens just a new book
        if sql_entry_id != prev_entry_id: #{
            new_entry = True
            new_book = True
        #}
        elif sql_entry_id == prev_entry_id and sql_entry_book_count != prev_entry_book_count: #{
            new_book = True
        #}
        if new_entry: #{
            if prev_entry_id: #{ # close off the previous entry's open lists
                html += '</ul><!-- end catalogue entry list -->' + newline
                html += '</ul><!-- end book list -->' + newline
                html += '</li><!-- end author/title entry -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start author/title entry -->' + newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                      s_entry_biblio_line, s_entry_biblio_block, \
                                                      sql_entry_id, s_entry_letter )
            html += '<ul><!-- start book list -->' + newline
        #}
        if new_book: #{
            prev_copy_code = ''
            if not new_entry: #{ # close the previous book within the same entry
                html += '</ul><!-- end catalogue entry list -->' + newline
                if prev_title_of_book: html += '</li><!-- end book -->' + newline
            #}
            # check if the entry refers to a book title rather than an author
            if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                s_title_of_book = ''
            if s_title_of_book.strip(): html += '<li class="medieval_cat_result"><!-- start book -->' + newline
            prev_title_of_book = s_title_of_book.strip()
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                      s_problem, s_book_biblio_line )
            html += '<ul><!-- start list of catalogue entries -->' + newline
        #}
        if sql_copy_count: #{ # rows without a copy count are cross-references only
            if s_copy_code != prev_copy_code: #{
                html += '<li class="medieval_cat_result"><!-- start catalogue entry -->' + newline
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                                         s_entry_name, s_title_of_book )
                html += newline + '<ul>' + newline
                if s_library_type: #{ # provenance: library, then its document if known
                    html += '<li>From '
                    html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
                    if s_document_code and s_document_name:
                        html += ': %s' % get_document_link( s_document_code, s_document_name, s_document_type )
                    html += '</li>' + newline
                #}
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul>' + newline
                html += '</li><!-- end catalogue entry -->' + newline
            #}
        #}
        prev_entry_id = sql_entry_id
        prev_entry_book_count = sql_entry_book_count
        prev_copy_code = s_copy_code
    #}
    # Close the final entry's lists and the outer author/title list
    html += '</ul><!-- end catalogue entry list -->' + newline
    html += '</ul><!-- end book list -->' + newline
    html += '</li><!-- end author/title entry -->' + newline
    html += '</ul><!-- end author/title list -->' + newline
    return html
#}
# end get_result_string_by_author_title()
#--------------------------------------------------------------------------------
def get_result_string_by_catalogue_provenance( results ): #{
    """Format Solr results (pre-sorted by provenance) as nested HTML lists.

    Groups rows as library (type + location) > document > catalogue entry,
    detecting group boundaries by comparing each row with the previous one,
    so the grouping relies on the incoming Solr sort order.  Rows without an
    sql_copy_count are rendered as cross-reference-only entries.
    """
    html = '<ul><!-- start list of library types (A) -->' + newline
    # Trackers for detecting where a new library / document / copy starts
    prev_library = ''
    prev_document_code = ''
    prev_copy_code = ''
    for row in results: #{
        new_library = False
        new_document_code = False
        (solr_id, solr_id_sort,
        sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
        s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
        s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
        s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
        s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
        s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
        s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
        s_document_date_in_words) = extract_from_result( row )
        # A 'library' here is the concatenation of institution type and place
        curr_library = s_library_type + s_library_loc
        if curr_library != prev_library: #{
            new_library = True
            new_document_code = True
        #}
        elif curr_library == prev_library and s_document_code != prev_document_code: #{
            new_document_code = True
        #}
        if new_library: #{
            if prev_library: #{ # close off the previous library's open lists
                html += '</ul><!-- end list of catalogue entries (C) -->' + newline
                html += '</ul><!-- end document list (B) -->' + newline
                html += '</li><!-- end library list-item (A) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start library list-item (A) -->' + newline
            html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
            html += newline + '<ul><!-- start document list (B) -->' + newline
        #}
        if new_document_code: #{
            prev_copy_code = ''
            if not new_library: #{ # close the previous document within the same library
                html += newline + '</ul><!-- end list of catalogue entries (C) -->' + newline
                html += newline + '</li><!-- end document list-item (B) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
            if s_document_code and s_document_name:
                html += get_document_link( s_document_code, s_document_name, s_document_type )
            else:
                html += '[no document found]'
            html += newline + '<ul><!-- start list of catalogue entries (C) -->' + newline
        #}
        if sql_copy_count: #{ # a real catalogue copy record
            if s_copy_code != prev_copy_code: #{
                html += newline + '<li class="medieval_cat_result"><!-- start catalogue entry list-item (C) -->'
                html += newline
                # Hover text: library type plus location, unless already included
                hover_library = s_library_type
                if not s_library_type.endswith( s_library_loc ):
                    hover_library += ': %s' % s_library_loc
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                                         hover_library, s_document_name )
                html += '<br />'
                html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                          s_entry_biblio_line, s_entry_biblio_block,\
                                                          sql_entry_id, s_entry_letter )
                # check if the entry refers to a book title rather than an author
                if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                    s_title_of_book = ''
                if s_title_of_book and not s_entry_biblio_block: html += '<br />'
                html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                          s_problem, s_book_biblio_line )
                html += newline + '<ul><!-- further details list (D) -->' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul><!-- end further details list (D) -->' + newline
                html += newline + '</li><!-- end catalogue entry list-item (C) -->' + newline
            #}
        #}
        else: #{ # just a cross-reference entry
            html += newline + '<li class="medieval_cat_result"><!-- start cross-reference entry (C) -->'
            html += newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                      s_entry_biblio_line, s_entry_biblio_block,\
                                                      sql_entry_id, s_entry_letter )
            if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                s_title_of_book = ''
            if s_title_of_book and not s_entry_biblio_block: html += '<br />'
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                      s_problem, s_book_biblio_line )
            html += newline + '</li><!-- end cross-reference entry (C) -->' + newline
        #}
        prev_library = curr_library
        prev_document_code = s_document_code
        prev_copy_code = s_copy_code
    #}
    # Close the final library's lists and the outer list of libraries
    html += newline
    html += '</ul><!-- end list of catalogue entries (C) -->' + newline
    html += '</ul><!-- end list of documents (B) -->' + newline
    html += '</li><!-- end library list-item (A) -->' + newline
    html += '</ul><!-- end list of libraries (A) -->' + newline
    return html
#}
# end get_result_string_by_catalogue_provenance()
#--------------------------------------------------------------------------------
def get_result_string_by_catalogue_date( results ): #{
    """Format Solr results (pre-sorted by document date) as nested HTML.

    Groups rows as century (derived from d_document_start) > document >
    catalogue entry, laying the entries out in a table whose first column is
    the document's date in words.  Group boundaries are detected by
    comparing each row with the previous one, so the grouping relies on the
    incoming Solr sort order.  Rows without an sql_copy_count are rendered
    as cross-reference-only entries.
    """
    html = ''
    html = '<ul><!-- start list of centuries (A) -->' + newline
    # Trackers for detecting where a new century / document / copy starts
    prev_century = ''
    prev_document_code = ''
    prev_copy_code = ''
    for row in results: #{
        new_century = False
        new_document_code = False
        (solr_id, solr_id_sort,
        sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
        s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
        s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
        s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
        s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
        s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
        s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
        s_document_date_in_words) = extract_from_result( row )
        curr_century = get_century_from_date( d_document_start )
        if curr_century != prev_century: #{
            new_century = True
            new_document_code = True
        #}
        elif curr_century == prev_century and s_document_code != prev_document_code: #{
            new_document_code = True
        #}
        if new_century: #{
            if prev_century: #{ # close off the previous century's open lists
                html += '</table><!-- end list of catalogue entries (C) -->' + newline
                html += '</ul><!-- end document list (B) -->' + newline
                html += '</li><!-- end century list-item (A) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start century list-item (A) -->' + newline
            html += '<h3>' + get_century_desc( curr_century ) + '</h3>'
            html += newline + '<ul><!-- start document list (B) -->' + newline
        #}
        if new_document_code: #{
            prev_copy_code = ''
            if not new_century: #{ # close the previous document within the same century
                html += newline + '</table><!-- end list of catalogue entries (C) -->' + newline
                html += newline + '</li><!-- end document list-item (B) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
            if s_document_code and s_document_name: #{
                html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
                html += ': ' + get_document_link( s_document_code, s_document_name, s_document_type )
            #}
            else:
                html += '[no document found]'
            html += newline + '<table class="century">'
            html += '<!-- start list of catalogue entries (C) -->' + newline
        #}
        if sql_copy_count: #{ # a real catalogue copy record
            if s_copy_code != prev_copy_code: #{
                html += newline
                html += '<tr><!-- start catalogue entry table row (C) -->'
                # Summary of date
                html += '<td class="medieval_cat_result"><em>'
                html += s_document_date_in_words
                html += '</em></td>'
                html += newline
                # Copy code, copy notes, author/title, bibliography
                html += '<td class="medieval_cat_result">'
                html += newline
                # Hover text: library type plus location, unless already included
                hover_library = s_library_type
                if not s_library_type.endswith( s_library_loc ):
                    hover_library += ': %s' % s_library_loc
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                                         hover_library, s_document_name )
                html += '<br />'
                html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                          s_entry_biblio_line, s_entry_biblio_block,\
                                                          sql_entry_id, s_entry_letter )
                # check if the entry refers to a book title rather than an author
                if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                    s_title_of_book = ''
                if s_title_of_book and not s_entry_biblio_block: html += '<br />'
                html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                          s_problem, s_book_biblio_line )
                html += newline + '<ul><!-- further details list (D) -->' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul><!-- end further details list (D) -->' + newline
                html += newline + '</td></tr><!-- end catalogue entry row (C) -->' + newline
            #}
        #}
        else: #{ # just a cross-reference entry
            html += newline
            html += '<tr><td></td><td class="medieval_cat_result">'
            html += '<!-- start cross-reference entry (C) -->'
            html += newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                      s_entry_biblio_line, s_entry_biblio_block,\
                                                      sql_entry_id, s_entry_letter )
            if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                s_title_of_book = ''
            if s_title_of_book and not s_entry_biblio_block: html += '<br />'
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                      s_problem, s_book_biblio_line )
            html += newline + '</td></tr><!-- end cross-reference entry (C) -->' + newline
        #}
        prev_century = curr_century
        prev_document_code = s_document_code
        prev_copy_code = s_copy_code
    #}
    # Close the final century's table/lists and the outer list of centuries
    html += newline
    html += '</table><!-- end list of catalogue entries (C) -->' + newline
    html += '</ul><!-- end list of documents (B) -->' + newline
    html += '</li><!-- end century list-item (A) -->' + newline
    html += '</ul><!-- end list of centuries (A) -->' + newline
    return html
#}
# end get_result_string_by_catalogue_date()
#--------------------------------------------------------------------------------
def get_century_from_date( date_string ): #{
    ## Reduce a Solr date value to a century token for grouping: e.g. a
    ## start date of '1198...' yields '12'.  Anything whose first four
    ## characters are not all digits (including blank values) yields
    ## 'undated'.
    year_text = str( date_string )[ 0 : 4 ]
    if len( year_text ) < 4 or not year_text.isdigit(): return 'undated'
    return str( int( year_text ) // 100 + 1 )
#}
#--------------------------------------------------------------------------------
def get_century_desc( century ): #{
    """Convert a century token from get_century_from_date() into display text.

    'undated' (any case), centuries >= 20 (the sort bucket that undated
    documents fall into) and any unexpected token all come back as
    'Undated'.  Previously an unexpected token raised UnboundLocalError
    because century_desc was never assigned.  Digit tokens below 20 become
    e.g. '12th century', now with the correct English ordinal suffix for
    centuries 1-3 (previously '1th'/'2th'/'3th').
    """
    if century.isdigit() and int( century ) < 20: #{
        number = int( century )
        # 11-13 never reach here with 'st'/'nd'/'rd' since only 1-3 are mapped
        suffix = { 1: 'st', 2: 'nd', 3: 'rd' }.get( number, 'th' )
        return '%d%s century' % (number, suffix)
    #}
    # 'undated', the >= 20 bucket, or anything unrecognised
    return 'Undated'
#}
##=====================================================================================
def get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                      s_entry_biblio_line, s_entry_biblio_block,\
                                      sql_entry_id, s_entry_letter ): #{
    """Return HTML linking an index entry to its anchor on the browse page,
    followed by its cross-reference and bibliography.

    A short bibliography 'block' (at most one line of visible characters,
    per biblio_block_line_length) is shown inline; a longer one is hidden in
    a div that a manicule button expands/collapses via inline JavaScript
    keyed on the Solr document id, so each entry on the page gets its own
    toggle function.
    """
    # Entries under 'I/J' use the combined 'IJ' letter code in browse URLs
    if s_entry_letter == 'I/J': s_entry_letter = 'IJ'
    entry_href = '%s/browse/%s/#entry%s_anchor' % (baseurl, s_entry_letter, sql_entry_id)
    html = '<a href="%s" title="%s">' % (entry_href, s_entry_name)
    html += s_entry_name
    html += '</a>'
    if s_entry_xref_name: html += ' %s %s' % (right_arrow, s_entry_xref_name)
    if s_entry_biblio_line: html += ': ' + s_entry_biblio_line + newline
    if s_entry_biblio_block: #{
        # Count only the visible characters, ignoring the span markup
        display_chars = s_entry_biblio_block.replace( '<span class="biblio_block">', "" )
        display_chars = display_chars.replace( '</span>', "" )
        if len( display_chars ) > biblio_block_line_length: # show up to 1 line of block
            show_biblio_block = False
        else:
            show_biblio_block = True
        if show_biblio_block: #{ # short enough to display inline
            html += newline + '<div>'
            html += s_entry_biblio_block
            html += '</div>' + newline
        #}
        else: #{ # too long: emit toggle script, manicule button and hidden div
            pointing_at = 'bibliographical details'
            html += newline + '<script type="text/javascript">' + newline
            html += "function expand_collapse_biblio_block_%s() {" % solr_id
            html += newline
            html += '  var the_block = document.getElementById( "biblio_block_%s" );' % solr_id
            html += newline
            html += '  var the_button = document.getElementById( "biblio_button_%s" );' % solr_id
            html += newline
            html += '  if( the_block.style.display == "block" ) {'
            html += newline
            html += '    the_block.style.display = "none";'
            html += newline
            html += "    the_button.innerHTML = '%s';" % mv.manicule_pointing_right_img( pointing_at )
            html += newline
            html += '  }'
            html += newline
            html += '  else {'
            html += newline
            html += '    the_block.style.display = "block";'
            html += newline
            html += "    the_button.innerHTML = '%s';" % mv.manicule_pointing_down_img( pointing_at )
            html += newline
            html += '  }'
            html += newline
            html += '}'
            html += newline
            html += '</script>' + newline
            html += '<button id="biblio_button_%s" ' % solr_id
            html += ' class="manicule" onclick="expand_collapse_biblio_block_%s()" >' % solr_id
            html += mv.manicule_pointing_right_img( pointing_at )
            html += '</button>' + newline
            html += '<br />' + newline
            html += '<div id="biblio_block_%s" style="display:none">' % solr_id
            html += s_entry_biblio_block
            html += '<p></p>' + newline
            html += '</div>'
            html += newline
        #}
    #}
    return html
#}
# end get_entry_name_and_biblio_string()
#--------------------------------------------------------------------------------
def get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                      s_problem, s_book_biblio_line ): #{
    ## Assemble the display line for one book: optional problem marker and
    ## role, then the title (unless it just repeats the cross-reference),
    ## then a one-line bibliography and/or a cross-reference arrow.
    pieces = []
    for prefix in (s_problem, s_role_in_book):
        if prefix: pieces.append( prefix + ' ' )
    if s_title_of_book and s_title_of_book.strip() != s_xref_title_of_book.strip():
        pieces.append( s_title_of_book )
    if s_book_biblio_line: pieces.append( ": " + s_book_biblio_line )
    if s_xref_title_of_book: pieces.append( "%s %s" % (right_arrow, s_xref_title_of_book) )
    return ''.join( pieces )
#}
# end get_book_title_and_biblio_string()
#--------------------------------------------------------------------------------
def get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn ): #{
    ## Render the yes/no flags for one copy as <li> items; only flags whose
    ## value is exactly 'y' produce output.
    flag_labels = (
        (s_survives_yn,        'Surviving book'),
        (s_printed_yn,         'Printed book'),
        (s_uncertain_yn,       'Uncertain identification'),
        (s_duplicate_title_yn, 'Could refer to one of several works with the same title'),
    )
    html = ''
    for flag, label in flag_labels:
        if flag == 'y': html += '<li>%s</li>' % label + newline
    return html
#}
# end get_flags_string()
#--------------------------------------------------------------------------------
def get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                     hover_title_part_1 = '', hover_title_part_2 = '' ): #{
    ## Render one catalogue 'copy' code: either as link(s) through to the
    ## MLGB book record(s), or -- when no MLGB book is attached -- as a
    ## hoverable span that pops up the library/document context on click.
    editable_link = '/e' if editable else ''
    # Combine the two context parts, dropping part 2 when blank or a repeat
    hover_title = hover_title_part_1
    part_2 = hover_title_part_2.strip()
    if part_2 and part_2 != hover_title_part_1.strip():
        hover_title += ' -- %s' % hover_title_part_2
    # Strip italic markup and normalise double quotes so the text is safe
    # inside an HTML title attribute
    for unwanted in ( '<i>', '</i>' ):
        hover_title = hover_title.replace( unwanted, '' )
    hover_title = hover_title.replace( '"', "'" )
    # ... and make a single-line, quote-escaped copy for the JS alert()
    onclick_title = hover_title.replace( newline, ' ' )
    onclick_title = onclick_title.replace( carriage_return, '' )
    onclick_title = onclick_title.replace( "'", "\\'" )
    pieces = []
    # Either link the copy code to each attached MLGB book record...
    for book_id in s_mlgb_book_id: #{
        pieces.append( '<a href="%s%s/%s/" ' % (editable_link, mlgb_book_url, book_id) )
        pieces.append( ' title="Further details of book" ' )
        pieces.append( ' class="link_from_index_to_book">' )
        pieces.append( s_copy_code )
        pieces.append( '</a> ' )
    #}
    # ...or emit a span which you can hover over and get a bit more info
    if not pieces: #{
        pieces.append( '<span title="%s" class="index_catalogue_entry" ' % hover_title )
        pieces.append( ' onclick="alert(' + "'" + onclick_title + "'" + ')">' )
        pieces.append( s_copy_code )
        pieces.append( '</span>' )
    #}
    html = ''.join( pieces )
    # Add description/notes if there are any,
    # e.g. 'sermones Ailmeri prioris in glosis' or '(1 copy) = K5.7'
    if s_copy_notes.strip(): html += ' %s' % s_copy_notes
    return html
#}
# end get_copy_string()
#--------------------------------------------------------------------------------
def get_library_link( library_type_code, library_type_name, library_loc_id, library_loc_name ): #{
    ## Build link(s) to a library 'source' page, plus the specific location
    ## page when one exists and is not already part of the type name.
    ## Returns a placeholder when the type code or name is missing.
    if not (library_type_code and library_type_name):
        return '[no library found]'
    prefix = '/e' if editable else ''
    type_url = "%s%s/source/%s/" % (prefix, medieval_catalogues_url, library_type_code)
    parts = [ '<a href="%s" title="%s">%s</a>' % (type_url, library_type_name, library_type_name) ]
    # skip the location when repeated, e.g. HENRY DE KIRKESTEDE appears twice
    if library_loc_id and library_loc_name and not library_type_name.endswith( library_loc_name ): #{
        loc_url = "%s%s/" % (type_url, library_loc_id)
        parts.append( ': <a href="%s" title="%s">%s</a>' % (loc_url, library_loc_name, library_loc_name) )
    #}
    return ''.join( parts )
#}
#--------------------------------------------------------------------------------
def get_document_link( document_code, document_name, s_document_type = '' ): #{
    ## Build an <a> link to a medieval-catalogue document page; returns ''
    ## when either the code or the display name is missing.  The document
    ## type parameter is accepted but deliberately unused: the type is
    ## already given in the document name, so showing it again would be
    ## redundant.
    if not (document_code and document_name): return ''
    prefix = '/e' if editable else ''
    url = "%s%s/%s/" % (prefix, medieval_catalogues_url, document_code)
    return '<a href="%s" title="%s">%s</a>' % (url, document_name, document_name)
#}
#--------------------------------------------------------------------------------
def get_doctype_dropdown_options(): #{
    """Return the options for the document-type dropdown on the search form.

    Queries the distinct document_type values from index_medieval_documents
    (ordered by the database) and prepends a blank entry, used by the form
    to mean 'any type'.  The cursor is closed in a finally block so it is
    released even when the query raises (the original leaked it on error).
    """
    the_cursor = connection.cursor()
    try:
        the_cursor.execute(
            "select distinct document_type from index_medieval_documents order by document_type" )
        sql_doctypes = the_cursor.fetchall()
    finally:
        the_cursor.close() # always release the DB cursor
    return [ "" ] + [ sql_row[ 0 ] for sql_row in sql_doctypes ]
#}
#--------------------------------------------------------------------------------
| 37.169505 | 106 | 0.596089 |
import math
from django.template import Context, loader
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.db import connection
from urllib import quote, unquote
from mysite.config import *
from mysite.MLGBsolr import *
import mysite.mlgb.views as mv
solr_query = ''
printing = False
editable = False
baseurl="/authortitle"
medieval_catalogues_url = "/authortitle/medieval_catalogues"
mlgb_book_url = '/mlgb/book'
default_order_by = "solr_id_sort"
catalogue_provenance_sort_list = [ "s_library_type asc",
"s_library_loc asc",
"s_document_code_sort asc",
"s_seqno_in_doc_sort asc",
"s_copy_code asc",
"solr_id_sort asc" ]
catalogue_date_sort_list = [ "d_document_start asc",
"d_document_end asc",
"s_library_type asc",
"s_library_loc asc",
"s_document_code_sort asc",
"s_seqno_in_doc_sort asc",
"s_copy_code asc",
"solr_id_sort asc" ]
searchable_fields = [
{ "fieldname": "text", "label": "All fields", "info": "", "value": "" },
{ "fieldname": "t_author", "label": "Author", "info": "", "value": "" },
{ "fieldname": "t_title", "label": "Title of book", "info": "", "value": "" },
{ "fieldname": "t_bibliography", "label": "Bibliographical details", "info": "", "value": "" },
{ "fieldname": "t_library", "label": "Catalogue provenance",
"info": "E.g. 'Benedictines Peterborough' or 'Henry de Kirkestede'", "value": "" },
{ "fieldname": "t_document", "label": "Description of document", "value": "", "info":
"E.g. 'Books read in the refectory, 13th cent'." \
+ " Description includes either an indication of document date or the word 'undated'." },
{ "fieldname": "s_document_type", "label": "Type of document", "value": "", "info": "" },
{ "fieldname": "q_earliest_year", "label": "Start of required date range", "value": "",
"info": "Enter the earliest year that you are interested in, e.g. 1400." },
{ "fieldname": "q_latest_year", "label": "End of required date range", "value": "",
"info": "Enter the latest year that you are interested in, e.g. 1499." },
]
facet = False
newline = '\n'
carriage_return = '\r'
right_arrow = '→'
biblio_block_line_length = 100
#================= Top-level functions, called directly from URL ================
#--------------------------------------------------------------------------------
# The function browse() allows browsing of the index by author/title
def browse( request, letter = '', pagename = 'index', called_by_editable_page = False ):
    """Render one letter of the author/title index.

    Serves /authortitle/browse/<letter>/ (and the editable /e/... variant via
    browse_e).  Any non-alphabetic letter falls back to 'A'; the letter picks a
    pre-generated template 'authortitle/index<LETTER>.html'.
    """
    global searchable_fields               # shared with advanced search
    for field in searchable_fields:
        field[ "value" ] = ""              # clear stale values from earlier searches
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    global printing
    printing = mv.get_value_from_GET( request, "printing", False )
    # Sanitise the requested letter before using it in a template name.
    if letter != '' and not letter.isalpha(): letter = 'A'
    letter = letter.upper()
    doctypes = get_doctype_dropdown_options()
    # Template was generated offline per letter; may need changing later.
    t = loader.get_template('authortitle/index%s.html' % letter )
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'letter' : letter,
        'printing' : printing,
        'print_link' : mv.get_link_for_print_button( request ),
        'called_by_collapsible_page': True,
        'default_rows_per_page': mv.default_rows_per_page,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
    } )
    return HttpResponse( t.render( c ) )
#--------------------------------------------------------------------------------
def browse_e( request, letter = '', pagename = 'index' ):
    """Editable-mode entry point: browse() with editing enabled."""
    return browse( request, letter, pagename, True )
#--------------------------------------------------------------------------------
# The function medieval_catalogues() allows browsing of the index by medieval document
def medieval_catalogues( request, cat = '', pagename = 'cats', called_by_editable_page = False ):
    """Render the index of medieval documents (catalogues).

    `cat` selects the view: a document code (e.g. 'B21') shows one catalogue,
    the special values 'bydate' and 'decode' select alternative list views,
    and anything else shows the full catalogue list.
    """
    global searchable_fields               # shared with advanced search
    for field in searchable_fields:
        field[ "value" ] = ""              # clear stale values from earlier searches
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    global printing
    printing = mv.get_value_from_GET( request, "printing", False )
    sort_by_date = False
    display_decodes = False
    # Interpret the 'cat' path component; non-alphanumeric input is rejected.
    if not cat.isalnum():
        cat = ''
    elif cat == 'bydate':
        cat = ''
        sort_by_date = True
    elif cat == 'decode':
        cat = ''
        display_decodes = True
    else:
        cat = cat.upper()
    called_by_collapsible_page = False
    doctypes = get_doctype_dropdown_options()
    # Templates were generated by cataloguesHTML.py; may need changing later.
    if cat:
        t = loader.get_template('authortitle/catalogue%s.html' % cat )
    elif sort_by_date:
        t = loader.get_template('authortitle/cataloguelistbydate.html' )
    elif display_decodes:
        t = loader.get_template('authortitle/decode.html' )
    else:
        called_by_collapsible_page = True
        t = loader.get_template('authortitle/cataloguelist.html' )
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'cat' : cat,
        'printing' : printing,
        'print_link': mv.get_link_for_print_button( request ),
        'called_by_collapsible_page': called_by_collapsible_page,
        'default_rows_per_page': mv.default_rows_per_page,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
    } )
    return HttpResponse( t.render( c ) )
#--------------------------------------------------------------------------------
def medieval_catalogues_e( request, cat = '', pagename = 'cats' ):
    """Editable-mode entry point: medieval_catalogues() with editing enabled."""
    return medieval_catalogues( request, cat, pagename, True )
#--------------------------------------------------------------------------------
# The function cat_source() allows browsing of the index by source of medieval catalogue.
# The primary source is the type of institution (document group type), e.g. A for Augustinian Canons.
# You can also browse one location within an institution type (document group type/document group ID),
# e.g. /A/15/ for the Augustinian location 'Lanthony', which has document group ID 15.
def cat_source( request, source = '', loc = '', pagename = 'cats', called_by_editable_page = False ):
    """Browse catalogues by source institution type, optionally one location.

    `source` is a document-group-type letter (e.g. 'A' = Augustinian Canons);
    `loc` is an optional document group ID within that type (e.g. '15').
    The pair selects a pre-generated template 'cataloguelist<TYPE>[-<loc>].html'.
    """
    global searchable_fields               # shared with advanced search
    for field in searchable_fields:
        field[ "value" ] = ""              # clear stale values from earlier searches
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    global printing
    printing = mv.get_value_from_GET( request, "printing", False )
    # Sanitise path components before building a template name from them.
    if not source.isalpha():
        source = ''
        loc = ''
    else:
        source = source.upper()
    if not loc.isalnum(): loc = ''
    full_source = source
    if loc: full_source += '-%s' % loc.lower()
    doctypes = get_doctype_dropdown_options()
    # Templates were generated by cataloguesHTML.py; may need changing later.
    t = loader.get_template('authortitle/cataloguelist%s.html' % full_source )
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'source' : source,
        'location' : loc,
        'printing' : printing,
        'print_link': mv.get_link_for_print_button( request ),
        'default_rows_per_page': mv.default_rows_per_page,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
    } )
    return HttpResponse( t.render( c ) )
#--------------------------------------------------------------------------------
def cat_source_e( request, source = '', loc = '', pagename = 'cats' ):
    """Editable-mode entry point: cat_source() with editing enabled."""
    return cat_source( request, source, loc, pagename, True )
#--------------------------------------------------------------------------------
# The function results() is called either from Quick Search or from Advanced Search
def results( request, pagename = 'results', called_by_editable_page = False ):
    """Render search results (quick or advanced) from the Solr index.

    Runs the Solr query via run_solr_query(), formats the hits into an HTML
    fragment (ordered per the 'order_by' GET parameter) with pagination above
    and, when needed, below, and renders the 'results.html' template.
    """
    # Set editability status
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    # Set printing status
    global printing
    printing = mv.get_value_from_GET( request, "printing", False )
    # See if you are doing quick or advanced search
    search_type = mv.get_value_from_GET( request, "search_type", "quick" )
    # Run the Solr query
    (resultsets, number_of_records, search_term,
     solr_start, solr_rows, page_size) = run_solr_query( request )
    mv.printing = printing
    pag = mv.pagination( rows_found = number_of_records,
                         current_row = solr_start,
                         rows_per_page = solr_rows,
                         link_for_print_button = mv.get_link_for_print_button( request ),
                         link_for_download_button = mv.get_link_for_download_button( request ) )
    # Format the results into an HTML string ready for display
    order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
    result_string = get_result_string( resultsets, order_by )
    result_string = pag + newline + '<p></p>' + newline + result_string
    if number_of_records > solr_rows:  # repeat pagination at the bottom
        result_string += newline + '<p></p>' + newline + pag
    # Get a list of document types for a dropdown list
    doctypes = get_doctype_dropdown_options()
    # Pass HTML string and other data to the template for display
    t = loader.get_template( 'authortitle/results.html' )
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'results' : result_string,
        'order_by' : order_by,
        'printing' : printing,
        'print_link' : mv.get_link_for_print_button( request ),
        'default_rows_per_page': mv.default_rows_per_page,
        'number_of_records': number_of_records,
        'search_type': search_type,
        'search_term': search_term,
        'advanced_search_fields': searchable_fields,
        'solr_query': solr_query,  # module global set by run_solr_query (debugging aid)
    } )
    # NOTE(review): the original context also passed 'doctype_dropdown_options';
    # kept here for the template's search form.
    c[ 'doctype_dropdown_options' ] = doctypes
    return HttpResponse( t.render( c ) )
#--------------------------------------------------------------------------------
def results_e( request, pagename = 'results' ):
    """Editable-mode entry point: results() with editing enabled."""
    return results( request, pagename, True )
#--------------------------------------------------------------------------------
#================ End top-level functions called directly from URL ==============
#--------------------------------------------------------------------------------
## This changes links to exclude the 'editable' part of the URL
def disable_edit():
    """Switch module-level links to the non-editable URL space (no '/e' prefix)."""
    global editable, baseurl
    editable = False
    baseurl = '/authortitle'
#--------------------------------------------------------------------------------
## This changes links to include the 'editable' part of the URL
def enable_edit():
    """Switch module-level links to the editable URL space ('/e' prefix)."""
    global editable, baseurl
    editable = True
    baseurl = '/e/authortitle'
#--------------------------------------------------------------------------------
# Either run a basic Solr query (i.e. on a single search term) against default field of 'catalogues' core
# Or run an *advanced* Solr query (i.e. on a multiple search terms)
def run_solr_query( request ):
    """Build and run a Solr query against the 'catalogues' core.

    Handles both quick search (single term against the default 'text' field)
    and advanced search (AND of any combination of searchable_fields).
    As a side effect, fills in the "value" slot of each searchable_fields
    entry so the search form can be re-displayed with the user's input.

    Returns a 6-tuple:
        (resultsets, number_of_records, search_term,
         solr_start, solr_rows, page_size)
    where resultsets is the list of Solr 'docs' ([] on failure/no query).

    NOTE(review): the tail of this function (result extraction and the return
    statement) was truncated in the source and has been reconstructed from the
    caller in results(); confirm against version control.
    """
    global solr_query         # kept module-global for debugging / template display
    global searchable_fields  # this is used in advanced search
    for field in searchable_fields:
        field[ "value" ] = ""  # initialise every field value to blank
    resultsets = []
    number_of_records = 0
    search_type = ""
    search_term = solr_start = page_size = solr_query = solr_sort = solr_rows = ""
    if request.GET:  # was a search term found in GET?
        #=====================================================================
        # Get search type, records per page, start row and "order by" from GET
        #=====================================================================
        # Set search type (quick or advanced)
        search_type = mv.get_value_from_GET( request, 'search_type', 'quick' )
        if search_type not in [ 'quick', 'advanced' ]: search_type = 'quick'
        # Set page size
        page_size = mv.get_value_from_GET( request, "page_size", str( mv.default_rows_per_page ) )
        if page_size.isdigit():
            solr_rows = int( page_size )
        else:
            solr_rows = mv.default_rows_per_page
        # Set start page
        solr_start = mv.get_value_from_GET( request, "start", 0 )
        # Set "order by"
        order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
        if order_by == default_order_by:
            solr_sort = order_by + " asc"
        elif order_by == "catalogue_provenance":
            solr_sort = ",".join( catalogue_provenance_sort_list )
        elif order_by == "catalogue_date":
            solr_sort = ",".join( catalogue_date_sort_list )
        else:
            solr_sort = default_order_by + " asc"
        #=====================
        # Construct Solr query
        #=====================
        if search_type == 'quick':
            # Search on all fields via the single form field 'search_term'
            search_term = mv.get_value_from_GET( request, 'search_term' )
            if not search_term: search_term = '*'
            solr_query = mv.escape_for_solr( search_term )
            if ' ' in solr_query:
                solr_query = '(%s)' % solr_query
            if search_term == '*' or search_term == '':
                solr_query = '*:*'
            else:
                solr_query = "text:%s" % solr_query
                # Store the search term in the 'text' field for form redisplay
                for field in searchable_fields:
                    if field[ "fieldname" ] == "text":
                        field[ "value" ] = search_term
                        break
        else:
            # Advanced search on any combination of multiple searchable fields
            fields_searched = []
            for field in searchable_fields:
                fieldname = field[ "fieldname" ]
                fieldval = mv.get_value_from_GET( request, fieldname, "" )
                if fieldval == '*': fieldval = ''
                field[ "value" ] = fieldval
                if fieldval:  # they entered a query on this field
                    if fieldname in [ "q_earliest_year", "q_latest_year" ]:
                        if fieldval.isdigit():
                            query_clause = get_date_range_query( fieldname, fieldval )
                            if query_clause: fields_searched.append( query_clause )
                        else:  # non-numeric year, can't be queried on
                            field[ "value" ] = ""
                    else:
                        fieldval = mv.escape_for_solr( fieldval )
                        if ' ' in fieldval:
                            # Document type is an exact phrase; others are grouped terms
                            if fieldname == 's_document_type':
                                fieldval = '"%s"' % fieldval
                            else:
                                fieldval = '(%s)' % fieldval
                        fields_searched.append( "%s:%s" % (fieldname, fieldval) )
            if len( fields_searched ) > 0:
                solr_query = " AND ".join( fields_searched )
            else:
                solr_query = '*:*'
                # Show '*' in the 'text' field so the form reflects the match-all query
                for field in searchable_fields:
                    if field[ "fieldname" ] == 'text':
                        field[ "value" ] = "*"
                        break
        s_para = { 'q'    : solr_query,
                   'wt'   : s_wt,
                   'start': solr_start,
                   'rows' : solr_rows,
                   'sort' : solr_sort }
        r = MLGBsolr()
        r.solrresults( s_para, facet, 'catalogues' )
        if r.connstatus and r.s_result:
            resultsets = r.s_result.get( 'docs' )
            number_of_records = r.s_result.get( 'numFound' )
    return (resultsets, number_of_records, search_term,
            solr_start, solr_rows, page_size)
def get_date_range_query( fieldname, fieldval ):
    """Return a Solr range clause for a year bound, or '' for other fields.

    An 'earliest year' bound matches documents that END on or after it; a
    'latest year' bound matches documents that START on or before it.  Years
    are zero-padded to 4 digits to match the indexed string values.
    (Query strings reconstructed from a damaged source line — confirm field
    names against the Solr schema.)
    """
    q = ''
    if len( fieldval ) < 4: fieldval = fieldval.rjust( 4, '0' )
    if fieldname == 'q_earliest_year':
        q = 's_document_end_year:["%s" TO *]' % fieldval
    elif fieldname == 'q_latest_year':
        q = 's_document_start_year:[* TO "%s"]' % fieldval
    return q
def extract_from_result( record ):
    """Unpack one Solr result document (a dict) into a fixed 38-tuple.

    'id' and 'solr_id_sort' are required keys; every other field defaults to
    '' when absent, except s_mlgb_book_id which defaults to [] (it is
    multi-valued).  Note the tuple order is NOT the extraction order:
    s_entry_letter and the document-year fields come at the end.
    """
    solr_id = record[ "id" ]
    solr_id_sort = record[ "solr_id_sort" ]
    sql_entry_id = record.get( "sql_entry_id", "" )
    sql_entry_book_count = record.get( "sql_entry_book_count", "" )
    sql_copy_count = record.get( "sql_copy_count", "" )
    s_entry_name = record.get( "s_entry_name", "" )
    s_entry_xref_name = record.get( "s_entry_xref_name", "" )
    s_author_name = record.get( "s_author_name", "" )
    s_entry_biblio_line = record.get( "s_entry_biblio_line", "" )
    s_entry_biblio_block = record.get( "s_entry_biblio_block", "" )
    s_entry_letter = record.get( "s_entry_letter", "" )
    s_title_of_book = record.get( "s_title_of_book", "" )
    s_xref_title_of_book = record.get( "s_xref_title_of_book", "" )
    s_role_in_book = record.get( "s_role_in_book", "" )
    s_problem = record.get( "s_problem", "" )
    s_book_biblio_line = record.get( "s_book_biblio_line", "" )
    s_copy_code = record.get( "s_copy_code", "" )
    s_copy_notes = record.get( "s_copy_notes", "" )
    s_printed_yn = record.get( "s_printed_yn", "" )
    s_survives_yn = record.get( "s_survives_yn", "" )
    s_uncertain_yn = record.get( "s_uncertain_yn", "" )
    s_duplicate_title_yn = record.get( "s_duplicate_title_yn", "" )
    s_document_code = record.get( "s_document_code", "" )
    s_document_code_sort = record.get( "s_document_code_sort", "" )
    s_seqno_in_document = record.get( "s_seqno_in_document", "" )
    s_seqno_in_doc_sort = record.get( "s_seqno_in_doc_sort", "" )
    s_document_name = record.get( "s_document_name", "" )
    d_document_start = record.get( "d_document_start", "" )
    d_document_end = record.get( "d_document_end", "" )
    s_document_start_year = record.get( "s_document_start_year", "" )
    s_document_end_year = record.get( "s_document_end_year", "" )
    s_document_date_in_words = record.get( "s_document_date_in_words", "" )
    s_document_type = record.get( "s_document_type", "" )
    s_library_type = record.get( "s_library_type", "" )
    s_library_loc = record.get( "s_library_loc", "" )
    s_library_type_code = record.get( "s_library_type_code", "" )
    s_library_loc_id = record.get( "s_library_loc_id", "" )
    s_mlgb_book_id = record.get( "s_mlgb_book_id", [] )  # multi-valued field
    return (solr_id,
            solr_id_sort,
            sql_entry_id,
            sql_entry_book_count,
            sql_copy_count,
            s_entry_name,
            s_entry_xref_name,
            s_author_name,
            s_entry_biblio_line,
            s_entry_biblio_block,
            s_title_of_book,
            s_xref_title_of_book,
            s_role_in_book,
            s_problem,
            s_book_biblio_line,
            s_copy_code,
            s_copy_notes,
            s_printed_yn,
            s_survives_yn,
            s_uncertain_yn,
            s_duplicate_title_yn,
            s_document_code,
            s_document_code_sort,
            s_seqno_in_document,
            s_seqno_in_doc_sort,
            s_document_name,
            d_document_start,
            d_document_end,
            s_document_type,
            s_library_type,
            s_library_loc,
            s_library_type_code,
            s_library_loc_id,
            s_mlgb_book_id,
            s_entry_letter,
            s_document_start_year,
            s_document_end_year,
            s_document_date_in_words,
            )
def get_result_string( results, order_by ):
    """Dispatch result formatting according to the requested ordering.

    Returns an HTML fragment; empty or missing results yield just '<p></p>'.
    """
    if not results: return '<p></p>' + newline  # also guards against None
    if order_by == 'catalogue_provenance':
        return get_result_string_by_catalogue_provenance( results )
    elif order_by == 'catalogue_date':
        return get_result_string_by_catalogue_date( results )
    else:
        return get_result_string_by_author_title( results )
def get_result_string_by_author_title( results ):
    """Format results as a nested list: author/title entry > book > copies.

    Assumes `results` is sorted by solr_id_sort so that rows for the same
    entry, and books within an entry, are adjacent; the prev_* trackers
    detect group changes.  (Nesting reconstructed from lost indentation —
    confirm against version control.)
    """
    html = '<ul><!-- start list of author/title entries -->' + newline
    prev_entry_id = ''
    prev_entry_book_count = ''
    prev_title_of_book = ''
    prev_copy_code = ''
    for row in results:
        new_entry = False
        new_book = False
        (solr_id, solr_id_sort,
         sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
         s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
         s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
         s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
         s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
         s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
         s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
         s_document_date_in_words) = extract_from_result( row )
        # Detect group boundaries: a new entry implies a new book.
        if sql_entry_id != prev_entry_id:
            new_entry = True
            new_book = True
        elif sql_entry_id == prev_entry_id and sql_entry_book_count != prev_entry_book_count:
            new_book = True
        if new_entry:
            if prev_entry_id:  # close the previous entry's open lists
                html += '</ul><!-- end catalogue entry list -->' + newline
                html += '</ul><!-- end book list -->' + newline
                html += '</li><!-- end author/title entry -->' + newline
            html += newline + '<li class="medieval_cat_result"><!-- start author/title entry -->' + newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name,
                                                      s_entry_biblio_line, s_entry_biblio_block,
                                                      sql_entry_id, s_entry_letter )
            html += '<ul><!-- start book list -->' + newline
        if new_book:
            prev_copy_code = ''
            if not new_entry:  # close the previous book within the same entry
                html += '</ul><!-- end catalogue entry list -->' + newline
                if prev_title_of_book: html += '</li><!-- end book -->' + newline
            # Suppress the title when it merely repeats the entry name.
            if s_title_of_book.strip() == s_entry_name.strip():
                s_title_of_book = ''
            if s_title_of_book.strip(): html += '<li class="medieval_cat_result"><!-- start book -->' + newline
            prev_title_of_book = s_title_of_book.strip()
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book,
                                                      s_problem, s_book_biblio_line )
            html += '<ul><!-- start list of catalogue entries -->' + newline
        if sql_copy_count:
            if s_copy_code != prev_copy_code:  # skip duplicate copy rows
                html += '<li class="medieval_cat_result"><!-- start catalogue entry -->' + newline
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id,
                                         s_entry_name, s_title_of_book )
                html += newline + '<ul>' + newline
                if s_library_type:
                    html += '<li>From '
                    html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
                    if s_document_code and s_document_name:
                        html += ': %s' % get_document_link( s_document_code, s_document_name, s_document_type )
                    html += '</li>' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul>' + newline
                html += '</li><!-- end catalogue entry -->' + newline
        prev_entry_id = sql_entry_id
        prev_entry_book_count = sql_entry_book_count
        prev_copy_code = s_copy_code
    # Close whatever is still open after the last row.
    html += '</ul><!-- end catalogue entry list -->' + newline
    html += '</ul><!-- end book list -->' + newline
    html += '</li><!-- end author/title entry -->' + newline
    html += '</ul><!-- end author/title list -->' + newline
    return html
def get_result_string_by_catalogue_provenance( results ):
    """Format results grouped by library (A) > document (B) > entries (C).

    Assumes `results` is sorted per catalogue_provenance_sort_list.  Rows with
    no copies (sql_copy_count falsy) are rendered as cross-reference entries.
    (Nesting and the cross-reference branch reconstructed from a damaged
    source line — confirm against version control.)
    """
    html = '<ul><!-- start list of library types (A) -->' + newline
    prev_library = ''
    prev_document_code = ''
    prev_copy_code = ''
    for row in results:
        new_library = False
        new_document_code = False
        (solr_id, solr_id_sort,
         sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
         s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
         s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
         s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
         s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
         s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
         s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
         s_document_date_in_words) = extract_from_result( row )
        curr_library = s_library_type + s_library_loc
        # Detect group boundaries: a new library implies a new document.
        if curr_library != prev_library:
            new_library = True
            new_document_code = True
        elif curr_library == prev_library and s_document_code != prev_document_code:
            new_document_code = True
        if new_library:
            if prev_library:  # close the previous library's open lists
                html += '</ul><!-- end list of catalogue entries (C) -->' + newline
                html += '</ul><!-- end document list (B) -->' + newline
                html += '</li><!-- end library list-item (A) -->' + newline
            html += newline + '<li class="medieval_cat_result"><!-- start library list-item (A) -->' + newline
            html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
            html += newline + '<ul><!-- start document list (B) -->' + newline
        if new_document_code:
            prev_copy_code = ''
            if not new_library:  # close the previous document within the same library
                html += newline + '</ul><!-- end list of catalogue entries (C) -->' + newline
                html += newline + '</li><!-- end document list-item (B) -->' + newline
            html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
            if s_document_code and s_document_name:
                html += get_document_link( s_document_code, s_document_name, s_document_type )
            else:
                html += '[no document found]'
            html += newline + '<ul><!-- start list of catalogue entries (C) -->' + newline
        if sql_copy_count:
            if s_copy_code != prev_copy_code:  # skip duplicate copy rows
                html += newline + '<li class="medieval_cat_result"><!-- start catalogue entry list-item (C) -->'
                html += newline
                # Hover text: library type, plus location unless already embedded in the type name.
                hover_library = s_library_type
                if not s_library_type.endswith( s_library_loc ):
                    hover_library += ': %s' % s_library_loc
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id,
                                         hover_library, s_document_name )
                html += '<br />'
                html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name,
                                                          s_entry_biblio_line, s_entry_biblio_block,
                                                          sql_entry_id, s_entry_letter )
                if s_title_of_book.strip() == s_entry_name.strip():
                    s_title_of_book = ''
                if s_title_of_book and not s_entry_biblio_block: html += '<br />'
                html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book,
                                                          s_problem, s_book_biblio_line )
                html += newline + '<ul><!-- further details list (D) -->' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul><!-- end further details list (D) -->' + newline
                html += newline + '</li><!-- end catalogue entry list-item (C) -->' + newline
        else:
            # No copies: this row is a cross-reference entry.
            html += newline + '<li class="medieval_cat_result"><!-- start cross-reference entry (C) -->'
            html += newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name,
                                                      s_entry_biblio_line, s_entry_biblio_block,
                                                      sql_entry_id, s_entry_letter )
            if s_title_of_book.strip() == s_entry_name.strip():
                s_title_of_book = ''
            if s_title_of_book and not s_entry_biblio_block: html += '<br />'
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book,
                                                      s_problem, s_book_biblio_line )
            html += newline + '</li><!-- end cross-reference entry (C) -->' + newline
        prev_library = curr_library
        prev_document_code = s_document_code
        prev_copy_code = s_copy_code
    # Close whatever is still open after the last row.
    html += newline
    html += '</ul><!-- end list of catalogue entries (C) -->' + newline
    html += '</ul><!-- end list of documents (B) -->' + newline
    html += '</li><!-- end library list-item (A) -->' + newline
    html += '</ul><!-- end list of libraries (A) -->' + newline
    return html
def get_result_string_by_catalogue_date( results ):
    """Format results grouped by century (A) > document (B) > entries (C).

    Assumes `results` is sorted per catalogue_date_sort_list.  Entries are
    laid out in a table so the document date-in-words sits beside each copy;
    rows with no copies are rendered as cross-reference entries.
    (Nesting reconstructed from lost indentation — confirm against version
    control.)
    """
    html = '<ul><!-- start list of centuries (A) -->' + newline
    prev_century = ''
    prev_document_code = ''
    prev_copy_code = ''
    for row in results:
        new_century = False
        new_document_code = False
        (solr_id, solr_id_sort,
         sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
         s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
         s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
         s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
         s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
         s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
         s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
         s_document_date_in_words) = extract_from_result( row )
        curr_century = get_century_from_date( d_document_start )
        # Detect group boundaries: a new century implies a new document.
        if curr_century != prev_century:
            new_century = True
            new_document_code = True
        elif curr_century == prev_century and s_document_code != prev_document_code:
            new_document_code = True
        if new_century:
            if prev_century:  # close the previous century's open lists
                html += '</table><!-- end list of catalogue entries (C) -->' + newline
                html += '</ul><!-- end document list (B) -->' + newline
                html += '</li><!-- end century list-item (A) -->' + newline
            html += newline + '<li class="medieval_cat_result"><!-- start century list-item (A) -->' + newline
            html += '<h3>' + get_century_desc( curr_century ) + '</h3>'
            html += newline + '<ul><!-- start document list (B) -->' + newline
        if new_document_code:
            prev_copy_code = ''
            if not new_century:  # close the previous document within the same century
                html += newline + '</table><!-- end list of catalogue entries (C) -->' + newline
                html += newline + '</li><!-- end document list-item (B) -->' + newline
            html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
            if s_document_code and s_document_name:
                html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
                html += ': ' + get_document_link( s_document_code, s_document_name, s_document_type )
            else:
                html += '[no document found]'
            html += newline + '<table class="century">'
            html += '<!-- start list of catalogue entries (C) -->' + newline
        if sql_copy_count:
            if s_copy_code != prev_copy_code:  # skip duplicate copy rows
                html += newline
                html += '<tr><!-- start catalogue entry table row (C) -->'
                html += '<td class="medieval_cat_result"><em>'
                html += s_document_date_in_words
                html += '</em></td>'
                html += newline
                html += '<td class="medieval_cat_result">'
                html += newline
                # Hover text: library type, plus location unless already embedded in the type name.
                hover_library = s_library_type
                if not s_library_type.endswith( s_library_loc ):
                    hover_library += ': %s' % s_library_loc
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id,
                                         hover_library, s_document_name )
                html += '<br />'
                html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name,
                                                          s_entry_biblio_line, s_entry_biblio_block,
                                                          sql_entry_id, s_entry_letter )
                if s_title_of_book.strip() == s_entry_name.strip():
                    s_title_of_book = ''
                if s_title_of_book and not s_entry_biblio_block: html += '<br />'
                html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book,
                                                          s_problem, s_book_biblio_line )
                html += newline + '<ul><!-- further details list (D) -->' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul><!-- end further details list (D) -->' + newline
                html += newline + '</td></tr><!-- end catalogue entry row (C) -->' + newline
        else:
            # No copies: this row is a cross-reference entry (empty date cell).
            html += '<tr><td></td><td class="medieval_cat_result">'
            html += '<!-- start cross-reference entry (C) -->'
            html += newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name,
                                                      s_entry_biblio_line, s_entry_biblio_block,
                                                      sql_entry_id, s_entry_letter )
            if s_title_of_book.strip() == s_entry_name.strip():
                s_title_of_book = ''
            if s_title_of_book and not s_entry_biblio_block: html += '<br />'
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book,
                                                      s_problem, s_book_biblio_line )
            html += newline + '</td></tr><!-- end cross-reference entry (C) -->' + newline
        prev_century = curr_century
        prev_document_code = s_document_code
        prev_copy_code = s_copy_code
    # Close whatever is still open after the last row.
    html += newline
    html += '</table><!-- end list of catalogue entries (C) -->' + newline
    html += '</ul><!-- end list of documents (B) -->' + newline
    html += '</li><!-- end century list-item (A) -->' + newline
    html += '</ul><!-- end list of centuries (A) -->' + newline
    return html
def get_century_from_date( date_string ):
    """Return the century of a date string as a string, or 'undated'.

    The year is taken from the first 4 characters (e.g. '1401-01-01' -> 1401,
    century '15').  Anything non-numeric or shorter than 4 characters yields
    'undated'.  A leading zero is stripped so e.g. '0999' -> century '10'.
    """
    the_year = ''
    date_string = str( date_string )
    if len( date_string ) >= 4: the_year = date_string[ 0 : 4 ]
    if not the_year.isdigit(): return 'undated'
    if the_year.startswith( '0' ): the_year = the_year[ 1 : ]
    century = int( math.floor( int( the_year ) / 100 ) + 1 )
    return str( century )
def get_century_desc( century ):
    """Return a display heading for a century string, e.g. '15' -> '15th century'.

    'undated' (any case) maps to 'Undated'; unrecognised input yields ''.
    NOTE(review): the original had a separate branch for centuries >= 20 whose
    body was lost in the source; they are treated like any other century here —
    confirm the intended wording against version control.
    """
    century_desc = ''  # initialised so unrecognised input cannot raise NameError
    if century.isdigit():
        century_desc = '%sth century' % century
    elif century.lower() == 'undated':
        century_desc = 'Undated'
    return century_desc
def get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name,
                                      s_entry_biblio_line, s_entry_biblio_block,
                                      sql_entry_id, s_entry_letter ):
    """Return HTML linking an entry name to its place in the browse index,
    followed by its bibliography.

    Short biblio blocks are shown inline; long ones (over
    biblio_block_line_length visible characters) are hidden behind a
    JavaScript expand/collapse 'manicule' button keyed on solr_id.
    (The 'def' line was missing from the source; the signature is
    reconstructed from the call sites in this file.)
    """
    if s_entry_letter == 'I/J': s_entry_letter = 'IJ'  # I and J share one index page
    entry_href = '%s/browse/%s/#entry%s_anchor' % (baseurl, s_entry_letter, sql_entry_id)
    html = '<a href="%s" title="%s">' % (entry_href, s_entry_name)
    html += s_entry_name
    html += '</a>'
    if s_entry_xref_name: html += ' %s %s' % (right_arrow, s_entry_xref_name)
    if s_entry_biblio_line: html += ': ' + s_entry_biblio_line + newline
    if s_entry_biblio_block:
        # Measure the visible text (markup stripped) to decide inline vs collapsible.
        display_chars = s_entry_biblio_block.replace( '<span class="biblio_block">', "" )
        display_chars = display_chars.replace( '</span>', "" )
        if len( display_chars ) > biblio_block_line_length:
            show_biblio_block = False
        else:
            show_biblio_block = True
        if show_biblio_block:
            html += newline + '<div>'
            html += s_entry_biblio_block
            html += '</div>' + newline
        else:
            # Emit a per-entry toggle function plus a button and a hidden div.
            pointing_at = 'bibliographical details'
            html += newline + '<script type="text/javascript">' + newline
            html += "function expand_collapse_biblio_block_%s() {" % solr_id
            html += newline
            html += ' var the_block = document.getElementById( "biblio_block_%s" );' % solr_id
            html += newline
            html += ' var the_button = document.getElementById( "biblio_button_%s" );' % solr_id
            html += newline
            html += ' if( the_block.style.display == "block" ) {'
            html += newline
            html += ' the_block.style.display = "none";'
            html += newline
            html += " the_button.innerHTML = '%s';" % mv.manicule_pointing_right_img( pointing_at )
            html += newline
            html += ' }'
            html += newline
            html += ' else {'
            html += newline
            html += ' the_block.style.display = "block";'
            html += newline
            html += " the_button.innerHTML = '%s';" % mv.manicule_pointing_down_img( pointing_at )
            html += newline
            html += ' }'
            html += newline
            html += '}'
            html += newline
            html += '</script>' + newline
            html += '<button id="biblio_button_%s" ' % solr_id
            html += ' class="manicule" onclick="expand_collapse_biblio_block_%s()" >' % solr_id
            html += mv.manicule_pointing_right_img( pointing_at )
            html += '</button>' + newline
            html += '<br />' + newline
            html += '<div id="biblio_block_%s" style="display:none">' % solr_id
            html += s_entry_biblio_block
            html += '<p></p>' + newline
            html += '</div>'
            html += newline
    return html
def get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book,
                                      s_problem, s_book_biblio_line ):
    """Return HTML for a book title with optional problem/role prefixes,
    bibliography and cross-reference title.

    The title is suppressed when it merely duplicates the cross-reference
    title.  NOTE(review): source indentation was lost; the biblio line is
    nested under the shown title here — confirm against version control.
    """
    html = ''
    if s_problem: html += s_problem + ' '
    if s_role_in_book: html += s_role_in_book + ' '
    if s_title_of_book and s_title_of_book.strip() != s_xref_title_of_book.strip():
        html += s_title_of_book
        if s_book_biblio_line: html += ": " + s_book_biblio_line
    if s_xref_title_of_book: html += "%s %s" % (right_arrow, s_xref_title_of_book)
    return html
def get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn ):
    """Return `<li>` items for each 'y' flag on a catalogue entry ('' if none)."""
    html = ''
    if s_survives_yn == 'y':
        html += '<li>Surviving book</li>' + newline
    if s_printed_yn == 'y':
        html += '<li>Printed book</li>' + newline
    if s_uncertain_yn == 'y':
        html += '<li>Uncertain identification</li>' + newline
    if s_duplicate_title_yn == 'y':
        html += '<li>Could refer to one of several works with the same title</li>' + newline
    return html
def get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id,
                     hover_title_part_1 = '', hover_title_part_2 = '' ):
    """Return HTML for one copy code, linked to its MLGB book record(s) when
    known, otherwise as a hover/click span showing the supplied title parts.

    s_mlgb_book_id is a (possibly empty) list of MLGB book IDs; each yields a
    link carrying the copy code.  Copy notes, if any, are appended as text.
    """
    html = ''
    editable_link = ''
    if editable: editable_link = '/e'
    # Build the hover text, avoiding duplication of identical parts.
    hover_title = hover_title_part_1
    if hover_title_part_2.strip() and hover_title_part_2.strip() != hover_title_part_1.strip():
        hover_title += ' -- %s' % hover_title_part_2
    # Strip markup/quotes that would break the title attribute ...
    hover_title = hover_title.replace( '<i>', '' )
    hover_title = hover_title.replace( '</i>', '' )
    hover_title = hover_title.replace( '"', "'" )
    # ... and newlines/quotes that would break the onclick JavaScript string.
    onclick_title = hover_title.replace( newline, ' ' )
    onclick_title = onclick_title.replace( carriage_return, '' )
    onclick_title = onclick_title.replace( "'", "\\'" )
    # Either link to the MLGB book record(s) ...
    for book_id in s_mlgb_book_id:
        html += '<a href="%s%s/%s/" ' % (editable_link, mlgb_book_url, book_id)
        html += ' title="Further details of book" '
        html += ' class="link_from_index_to_book">'
        html += s_copy_code
        html += '</a> '
    # ... or emit a span which you can hover over / click for a bit more info.
    if not html:
        html += '<span title="%s" class="index_catalogue_entry" ' % hover_title
        html += ' onclick="alert(' + "'" + onclick_title + "'" + ')">'
        html += s_copy_code
        html += '</span>'
    # Add description/notes if there are any,
    # e.g. 'sermones Ailmeri prioris in glosis' or '(1 copy) = K5.7'
    if s_copy_notes.strip(): html += ' %s' % s_copy_notes
    return html
#--------------------------------------------------------------------------------
def get_library_link( library_type_code, library_type_name, library_loc_id, library_loc_name ): #{
  # Render a link to the library/source page, optionally followed by a
  # second link to the specific location within it.
  if not library_type_code or not library_type_name:
    return '[no library found]'
  html = ''
  editable_link = ''
  # In editable (admin) mode all URLs are prefixed with /e.
  if editable: editable_link = '/e'
  library_type_url = "%s%s/source/%s/" % (editable_link, medieval_catalogues_url, library_type_code)
  html += '<a href="%s" title="%s">%s</a>' % (library_type_url, library_type_name, library_type_name)
  if library_loc_id and library_loc_name: #{
    # Skip the location link when its name is already the tail of the type
    # name, to avoid showing the same text twice.
    if not library_type_name.endswith( library_loc_name ): #{ e.g HENRY DE KIRKESTEDE gets repeated twice
      library_loc_url = "%s%s/" % (library_type_url, library_loc_id)
      html += ': <a href="%s" title="%s">%s</a>' % (library_loc_url, library_loc_name, library_loc_name)
    #}
  #}
  return html
#}
#--------------------------------------------------------------------------------
def get_document_link( document_code, document_name, s_document_type = '' ): #{
  # Render a hyperlink to a medieval-catalogue document record.
  # Returns '' when either the code or the display name is missing.
  # The document type parameter is accepted but intentionally not shown:
  # it is already part of the document name.
  if not document_code:
    return ''
  if not document_name:
    return ''
  prefix = '/e' if editable else ''
  url = "%s%s/%s/" % (prefix, medieval_catalogues_url, document_code)
  return '<a href="%s" title="%s">%s</a>' % (url, document_name, document_name)
#}
#--------------------------------------------------------------------------------
def get_doctype_dropdown_options(): #{
  # Get a list of document types for a dropdown list.
  # The first entry is an empty string so the dropdown can offer "no filter".
  doctypes = [ "" ]
  the_cursor = connection.cursor()
  try:
    statement = "select distinct document_type from index_medieval_documents order by document_type"
    the_cursor.execute( statement )
    sql_doctypes = the_cursor.fetchall()
    for sql_row in sql_doctypes:
      doctypes.append( sql_row[ 0 ] )
  finally:
    # Close the cursor even if the query raises, so DB cursors do not leak.
    the_cursor.close()
  return doctypes
#}
#--------------------------------------------------------------------------------
| true | true |
f72142840be762476a0be5e21baec4a6ef055bf3 | 939 | py | Python | lists/models.py | danrneal/superlists | d8e956720429915eaee732020a2c51b884a3d143 | [
"MIT"
] | null | null | null | lists/models.py | danrneal/superlists | d8e956720429915eaee732020a2c51b884a3d143 | [
"MIT"
] | null | null | null | lists/models.py | danrneal/superlists | d8e956720429915eaee732020a2c51b884a3d143 | [
"MIT"
] | null | null | null | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
class List(models.Model):
    """A to-do list, optionally owned by one user and shareable with others."""
    # Optional owner; anonymous lists are allowed (blank/null).
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
    # Users this list has been shared with (reverse accessor: user.shared_lists).
    shared_with = models.ManyToManyField(
        settings.AUTH_USER_MODEL, related_name='shared_lists'
    )
    @property
    def name(self):
        """Display name: the text of the list's first item.

        NOTE(review): assumes the list has at least one item (create_new
        guarantees this); on an empty list, item_set.first() is None and
        this raises AttributeError.
        """
        return self.item_set.first().text
    def get_absolute_url(self):
        """URL of this list's detail page (the 'view_list' route)."""
        return reverse('view_list', args=[self.id])
    @staticmethod
    def create_new(first_item_text, owner=None):
        """Create a list plus its first Item in one call; return the list."""
        list_ = List.objects.create(owner=owner)
        Item.objects.create(text=first_item_text, list=list_)
        return list_
class Item(models.Model):
    """A single line of text belonging to one List."""
    # Item text; duplicates are prevented per-list (see Meta.unique_together).
    text = models.TextField(default='')
    # Owning list; note this field name shadows the builtin 'list'.
    list = models.ForeignKey(List, default=None)
    class Meta:
        # Stable ordering by insertion (id); text must be unique within a list.
        ordering = ('id',)
        unique_together = ('list', 'text')
    def __str__(self):
        return self.text
| 26.083333 | 78 | 0.681576 | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
class List(models.Model):
    """A to-do list, optionally owned by one user and shareable with others."""
    # Optional owner; anonymous lists are allowed (blank/null).
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
    # Users this list has been shared with (reverse accessor: user.shared_lists).
    shared_with = models.ManyToManyField(
        settings.AUTH_USER_MODEL, related_name='shared_lists'
    )
    @property
    def name(self):
        """Display name: the text of the list's first item.

        NOTE(review): assumes the list has at least one item (create_new
        guarantees this); on an empty list, item_set.first() is None and
        this raises AttributeError.
        """
        return self.item_set.first().text
    def get_absolute_url(self):
        """URL of this list's detail page (the 'view_list' route)."""
        return reverse('view_list', args=[self.id])
    @staticmethod
    def create_new(first_item_text, owner=None):
        """Create a list plus its first Item in one call; return the list."""
        list_ = List.objects.create(owner=owner)
        Item.objects.create(text=first_item_text, list=list_)
        return list_
class Item(models.Model):
    """A single line of text belonging to one List."""
    # Item text; duplicates are prevented per-list (see Meta.unique_together).
    text = models.TextField(default='')
    # Owning list; note this field name shadows the builtin 'list'.
    list = models.ForeignKey(List, default=None)
    class Meta:
        # Stable ordering by insertion (id); text must be unique within a list.
        ordering = ('id',)
        unique_together = ('list', 'text')
    def __str__(self):
        return self.text
| true | true |
f721429b03aedf2a6362c8a4270184f7d7d464c4 | 4,555 | py | Python | project/ionicv1/main.py | Bhanditz/JavaScriptEnhancements | f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e | [
"MIT"
] | null | null | null | project/ionicv1/main.py | Bhanditz/JavaScriptEnhancements | f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e | [
"MIT"
] | null | null | null | project/ionicv1/main.py | Bhanditz/JavaScriptEnhancements | f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e | [
"MIT"
] | null | null | null | import sublime, sublime_plugin
import os, webbrowser, shlex, json, collections
def ionicv1_ask_custom_path(project_path, type):
  # Prompt the user for the Ionic v1 CLI executable path, then either
  # bootstrap a brand-new project or just write the plugin settings file,
  # depending on which hook triggered us.
  def on_done(ionicv1_custom_path):
    if type in ("create_new_project", "add_project_type"):
      ionicv1_prepare_project(project_path, ionicv1_custom_path)
    else:
      add_ionicv1_settings(project_path, ionicv1_custom_path)
  sublime.active_window().show_input_panel("Ionic v1 CLI custom path", "ionic", on_done, None, None)
def add_ionicv1_settings(working_directory, ionicv1_custom_path):
  # Write the per-project Ionic v1 plugin settings and teach Flow to ignore
  # the Cordova/Ionic build directories.
  project_path = working_directory
  settings = get_project_settings()
  if settings :
    project_path = settings["project_dir_name"]
  # Append the Ionic/Cordova output folders to the [ignore] section of the
  # project's .flowconfig (rewrites the file in place).
  flowconfig_file_path = os.path.join(project_path, ".flowconfig")
  with open(flowconfig_file_path, 'r+', encoding="utf-8") as file:
    content = file.read()
    content = content.replace("[ignore]", """[ignore]
<PROJECT_ROOT>/platforms/.*
<PROJECT_ROOT>/hooks/.*
<PROJECT_ROOT>/plugins/.*
<PROJECT_ROOT>/resources/.*""")
    file.seek(0)
    file.truncate()
    file.write(content)
  # Load the plugin's default config (order preserved so the JSON written
  # back keeps a stable key order) and record the chosen CLI path.
  PROJECT_SETTINGS_FOLDER_PATH = os.path.join(project_path, PROJECT_SETTINGS_FOLDER_NAME)
  default_config = json.loads(open(os.path.join(PROJECT_FOLDER, "ionicv1", "default_config.json")).read(), object_pairs_hook=collections.OrderedDict)
  default_config["working_directory"] = working_directory
  default_config["cli_custom_path"] = ionicv1_custom_path
  ionicv1_settings = os.path.join(PROJECT_SETTINGS_FOLDER_PATH, "ionicv1_settings.json")
  with open(ionicv1_settings, 'w+') as file:
    file.write(json.dumps(default_config, indent=2))
def ionicv1_prepare_project(project_path, ionicv1_custom_path):
  # Scaffold a new Ionic v1 project in project_path via the CLI, move the
  # generated files up into the project root, then write the plugin settings.
  terminal = Terminal(cwd=project_path)
  if sublime.platform() != "windows":
    # Re-open the project in Sublime afterwards, unless it is already open.
    open_project = ["&&", shlex.quote(sublime_executable_path()), shlex.quote(get_project_settings(project_path)["project_file_name"])] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
    terminal.run([shlex.quote(ionicv1_custom_path), "start", "myApp", "blank", "--type", "ionic1", ";", "mv", "./myApp/{.[!.],}*", "./", ";", "rm", "-rf", "myApp"] + open_project)
  else:
    open_project = [sublime_executable_path(), get_project_settings(project_path)["project_file_name"], "&&", "exit"] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
    # Windows has no brace globbing; a helper batch file moves the files.
    terminal.run([ionicv1_custom_path, "start", "myApp", "blank", "--type", "ionic1", "&", os.path.join(WINDOWS_BATCH_FOLDER, "move_all.bat"), "myApp", ".", "&", "rd", "/s", "/q", "myApp"])
    if open_project:
      terminal.run(open_project)
  add_ionicv1_settings(project_path, ionicv1_custom_path)
# Wire the Ionic v1 handlers into the plugin's project lifecycle hooks.
Hook.add("ionicv1_after_create_new_project", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_configuration", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_type", ionicv1_ask_custom_path)
class enable_menu_ionicv1EventListener(enable_menu_project_typeEventListener):
  # Toggles the Ionic v1 menu entries depending on the project type by
  # swapping between the enabled and disabled .sublime-menu files.
  project_type = "ionicv1"
  path = os.path.join(PROJECT_FOLDER, "ionicv1", "Main.sublime-menu")
  path_disabled = os.path.join(PROJECT_FOLDER, "ionicv1", "Main_disabled.sublime-menu")
class ionicv1_cliCommand(manage_cliCommand):
  """Runs Ionic v1 CLI commands (run/compile/build/prepare/serve),
  appending the user's per-platform options from ionicv1_settings."""
  cli = "ionic"
  custom_name = "ionicv1"
  settings_name = "ionicv1_settings"
  def prepare_command(self, **kwargs):
    # Ask for the target platform first when the command template needs one.
    if ":platform" in self.command:
      self.window.show_input_panel("Platform:", "", self.platform_on_done, None, None)
    else :
      self._run()
  def platform_on_done(self, platform):
    # Substitute the user-supplied platform into the command template.
    self.placeholders[":platform"] = shlex.quote(platform.strip())
    self.command = self.substitute_placeholders(self.command)
    self._run()
  def _run(self):
    try:
      # Append the extra CLI options configured for this sub-command.
      self.command = {
        'run': lambda : self.command + self.settings["ionicv1_settings"]["platform_run_options"][self.command[2].replace('--', '')][self.command[1]],
        'compile': lambda : self.command + self.settings["ionicv1_settings"]["platform_compile_options"][self.command[2].replace('--', '')][self.command[1]],
        'build': lambda : self.command + self.settings["ionicv1_settings"]["platform_build_options"][self.command[2].replace('--', '')][self.command[1]],
        # BUG FIX: previously read "ionicv2_settings" (copy-paste from the
        # ionic v2 module); the resulting KeyError was silently swallowed
        # below, so the 'prepare' options were never applied.
        'prepare': lambda : self.command + self.settings["ionicv1_settings"]["platform_prepare_options"][self.command[1]],
        'serve': lambda : self.command + self.settings["ionicv1_settings"]["serve_options"]
      }[self.command[0]]()
    except KeyError as err:
      # Unknown sub-command, or no extra options configured: run as-is.
      pass
    except Exception as err:
      import traceback  # local import: traceback is not imported at module level
      print(traceback.format_exc())
      pass
    super(ionicv1_cliCommand, self)._run()
| 47.947368 | 304 | 0.730626 | import sublime, sublime_plugin
import os, webbrowser, shlex, json, collections
def ionicv1_ask_custom_path(project_path, type):
  # Prompt the user for the Ionic v1 CLI executable path, then either
  # bootstrap a brand-new project or just write the plugin settings file,
  # depending on which hook triggered us.
  def on_done(ionicv1_custom_path):
    if type in ("create_new_project", "add_project_type"):
      ionicv1_prepare_project(project_path, ionicv1_custom_path)
    else:
      add_ionicv1_settings(project_path, ionicv1_custom_path)
  sublime.active_window().show_input_panel("Ionic v1 CLI custom path", "ionic", on_done, None, None)
def add_ionicv1_settings(working_directory, ionicv1_custom_path):
  # Write the per-project Ionic v1 plugin settings and teach Flow to ignore
  # the Cordova/Ionic build directories.
  project_path = working_directory
  settings = get_project_settings()
  if settings :
    project_path = settings["project_dir_name"]
  # Append the Ionic/Cordova output folders to the [ignore] section of the
  # project's .flowconfig (rewrites the file in place).
  flowconfig_file_path = os.path.join(project_path, ".flowconfig")
  with open(flowconfig_file_path, 'r+', encoding="utf-8") as file:
    content = file.read()
    content = content.replace("[ignore]", """[ignore]
<PROJECT_ROOT>/platforms/.*
<PROJECT_ROOT>/hooks/.*
<PROJECT_ROOT>/plugins/.*
<PROJECT_ROOT>/resources/.*""")
    file.seek(0)
    file.truncate()
    file.write(content)
  # Load the plugin's default config (order preserved so the JSON written
  # back keeps a stable key order) and record the chosen CLI path.
  PROJECT_SETTINGS_FOLDER_PATH = os.path.join(project_path, PROJECT_SETTINGS_FOLDER_NAME)
  default_config = json.loads(open(os.path.join(PROJECT_FOLDER, "ionicv1", "default_config.json")).read(), object_pairs_hook=collections.OrderedDict)
  default_config["working_directory"] = working_directory
  default_config["cli_custom_path"] = ionicv1_custom_path
  ionicv1_settings = os.path.join(PROJECT_SETTINGS_FOLDER_PATH, "ionicv1_settings.json")
  with open(ionicv1_settings, 'w+') as file:
    file.write(json.dumps(default_config, indent=2))
def ionicv1_prepare_project(project_path, ionicv1_custom_path):
  # Scaffold a new Ionic v1 project in project_path via the CLI, move the
  # generated files up into the project root, then write the plugin settings.
  terminal = Terminal(cwd=project_path)
  if sublime.platform() != "windows":
    # Re-open the project in Sublime afterwards, unless it is already open.
    open_project = ["&&", shlex.quote(sublime_executable_path()), shlex.quote(get_project_settings(project_path)["project_file_name"])] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
    terminal.run([shlex.quote(ionicv1_custom_path), "start", "myApp", "blank", "--type", "ionic1", ";", "mv", "./myApp/{.[!.],}*", "./", ";", "rm", "-rf", "myApp"] + open_project)
  else:
    open_project = [sublime_executable_path(), get_project_settings(project_path)["project_file_name"], "&&", "exit"] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
    # Windows has no brace globbing; a helper batch file moves the files.
    terminal.run([ionicv1_custom_path, "start", "myApp", "blank", "--type", "ionic1", "&", os.path.join(WINDOWS_BATCH_FOLDER, "move_all.bat"), "myApp", ".", "&", "rd", "/s", "/q", "myApp"])
    if open_project:
      terminal.run(open_project)
  add_ionicv1_settings(project_path, ionicv1_custom_path)
Hook.add("ionicv1_after_create_new_project", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_configuration", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_type", ionicv1_ask_custom_path)
class enable_menu_ionicv1EventListener(enable_menu_project_typeEventListener):
  # Toggles the Ionic v1 menu entries depending on the project type by
  # swapping between the enabled and disabled .sublime-menu files.
  project_type = "ionicv1"
  path = os.path.join(PROJECT_FOLDER, "ionicv1", "Main.sublime-menu")
  path_disabled = os.path.join(PROJECT_FOLDER, "ionicv1", "Main_disabled.sublime-menu")
class ionicv1_cliCommand(manage_cliCommand):
  """Runs Ionic v1 CLI commands (run/compile/build/prepare/serve),
  appending the user's per-platform options from ionicv1_settings."""
  cli = "ionic"
  custom_name = "ionicv1"
  settings_name = "ionicv1_settings"
  def prepare_command(self, **kwargs):
    # Ask for the target platform first when the command template needs one.
    if ":platform" in self.command:
      self.window.show_input_panel("Platform:", "", self.platform_on_done, None, None)
    else :
      self._run()
  def platform_on_done(self, platform):
    # Substitute the user-supplied platform into the command template.
    self.placeholders[":platform"] = shlex.quote(platform.strip())
    self.command = self.substitute_placeholders(self.command)
    self._run()
  def _run(self):
    try:
      # Append the extra CLI options configured for this sub-command.
      self.command = {
        'run': lambda : self.command + self.settings["ionicv1_settings"]["platform_run_options"][self.command[2].replace('--', '')][self.command[1]],
        'compile': lambda : self.command + self.settings["ionicv1_settings"]["platform_compile_options"][self.command[2].replace('--', '')][self.command[1]],
        'build': lambda : self.command + self.settings["ionicv1_settings"]["platform_build_options"][self.command[2].replace('--', '')][self.command[1]],
        # BUG FIX: previously read "ionicv2_settings" (copy-paste from the
        # ionic v2 module); the resulting KeyError was silently swallowed
        # below, so the 'prepare' options were never applied.
        'prepare': lambda : self.command + self.settings["ionicv1_settings"]["platform_prepare_options"][self.command[1]],
        'serve': lambda : self.command + self.settings["ionicv1_settings"]["serve_options"]
      }[self.command[0]]()
    except KeyError as err:
      # Unknown sub-command, or no extra options configured: run as-is.
      pass
    except Exception as err:
      import traceback  # local import: traceback is not imported at module level
      print(traceback.format_exc())
      pass
    super(ionicv1_cliCommand, self)._run()
| true | true |
f72142e5ac00cf950ce98fbca8180f0dd514c5e9 | 1,671 | py | Python | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | from socket import socket, AF_INET, SOCK_DGRAM, inet_aton, inet_ntoa
import time
# Registry mapping id(UnreliableSocket) -> underlying real UDP socket.
sockets = {}
# Fixed endpoint all datagrams are sent through; presumably a relay that
# unwraps the 8-byte address header and forwards — TODO confirm server side.
network = ('127.0.0.1', 12345)
def bytes_to_addr(data):
    """Decode an 8-byte wire header into an (ip, port) tuple.

    Layout: 4 bytes packed IPv4 address followed by a 4-byte big-endian
    port.  (Parameter renamed from ``bytes``, which shadowed the builtin;
    all call sites pass it positionally.)
    """
    return inet_ntoa(data[:4]), int.from_bytes(data[4:8], 'big')
def addr_to_bytes(addr):
    """Encode (ip, port) as the 8-byte wire header: packed IPv4 + big-endian port."""
    host, port = addr
    return inet_aton(host) + port.to_bytes(4, 'big')
def get_sendto(id, rate=None):
    """Build a sendto(data, addr) closure for the socket registered under `id`.

    The real destination address is prepended as an 8-byte header and the
    datagram is relayed via the fixed `network` endpoint.  A truthy `rate`
    (bytes per second) adds a proportional delay before each send, to
    simulate limited bandwidth.
    """
    def sendto(data: bytes, addr):
        if rate:
            time.sleep(len(data) / rate)
        sockets[id].sendto(addr_to_bytes(addr) + data, network)
    return sendto
class UnreliableSocket:
    """UDP socket wrapper that tunnels datagrams through the `network` relay.

    Outgoing packets are prefixed with the real destination address (see
    addr_to_bytes); incoming packets carry the original sender in the same
    8-byte header.  An optional `rate` throttles outgoing bandwidth.
    """
    def __init__(self, rate=None):
        assert rate is None or rate > 0, 'Rate should be positive or None.'
        # Register the real UDP socket under this wrapper's id; the sendto
        # closure looks it up in the module-level registry.
        sockets[id(self)] = socket(AF_INET, SOCK_DGRAM)
        self.sendto = get_sendto(id(self), rate)
    def bind(self, address: (str, int)):
        sockets[id(self)].bind(address)
    def recvfrom(self, bufsize) -> bytes:
        """Receive one relayed datagram; return (payload, original_sender).

        Datagrams not coming from the relay endpoint are skipped by
        recursing until a relayed one arrives.
        """
        data, frm = sockets[id(self)].recvfrom(bufsize)
        addr = bytes_to_addr(data[:8])
        if frm == network:
            return data[8:], addr
        else:
            return self.recvfrom(bufsize)
    def settimeout(self, value):
        sockets[id(self)].settimeout(value)
    def gettimeout(self):
        return sockets[id(self)].gettimeout()
    def setblocking(self, flag):
        sockets[id(self)].setblocking(flag)
    def getblocking(self):
        # BUG FIX: `return` was missing, so this always returned None.
        return sockets[id(self)].getblocking()
    def getsockname(self):
        return sockets[id(self)].getsockname()
    def close(self):
        sockets[id(self)].close()
| 26.109375 | 75 | 0.618791 | from socket import socket, AF_INET, SOCK_DGRAM, inet_aton, inet_ntoa
import time
sockets = {}
network = ('127.0.0.1', 12345)
def bytes_to_addr(data):
    """Decode an 8-byte wire header into an (ip, port) tuple.

    Layout: 4 bytes packed IPv4 address followed by a 4-byte big-endian
    port.  (Parameter renamed from ``bytes``, which shadowed the builtin;
    all call sites pass it positionally.)
    """
    return inet_ntoa(data[:4]), int.from_bytes(data[4:8], 'big')
def addr_to_bytes(addr):
    """Encode (ip, port) as the 8-byte wire header: packed IPv4 + big-endian port."""
    host, port = addr
    return inet_aton(host) + port.to_bytes(4, 'big')
def get_sendto(id, rate=None):
    """Build a sendto(data, addr) closure for the socket registered under `id`.

    The real destination address is prepended as an 8-byte header and the
    datagram is relayed via the fixed `network` endpoint.  A truthy `rate`
    (bytes per second) adds a proportional delay before each send, to
    simulate limited bandwidth.
    """
    def sendto(data: bytes, addr):
        if rate:
            time.sleep(len(data) / rate)
        sockets[id].sendto(addr_to_bytes(addr) + data, network)
    return sendto
class UnreliableSocket:
    """UDP socket wrapper that tunnels datagrams through the `network` relay.

    Outgoing packets are prefixed with the real destination address (see
    addr_to_bytes); incoming packets carry the original sender in the same
    8-byte header.  An optional `rate` throttles outgoing bandwidth.
    """
    def __init__(self, rate=None):
        assert rate is None or rate > 0, 'Rate should be positive or None.'
        # Register the real UDP socket under this wrapper's id; the sendto
        # closure looks it up in the module-level registry.
        sockets[id(self)] = socket(AF_INET, SOCK_DGRAM)
        self.sendto = get_sendto(id(self), rate)
    def bind(self, address: (str, int)):
        sockets[id(self)].bind(address)
    def recvfrom(self, bufsize) -> bytes:
        """Receive one relayed datagram; return (payload, original_sender).

        Datagrams not coming from the relay endpoint are skipped by
        recursing until a relayed one arrives.
        """
        data, frm = sockets[id(self)].recvfrom(bufsize)
        addr = bytes_to_addr(data[:8])
        if frm == network:
            return data[8:], addr
        else:
            return self.recvfrom(bufsize)
    def settimeout(self, value):
        sockets[id(self)].settimeout(value)
    def gettimeout(self):
        return sockets[id(self)].gettimeout()
    def setblocking(self, flag):
        sockets[id(self)].setblocking(flag)
    def getblocking(self):
        # BUG FIX: `return` was missing, so this always returned None.
        return sockets[id(self)].getblocking()
    def getsockname(self):
        return sockets[id(self)].getsockname()
    def close(self):
        sockets[id(self)].close()
| true | true |
f721433a67499332ba6e8d52379605bedd3d870c | 408 | py | Python | evan/services/mailer.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | null | null | null | evan/services/mailer.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | 20 | 2021-03-31T20:10:46.000Z | 2022-02-15T09:58:13.000Z | evan/services/mailer.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | null | null | null | from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from typing import List
def send_email(
    *, from_email: str = "Evan <evan@ugent.be>", to: List[str], subject: str, template: str, context_data: dict
):
    """Render `template` with `context_data` and send it as a plain-text email."""
    body = render_to_string(template, context_data)
    message = EmailMultiAlternatives(subject, body, from_email, to)
    message.send()
| 34 | 111 | 0.759804 | from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from typing import List
def send_email(
    *, from_email: str = "Evan <evan@ugent.be>", to: List[str], subject: str, template: str, context_data: dict
):
    """Render `template` with `context_data` and send it as a plain-text email."""
    body = render_to_string(template, context_data)
    message = EmailMultiAlternatives(subject, body, from_email, to)
    message.send()
| true | true |
f721457bba4d592a55104c5e37b8693bb3fe93c6 | 857 | py | Python | posts/migrations/0005_vote.py | MrRezoo/django-social-network | 253afed6f12ed5cb2c22066961ea3fa33727be20 | [
"MIT"
] | 1 | 2021-05-18T08:42:18.000Z | 2021-05-18T08:42:18.000Z | posts/migrations/0005_vote.py | MrRezoo/django-social-network | 253afed6f12ed5cb2c22066961ea3fa33727be20 | [
"MIT"
] | null | null | null | posts/migrations/0005_vote.py | MrRezoo/django-social-network | 253afed6f12ed5cb2c22066961ea3fa33727be20 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-19 19:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Vote model: a (user, post) pair recording that a user voted
    # on a post.  Both foreign keys cascade on delete and expose reverse
    # accessors named 'votes'.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0004_alter_comment_reply'),
    ]
    operations = [
        migrations.CreateModel(
            name='Vote',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='posts.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 34.28 | 140 | 0.654609 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Vote model: a (user, post) pair recording that a user voted
    # on a post.  Both foreign keys cascade on delete and expose reverse
    # accessors named 'votes'.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0004_alter_comment_reply'),
    ]
    operations = [
        migrations.CreateModel(
            name='Vote',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='posts.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f72145de142e44a5179e105ce79e68d8d169b232 | 7,927 | py | Python | test/functional/qtum_block_header.py | tongshiguanzi/O2O | c11983c922c83cdc97bd754d9f8a0d5a094f004f | [
"MIT"
] | 1 | 2020-07-22T08:45:28.000Z | 2020-07-22T08:45:28.000Z | test/functional/qtum_block_header.py | tongshiguanzi/O2O | c11983c922c83cdc97bd754d9f8a0d5a094f004f | [
"MIT"
] | null | null | null | test/functional/qtum_block_header.py | tongshiguanzi/O2O | c11983c922c83cdc97bd754d9f8a0d5a094f004f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.qtum import *
import time
from test_framework.key import ECKey
from test_framework.script import *
import struct
import io
def find_unspent(node, amount):
    """Return a CTxIn spending the first spendable wallet UTXO of exactly `amount`.

    Raises AssertionError if no matching spendable output exists.
    """
    for unspent in node.listunspent():
        if unspent['amount'] == amount and unspent['spendable']:
            return CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)
    # Fail loudly with context; the old `assert(False)` would be stripped
    # entirely under `python -O`.
    raise AssertionError("no spendable UTXO with amount %s" % amount)
class QtumBlockHeaderTest(BitcoinTestFramework):
    """Checks the Qtum-extended block header fields hashStateRoot/hashUTXORoot.

    Blocks must be accepted only when both fields match the actual
    post-execution EVM state; every mismatch combination (both wrong, state
    wrong, UTXO wrong, correct fields but stale state) must be rejected and
    cause a disconnect.
    """
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [[]]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        """Drive the node over P2P, feeding it blocks with (in)valid state roots."""
        self.nodes[0].add_p2p_connection(P2PDataStore())
        self.nodes[0].p2p.wait_for_getheaders(timeout=5)
        node = self.nodes[0]
        #mocktime = 1490247077
        #node.setmocktime(mocktime)
        node.generate(10)
        # Mine 500 blocks over P2P so later coinbases are spendable.
        self.block_time = int(time.time())+20
        for i in range(500):
            self.tip = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount()+1), self.block_time+i)
            self.tip.solve()
            self.sync_blocks([self.tip])
        #node.generate(COINBASE_MATURITY+50)
        # NOTE(review): despite the name, mocktime is only used below as an
        # offset when computing block timestamps, not passed to setmocktime.
        mocktime = COINBASE_MATURITY+50
        spendable_addresses = []
        # store some addresses to use later
        for unspent in node.listunspent():
            spendable_addresses.append(unspent['address'])
        # first make sure that what is a valid block is accepted
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(time.time()+mocktime+100))
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip])
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        # A block that has an OP_CREATE tx, but with an incorrect state root
        # (the header keeps its default roots while the block executes a tx).
        """
        pragma solidity ^0.4.11;
        contract Test {
            function() payable {}
        }
        """
        tx_hex = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029", 1000000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
        f = io.BytesIO(hex_str_to_bytes(tx_hex))
        tx = CTransaction()
        tx.deserialize(f)
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+200))
        self.tip.vtx.append(tx)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # Create a contract for use later.
        """
        pragma solidity ^0.4.11;
        contract Test {
            function() payable {}
        }
        """
        contract_address = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029")['address']
        node.generate(1)
        # Capture the genuine roots of the current tip for reuse below.
        realHashUTXORoot = int(node.getblock(node.getbestblockhash())['hashUTXORoot'], 16)
        realHashStateRoot = int(node.getblock(node.getbestblockhash())['hashStateRoot'], 16)
        # A block with both an invalid hashStateRoot and hashUTXORoot
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+300))
        self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # A block with a tx, but without updated state hashes
        tx_hex = node.sendtocontract(contract_address, "00", 1, 100000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
        f = io.BytesIO(hex_str_to_bytes(tx_hex))
        tx = CTransaction()
        tx.deserialize(f)
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+400))
        self.tip.hashUTXORoot = realHashUTXORoot
        self.tip.hashStateRoot = realHashStateRoot
        self.tip.vtx.append(tx)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # A block with an invalid hashUTXORoot
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+500))
        self.tip.hashStateRoot = realHashStateRoot
        self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # A block with an invalid hashStateRoot
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+600))
        self.tip.hashUTXORoot = realHashUTXORoot
        self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # Verify that blocks with a correct hashStateRoot and hashUTXORoot are accepted.
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+700))
        self.tip.hashUTXORoot = realHashUTXORoot
        self.tip.hashStateRoot = realHashStateRoot
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip])
    def reconnect_p2p(self):
        """Tear down and bootstrap the P2P connection to the node.
        The node gets disconnected several times in this test. This helper
        method reconnects the p2p and restarts the network thread."""
        self.nodes[0].disconnect_p2ps()
        self.nodes[0].add_p2p_connection(P2PDataStore())
        self.nodes[0].p2p.wait_for_getheaders(timeout=5)
    def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, force_send=False, reconnect=False, timeout=5):
        """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
        Call with success = False if the tip shouldn't advance to the most recent block."""
        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
        if reconnect:
            self.reconnect_p2p()
if __name__ == '__main__':
QtumBlockHeaderTest().main()
| 44.284916 | 292 | 0.692444 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.qtum import *
import time
from test_framework.key import ECKey
from test_framework.script import *
import struct
import io
def find_unspent(node, amount):
    """Return a CTxIn spending the first spendable wallet UTXO of exactly `amount`.

    Raises AssertionError if no matching spendable output exists.
    """
    for unspent in node.listunspent():
        if unspent['amount'] == amount and unspent['spendable']:
            return CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)
    # Fail loudly with context; the old `assert(False)` would be stripped
    # entirely under `python -O`.
    raise AssertionError("no spendable UTXO with amount %s" % amount)
class QtumBlockHeaderTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
self.nodes[0].p2p.wait_for_getheaders(timeout=5)
node = self.nodes[0]
node.generate(10)
self.block_time = int(time.time())+20
for i in range(500):
self.tip = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount()+1), self.block_time+i)
self.tip.solve()
self.sync_blocks([self.tip])
mocktime = COINBASE_MATURITY+50
spendable_addresses = []
for unspent in node.listunspent():
spendable_addresses.append(unspent['address'])
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(time.time()+mocktime+100))
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip])
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
tx_hex = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029", 1000000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
tx = CTransaction()
tx.deserialize(f)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+200))
self.tip.vtx.append(tx)
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
contract_address = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029")['address']
node.generate(1)
realHashUTXORoot = int(node.getblock(node.getbestblockhash())['hashUTXORoot'], 16)
realHashStateRoot = int(node.getblock(node.getbestblockhash())['hashStateRoot'], 16)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+300))
self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
tx_hex = node.sendtocontract(contract_address, "00", 1, 100000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
tx = CTransaction()
tx.deserialize(f)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+400))
self.tip.hashUTXORoot = realHashUTXORoot
self.tip.hashStateRoot = realHashStateRoot
self.tip.vtx.append(tx)
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+500))
self.tip.hashStateRoot = realHashStateRoot
self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+600))
self.tip.hashUTXORoot = realHashUTXORoot
self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+700))
self.tip.hashUTXORoot = realHashUTXORoot
self.tip.hashStateRoot = realHashStateRoot
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip])
    def reconnect_p2p(self):
        """Tear down and re-establish our P2P connection to node0.

        ``add_p2p_connection`` performs the version handshake itself, so we
        only wait for the follow-up ``getheaders`` before the caller resumes
        submitting blocks.
        """
        self.nodes[0].disconnect_p2ps()
        self.nodes[0].add_p2p_connection(P2PDataStore())
        self.nodes[0].p2p.wait_for_getheaders(timeout=5)
    def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, force_send=False, reconnect=False, timeout=5):
        """Send ``blocks`` to node0 over P2P and check acceptance/rejection.

        When ``reconnect`` is True the node is expected to disconnect us in
        response (e.g. after an invalid block), so the P2P connection is
        re-established afterwards.

        NOTE(review): ``reject_code`` is accepted but never used; kept only
        for call-site compatibility.
        """
        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
        if reconnect:
            self.reconnect_p2p()
if __name__ == '__main__':
    # Run the functional test when executed as a script.
    QtumBlockHeaderTest().main()
| true | true |
f721490f7323d6f2e9dbcf3d61d3cb7972830a93 | 1,481 | py | Python | addons/destinations/create_cas_destination.py | paataugrekhelidze/model-management-resources | e3cc8719f349f9755690a4cf87f7e75574966e9c | [
"Apache-2.0"
] | 7 | 2020-02-21T02:43:07.000Z | 2021-04-13T15:09:37.000Z | addons/destinations/create_cas_destination.py | paataugrekhelidze/model-management-resources | e3cc8719f349f9755690a4cf87f7e75574966e9c | [
"Apache-2.0"
] | 11 | 2020-03-19T09:49:30.000Z | 2021-05-04T15:32:24.000Z | addons/destinations/create_cas_destination.py | paataugrekhelidze/model-management-resources | e3cc8719f349f9755690a4cf87f7e75574966e9c | [
"Apache-2.0"
] | 17 | 2020-02-17T23:42:37.000Z | 2021-06-16T12:24:49.000Z | # Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

"""Create a CAS publishing destination via the SAS Model Publish REST API.

Replace the placeholder credentials and destination name below, then run
the script.  A successful creation request returns HTTP 201 (Created).
"""

import sys
sys.path.append('..')

import mmAuthorization

import requests
import json

viya_host = "localhost"
port = ":8080"
host_url = "http://" + viya_host + port
destination_url = host_url + "/modelPublish/destinations/"

mm_auth = mmAuthorization.mmAuthorization("myAuth")

# Admin user ID and password.
admin_userId = "<SAS_user_admin_ID>"
user_passwd = "<SAS_user_password>"

# Destination name.
dest_name = "<my_CAS_destination_name>"

if admin_userId == "<SAS_user_admin_ID>":
    print("You must replace the example values in this script with valid values before executing the script.")
    # Use sys.exit instead of the site-injected `exit` helper, which is not
    # guaranteed to exist in every execution environment.
    sys.exit(1)

admin_auth_token = mm_auth.get_auth_token(host_url, admin_userId, user_passwd)

destination_cas_headers = {
    "If-Match": "false",
    "Content-Type": "application/vnd.sas.models.publishing.destination.cas+json",
    mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}

# Create the new destination; a 201 response indicates success.
print("Creating the " + dest_name + " destination...")

destination_attrs = {
    "name": dest_name,
    "destinationType": "cas",
    "casServerName": "cas-shared-default",
    "casLibrary": "public",
    "destinationTable": "SAS_MODEL_TABLE"
}

destination = requests.post(destination_url,
                            data=json.dumps(destination_attrs), headers=destination_cas_headers)
print(destination)
| 27.943396 | 110 | 0.740041 |
import sys
sys.path.append('..')
import mmAuthorization
import requests
import json
viya_host = "localhost"
port = ":8080"
host_url="http://" + viya_host + port
destination_url = host_url + "/modelPublish/destinations/"
mm_auth = mmAuthorization.mmAuthorization("myAuth")
admin_userId = "<SAS_user_admin_ID>"
user_passwd = "<SAS_user_password>"
dest_name = "<my_CAS_destination_name>"
if admin_userId == "<SAS_user_admin_ID>":
print("You must replace the example values in this script with valid values before executing the script.")
exit(1)
admin_auth_token = mm_auth.get_auth_token(host_url, admin_userId, user_passwd)
destination_cas_headers = {
"If-Match":"false",
"Content-Type":"application/vnd.sas.models.publishing.destination.cas+json",
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
print("Creating the " + dest_name + " destination...")
destination_attrs = {
"name":dest_name,
"destinationType":"cas",
"casServerName":"cas-shared-default",
"casLibrary" : "public",
"destinationTable" : "SAS_MODEL_TABLE"
}
destination = requests.post(destination_url,
data=json.dumps(destination_attrs), headers=destination_cas_headers)
print(destination)
| true | true |
f7214984dd02b1bc0eae58d46c4bc02d9ce3fa79 | 4,206 | py | Python | thimbles/charts/radar_chart.py | quidditymaster/thimbles | b122654a012f0eb4f043d1ee757f884707c97615 | [
"MIT"
] | null | null | null | thimbles/charts/radar_chart.py | quidditymaster/thimbles | b122654a012f0eb4f043d1ee757f884707c97615 | [
"MIT"
] | null | null | null | thimbles/charts/radar_chart.py | quidditymaster/thimbles | b122654a012f0eb4f043d1ee757f884707c97615 | [
"MIT"
] | null | null | null | """
http://matplotlib.org/examples/api/radar_chart.html
Example of creating a radar chart (a.k.a. a spider or star chart) [1]_.
Although this example allows a frame of either 'circle' or 'polygon', polygon
frames don't have proper gridlines (the lines are circles instead of polygons).
It's possible to get a polygon grid by setting GRIDLINE_INTERPOLATION_STEPS in
matplotlib.axis to the desired number of vertices, but the orientation of the
polygon is not aligned with the radial axes.
.. [1] http://en.wikipedia.org/wiki/Radar_chart
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
    """Create a radar chart with `num_vars` axes.

    This function creates a RadarAxes projection and registers it.

    Parameters
    ----------
    num_vars : int
        Number of variables for radar chart.
    frame : {'circle' | 'polygon'}
        Shape of frame surrounding axes.

    Returns
    -------
    numpy.ndarray
        The spoke angles in radians; pass these as the theta argument to
        plot/fill on an axes created with ``projection="radar"``.
    """
    # calculate evenly-spaced axis angles
    theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
    # rotate theta such that the first axis is at the top
    theta += np.pi/2

    # Both patch builders take `self` because they are installed as methods
    # of the RadarAxes class below; they close over `theta`.
    def draw_poly_patch(self):
        verts = unit_poly_verts(theta)
        return plt.Polygon(verts, closed=True, edgecolor='k')

    def draw_circle_patch(self):
        # unit circle centered on (0.5, 0.5)
        return plt.Circle((0.5, 0.5), 0.5)

    patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
    if frame not in patch_dict:
        raise ValueError('unknown value for `frame`: %s' % frame)

    class RadarAxes(PolarAxes):
        # Registered projection name: fig.add_subplot(..., projection='radar').
        name = 'radar'
        # use 1 line segment to connect specified points
        RESOLUTION = 1
        # define draw_frame method
        draw_patch = patch_dict[frame]

        def fill(self, *args, **kwargs):
            """Override fill so that line is closed by default"""
            closed = kwargs.pop('closed', True)
            return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)

        def plot(self, *args, **kwargs):
            """Override plot so that line is closed by default"""
            # NOTE(review): unlike Axes.plot this returns None rather than
            # the list of lines — callers relying on the return value beware.
            lines = super(RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                self._close_line(line)

        def _close_line(self, line):
            # Append the first vertex at the end so the polyline closes.
            x, y = line.get_data()
            # FIXME: markers at x[0], y[0] get doubled-up
            if x[0] != x[-1]:
                x = np.concatenate((x, [x[0]]))
                y = np.concatenate((y, [y[0]]))
                line.set_data(x, y)

        def set_varlabels(self, labels):
            # One tick label per spoke; set_thetagrids expects degrees.
            self.set_thetagrids(theta * 180/np.pi, labels)

        def _gen_axes_patch(self):
            return self.draw_patch()

        def _gen_axes_spines(self):
            if frame == 'circle':
                return PolarAxes._gen_axes_spines(self)
            # The following is a hack to get the spines (i.e. the axes frame)
            # to draw correctly for a polygon frame.

            # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
            spine_type = 'circle'
            verts = unit_poly_verts(theta)
            # close off polygon by repeating first vertex
            verts.append(verts[0])
            path = Path(verts)

            spine = Spine(self, spine_type, path)
            spine.set_transform(self.transAxes)
            return {'polar': spine}

    register_projection(RadarAxes)
    return theta
def unit_poly_verts(theta):
    """Return vertices of polygon for subplot axes.

    This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
    """
    center_x = center_y = radius = 0.5
    return [(radius * np.cos(angle) + center_x, radius * np.sin(angle) + center_y)
            for angle in theta]
if __name__ == "__main__":
    # Demo: plot one random 5-variable series on a polygon radar chart.
    n_spokes = 5
    theta = radar_factory(n_spokes, frame="polygon")
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection="radar")
    datapoints = np.random.random(n_spokes)
    ax.plot(theta, datapoints)
    ax.fill(theta, datapoints)
    plt.show()
| 33.380952 | 79 | 0.622444 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
theta += np.pi/2
def draw_poly_patch(self):
verts = unit_poly_verts(theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def draw_circle_patch(self):
return plt.Circle((0.5, 0.5), 0.5)
patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
if frame not in patch_dict:
raise ValueError('unknown value for `frame`: %s' % frame)
class RadarAxes(PolarAxes):
name = 'radar'
RESOLUTION = 1
draw_patch = patch_dict[frame]
def fill(self, *args, **kwargs):
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(theta * 180/np.pi, labels)
def _gen_axes_patch(self):
return self.draw_patch()
def _gen_axes_spines(self):
if frame == 'circle':
return PolarAxes._gen_axes_spines(self)
spine_type = 'circle'
verts = unit_poly_verts(theta)
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
register_projection(RadarAxes)
return theta
def unit_poly_verts(theta):
x0, y0, r = [0.5] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts
if __name__ == "__main__":
n_spokes = 5
theta = radar_factory(n_spokes, frame="polygon")
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection="radar")
datapoints = np.random.random(n_spokes)
ax.plot(theta, datapoints)
ax.fill(theta, datapoints)
plt.show()
| true | true |
f7214ba2fb5f78050521fcf3f80e3a68ce4d1155 | 1,826 | py | Python | smlb/feature_selection/feature_selector_sklearn.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 6 | 2020-07-27T21:08:55.000Z | 2021-05-04T07:00:29.000Z | smlb/feature_selection/feature_selector_sklearn.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 18 | 2020-09-01T00:47:04.000Z | 2021-09-15T22:16:56.000Z | smlb/feature_selection/feature_selector_sklearn.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 2 | 2020-08-24T21:50:16.000Z | 2020-12-06T05:18:57.000Z | from smlb import (
params,
Data,
Features,
TabularData,
)
from smlb.feature_selection.selector_protocol_sklearn import SelectorProtocolSklearn
class FeatureSelectorSklearn(Features):
    """Feature selection backed by a scikit-learn-style selector.

    The wrapped ``selector`` must expose scikit-learn's ``fit`` and
    ``get_support`` methods; this class adapts it to the ``Features``
    interface.
    """

    def __init__(self, selector: SelectorProtocolSklearn, *args, **kwargs):
        """Initialize state.

        Parameters:
            selector: feature selection method providing ``fit`` and ``get_support``
        """

        super().__init__(*args, **kwargs)
        self._selector: SelectorProtocolSklearn = params.instance(
            selector, SelectorProtocolSklearn
        )

    def fit(self, data: Data) -> "FeatureSelectorSklearn":
        """Fit the underlying selector on ``data``.

        Parameters:
            data: data to fit

        Returns:
            the instance itself
        """

        data = params.instance(data, Data)
        num_samples = data.num_samples
        design_matrix = params.real_matrix(data.samples(), nrows=num_samples)
        targets = params.real_vector(data.labels(), dimensions=num_samples)
        self._selector.fit(design_matrix, targets)
        return self

    def apply(self, data: Data) -> TabularData:
        """Restrict ``data`` to the features chosen by the fitted selector.

        Parameters:
            data: data to select features from

        Returns:
            data with only the selected feature columns
        """

        data = params.instance(data, Data)
        matrix = params.real_matrix(data.samples())
        support_mask = self._selector.get_support()
        return TabularData(matrix[:, support_mask], data.labels())
| 28.984127 | 116 | 0.633078 | from smlb import (
params,
Data,
Features,
TabularData,
)
from smlb.feature_selection.selector_protocol_sklearn import SelectorProtocolSklearn
class FeatureSelectorSklearn(Features):
def __init__(self, selector: SelectorProtocolSklearn, *args, **kwargs):
super().__init__(*args, **kwargs)
self._selector: SelectorProtocolSklearn = params.instance(
selector, SelectorProtocolSklearn
)
def fit(self, data: Data) -> "FeatureSelectorSklearn":
data = params.instance(data, Data)
n = data.num_samples
xtrain = params.real_matrix(data.samples(), nrows=n)
ytrain = params.real_vector(data.labels(), dimensions=n)
self._selector.fit(xtrain, ytrain)
return self
def apply(self, data: Data) -> TabularData:
data = params.instance(data, Data)
samples = params.real_matrix(data.samples())
support = self._selector.get_support()
selected = samples[:, support]
return TabularData(selected, data.labels())
| true | true |
f7214d90b8586fc89f0cc957a7cf81ccb7d45c94 | 10,292 | py | Python | netket/sampler/metropolis_numpy.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | netket/sampler/metropolis_numpy.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | netket/sampler/metropolis_numpy.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Tuple, Callable
import numpy as np
from numba import jit
from jax import numpy as jnp
import jax
from netket.hilbert import AbstractHilbert
from netket.utils.mpi import mpi_sum, n_nodes
from netket.utils.types import PyTree
from netket.utils.deprecation import deprecated
import netket.jax as nkjax
from .metropolis import MetropolisSampler
@dataclass
class MetropolisNumpySamplerState:
    # Mutable per-chain sampler state; arrays are preallocated once in
    # MetropolisSamplerNumpy._init_state and mutated in place afterwards.
    σ: np.ndarray
    """Holds the current configuration."""
    σ1: np.ndarray
    """Holds a proposed configuration (preallocation)."""
    log_values: np.ndarray
    """Holds model(pars, σ) for the current σ (preallocation)."""
    log_values_1: np.ndarray
    """Holds model(pars, σ1) for the last σ1 (preallocation)."""
    log_prob_corr: np.ndarray
    """Holds optional acceptance correction (preallocation)."""
    rule_state: Any
    """The optional state of the rule."""
    rng: Any
    """A numpy random generator."""
    n_steps_proc: int = 0
    """Number of moves performed along the chains in this process since the last reset."""
    n_accepted_proc: int = 0
    """Number of accepted transitions among the chains in this process since the last reset."""

    @property
    def acceptance(self) -> float:
        """The fraction of accepted moves across all chains and MPI processes.

        The rate is computed since the last reset of the sampler.
        Will return None if no sampling has been performed since then.
        """
        if self.n_steps == 0:
            # Deliberately None (not 0.0): "no data yet" rather than
            # "nothing accepted".
            return None
        return self.n_accepted / self.n_steps

    @property
    @deprecated(
        """Please use the attribute `.acceptance` instead of
        `.acceptance_ratio`. The new attribute `.acceptance` returns the
        acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`
        returning a percentage, which is a bug."""
    )
    def acceptance_ratio(self) -> float:
        """DEPRECATED: Please use the attribute `.acceptance` instead of
        `.acceptance_ratio`. The new attribute `.acceptance` returns the
        acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`
        returning a percentage, which is a bug.

        The percentage of accepted moves across all chains and MPI processes.

        The rate is computed since the last reset of the sampler.
        Will return None if no sampling has been performed since then.
        """
        return self.acceptance * 100

    @property
    def n_steps(self) -> int:
        """Total number of moves performed across all processes since the last reset."""
        # n_steps_proc is identical on every rank, so multiply instead of
        # performing an MPI reduction.
        return self.n_steps_proc * n_nodes

    @property
    def n_accepted(self) -> int:
        """Total number of moves accepted across all processes since the last reset."""
        return mpi_sum(self.n_accepted_proc)

    def __repr__(self):
        if self.n_steps > 0:
            acc_string = "# accepted = {}/{} ({}%), ".format(
                self.n_accepted, self.n_steps, self.acceptance * 100
            )
        else:
            acc_string = ""

        return f"MetropolisNumpySamplerState({acc_string}rng state={self.rng})"
@partial(jax.jit, static_argnums=0)
def apply_model(machine, pars, weights):
    # JIT-compiled forward pass; `machine` is a static argument, so each
    # distinct model object gets its own compiled version.
    return machine.apply(pars, weights)
class MetropolisSamplerNumpy(MetropolisSampler):
    """
    Metropolis-Hastings sampler for a Hilbert space according to a specific transition
    rule executed on CPU through Numpy.

    This sampler is equivalent to :ref:`netket.sampler.MetropolisSampler` but instead of
    executing the whole sampling inside a jax-jitted function, only evaluates the forward
    pass inside a jax-jitted function, while proposing new steps and accepting/rejecting
    them is performed in numpy.

    Because of Jax dispatch cost, and especially for small systems, this sampler performs
    poorly, while asymptotically it should have the same performance of standard Jax samplers.

    However, some transition rules don't work on GPU, and some samplers (Hamiltonian) work
    very poorly on jax so this is a good workaround.

    See :ref:`netket.sampler.MetropolisSampler` for more information.
    """

    def _init_state(sampler, machine, parameters, key):
        # Seed a numpy Generator from the jax PRNG key.
        rgen = np.random.default_rng(np.asarray(key))

        σ = np.zeros((sampler.n_batches, sampler.hilbert.size), dtype=sampler.dtype)
        # Only the output dtype/shape is needed here, so avoid a real
        # forward pass with eval_shape.
        ma_out = jax.eval_shape(machine.apply, parameters, σ)

        state = MetropolisNumpySamplerState(
            σ=σ,
            σ1=np.copy(σ),
            log_values=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
            log_values_1=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
            log_prob_corr=np.zeros(
                sampler.n_batches, dtype=nkjax.dtype_real(ma_out.dtype)
            ),
            rng=rgen,
            rule_state=sampler.rule.init_state(sampler, machine, parameters, rgen),
        )

        # If chains are not reset at every `reset`, draw the initial
        # configurations once here.
        if not sampler.reset_chains:
            key = jnp.asarray(
                state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
            )

            state.σ = np.copy(
                sampler.rule.random_state(sampler, machine, parameters, state, key)
            )

        return state

    def _reset(sampler, machine, parameters, state):
        if sampler.reset_chains:
            # directly generate a PRNGKey which is a [2xuint32] array
            key = jnp.asarray(
                state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
            )

            state.σ = np.copy(
                sampler.rule.random_state(sampler, machine, parameters, state, key)
            )

        state.rule_state = sampler.rule.reset(sampler, machine, parameters, state)
        state.log_values = np.copy(apply_model(machine, parameters, state.σ))

        # NOTE(review): these two attributes are not read anywhere in this
        # file, and n_steps_proc / n_accepted_proc are *not* zeroed here —
        # presumably reset elsewhere; confirm before relying on the counters.
        state._accepted_samples = 0
        state._total_samples = 0

        return state

    def _sample_next(sampler, machine, parameters, state):
        σ = state.σ
        σ1 = state.σ1
        log_values = state.log_values
        log_values_1 = state.log_values_1
        log_prob_corr = state.log_prob_corr
        mpow = sampler.machine_pow

        rgen = state.rng

        accepted = 0

        for sweep in range(sampler.n_sweeps):
            # Propose a new state using the transition kernel
            # σp, log_prob_correction =
            # NOTE(review): the rule is called with σ while the proposal
            # log-values below are computed from σ1 — presumably the rule
            # writes its proposals into state.σ1 (aliased by σ1 here);
            # confirm against the numpy rule implementations.
            sampler.rule.transition(sampler, machine, parameters, state, state.rng, σ)

            log_values_1 = np.asarray(apply_model(machine, parameters, σ1))

            random_uniform = rgen.uniform(0, 1, size=σ.shape[0])

            # Acceptance Kernel: mutates σ and log_values in place for the
            # chains whose proposal is accepted.
            accepted += acceptance_kernel(
                σ,
                σ1,
                log_values,
                log_values_1,
                log_prob_corr,
                mpow,
                random_uniform,
            )

        state.n_steps_proc += sampler.n_sweeps * sampler.n_chains
        state.n_accepted_proc += accepted

        return state, state.σ

    def _sample_chain(
        sampler,
        machine: Callable,
        parameters: PyTree,
        state: MetropolisNumpySamplerState,
        chain_length: int,
    ) -> Tuple[jnp.ndarray, MetropolisNumpySamplerState]:
        # Collect one configuration per chain per step; each step performs
        # n_sweeps Metropolis sweeps via sample_next.
        samples = np.empty(
            (chain_length, sampler.n_chains, sampler.hilbert.size), dtype=sampler.dtype
        )

        for i in range(chain_length):
            state, σ = sampler.sample_next(machine, parameters, state)
            samples[i] = σ

        return samples, state

    def __repr__(sampler):
        # `sampler` in place of `self` follows the convention used by the
        # other sampler classes in this package.
        return (
            "MetropolisSamplerNumpy("
            + "\n hilbert = {},".format(sampler.hilbert)
            + "\n rule = {},".format(sampler.rule)
            + "\n n_chains = {},".format(sampler.n_chains)
            + "\n machine_power = {},".format(sampler.machine_pow)
            + "\n reset_chains = {},".format(sampler.reset_chains)
            + "\n n_sweeps = {},".format(sampler.n_sweeps)
            + "\n dtype = {},".format(sampler.dtype)
            + ")"
        )

    def __str__(sampler):
        return (
            "MetropolisSamplerNumpy("
            + "rule = {}, ".format(sampler.rule)
            + "n_chains = {}, ".format(sampler.n_chains)
            + "machine_power = {}, ".format(sampler.machine_pow)
            + "n_sweeps = {}, ".format(sampler.n_sweeps)
            + "dtype = {})".format(sampler.dtype)
        )
@jit(nopython=True)
def acceptance_kernel(
    σ, σ1, log_values, log_values_1, log_prob_corr, machine_pow, random_uniform
):
    """Metropolis accept/reject step over all chains, performed in place.

    For every accepted chain i, σ[i] and log_values[i] are overwritten with
    the proposed σ1[i] / log_values_1[i].  Returns the number of accepted
    moves.
    """
    accepted = 0

    for i in range(σ.shape[0]):
        # exp(machine_pow * Δ Re[log ψ] + correction); log_prob_corr carries
        # the correction for non-symmetric proposal rules.
        prob = np.exp(
            machine_pow * (log_values_1[i] - log_values[i]).real + log_prob_corr[i]
        )
        assert not math.isnan(prob)

        if prob > random_uniform[i]:
            log_values[i] = log_values_1[i]
            σ[i] = σ1[i]
            accepted += 1

    return accepted
def MetropolisLocalNumpy(hilbert: AbstractHilbert, *args, **kwargs):
    """Construct a :class:`MetropolisSamplerNumpy` using the numpy local-update rule."""
    from .rules import LocalRuleNumpy

    rule = LocalRuleNumpy()
    return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisHamiltonianNumpy(hilbert: AbstractHilbert, hamiltonian, *args, **kwargs):
    """Construct a :class:`MetropolisSamplerNumpy` whose moves are generated by ``hamiltonian``."""
    from .rules import HamiltonianRuleNumpy

    rule = HamiltonianRuleNumpy(hamiltonian)
    return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisCustomNumpy(
    hilbert: AbstractHilbert, move_operators, move_weights=None, *args, **kwargs
):
    """Construct a :class:`MetropolisSamplerNumpy` with user-supplied move operators (optionally weighted)."""
    from .rules import CustomRuleNumpy

    rule = CustomRuleNumpy(move_operators, move_weights)
    return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
| 33.744262 | 95 | 0.643412 |
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Tuple, Callable
import numpy as np
from numba import jit
from jax import numpy as jnp
import jax
from netket.hilbert import AbstractHilbert
from netket.utils.mpi import mpi_sum, n_nodes
from netket.utils.types import PyTree
from netket.utils.deprecation import deprecated
import netket.jax as nkjax
from .metropolis import MetropolisSampler
@dataclass
class MetropolisNumpySamplerState:
σ: np.ndarray
σ1: np.ndarray
log_values: np.ndarray
log_values_1: np.ndarray
log_prob_corr: np.ndarray
rule_state: Any
rng: Any
n_steps_proc: int = 0
n_accepted_proc: int = 0
@property
def acceptance(self) -> float:
if self.n_steps == 0:
return None
return self.n_accepted / self.n_steps
@property
@deprecated(
"""Please use the attribute `.acceptance` instead of
`.acceptance_ratio`. The new attribute `.acceptance` returns the
acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`
returning a percentage, which is a bug."""
)
def acceptance_ratio(self) -> float:
return self.acceptance * 100
@property
def n_steps(self) -> int:
return self.n_steps_proc * n_nodes
@property
def n_accepted(self) -> int:
return mpi_sum(self.n_accepted_proc)
def __repr__(self):
if self.n_steps > 0:
acc_string = "# accepted = {}/{} ({}%), ".format(
self.n_accepted, self.n_steps, self.acceptance * 100
)
else:
acc_string = ""
return f"MetropolisNumpySamplerState({acc_string}rng state={self.rng})"
@partial(jax.jit, static_argnums=0)
def apply_model(machine, pars, weights):
return machine.apply(pars, weights)
class MetropolisSamplerNumpy(MetropolisSampler):
def _init_state(sampler, machine, parameters, key):
rgen = np.random.default_rng(np.asarray(key))
σ = np.zeros((sampler.n_batches, sampler.hilbert.size), dtype=sampler.dtype)
ma_out = jax.eval_shape(machine.apply, parameters, σ)
state = MetropolisNumpySamplerState(
σ=σ,
σ1=np.copy(σ),
log_values=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
log_values_1=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
log_prob_corr=np.zeros(
sampler.n_batches, dtype=nkjax.dtype_real(ma_out.dtype)
),
rng=rgen,
rule_state=sampler.rule.init_state(sampler, machine, parameters, rgen),
)
if not sampler.reset_chains:
key = jnp.asarray(
state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
)
state.σ = np.copy(
sampler.rule.random_state(sampler, machine, parameters, state, key)
)
return state
def _reset(sampler, machine, parameters, state):
if sampler.reset_chains:
key = jnp.asarray(
state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
)
state.σ = np.copy(
sampler.rule.random_state(sampler, machine, parameters, state, key)
)
state.rule_state = sampler.rule.reset(sampler, machine, parameters, state)
state.log_values = np.copy(apply_model(machine, parameters, state.σ))
state._accepted_samples = 0
state._total_samples = 0
return state
def _sample_next(sampler, machine, parameters, state):
σ = state.σ
σ1 = state.σ1
log_values = state.log_values
log_values_1 = state.log_values_1
log_prob_corr = state.log_prob_corr
mpow = sampler.machine_pow
rgen = state.rng
accepted = 0
for sweep in range(sampler.n_sweeps):
sampler.rule.transition(sampler, machine, parameters, state, state.rng, σ)
log_values_1 = np.asarray(apply_model(machine, parameters, σ1))
random_uniform = rgen.uniform(0, 1, size=σ.shape[0])
accepted += acceptance_kernel(
σ,
σ1,
log_values,
log_values_1,
log_prob_corr,
mpow,
random_uniform,
)
state.n_steps_proc += sampler.n_sweeps * sampler.n_chains
state.n_accepted_proc += accepted
return state, state.σ
def _sample_chain(
sampler,
machine: Callable,
parameters: PyTree,
state: MetropolisNumpySamplerState,
chain_length: int,
) -> Tuple[jnp.ndarray, MetropolisNumpySamplerState]:
samples = np.empty(
(chain_length, sampler.n_chains, sampler.hilbert.size), dtype=sampler.dtype
)
for i in range(chain_length):
state, σ = sampler.sample_next(machine, parameters, state)
samples[i] = σ
return samples, state
def __repr__(sampler):
return (
"MetropolisSamplerNumpy("
+ "\n hilbert = {},".format(sampler.hilbert)
+ "\n rule = {},".format(sampler.rule)
+ "\n n_chains = {},".format(sampler.n_chains)
+ "\n machine_power = {},".format(sampler.machine_pow)
+ "\n reset_chains = {},".format(sampler.reset_chains)
+ "\n n_sweeps = {},".format(sampler.n_sweeps)
+ "\n dtype = {},".format(sampler.dtype)
+ ")"
)
def __str__(sampler):
return (
"MetropolisSamplerNumpy("
+ "rule = {}, ".format(sampler.rule)
+ "n_chains = {}, ".format(sampler.n_chains)
+ "machine_power = {}, ".format(sampler.machine_pow)
+ "n_sweeps = {}, ".format(sampler.n_sweeps)
+ "dtype = {})".format(sampler.dtype)
)
@jit(nopython=True)
def acceptance_kernel(
σ, σ1, log_values, log_values_1, log_prob_corr, machine_pow, random_uniform
):
accepted = 0
for i in range(σ.shape[0]):
prob = np.exp(
machine_pow * (log_values_1[i] - log_values[i]).real + log_prob_corr[i]
)
assert not math.isnan(prob)
if prob > random_uniform[i]:
log_values[i] = log_values_1[i]
σ[i] = σ1[i]
accepted += 1
return accepted
def MetropolisLocalNumpy(hilbert: AbstractHilbert, *args, **kwargs):
from .rules import LocalRuleNumpy
rule = LocalRuleNumpy()
return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisHamiltonianNumpy(hilbert: AbstractHilbert, hamiltonian, *args, **kwargs):
from .rules import HamiltonianRuleNumpy
rule = HamiltonianRuleNumpy(hamiltonian)
return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisCustomNumpy(
hilbert: AbstractHilbert, move_operators, move_weights=None, *args, **kwargs
):
from .rules import CustomRuleNumpy
rule = CustomRuleNumpy(move_operators, move_weights)
return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
| true | true |
f7214da5be355cbd7977e3d4b792fe2a9df91d2e | 393 | py | Python | LSTM/graphs/graph1.py | Anurag14/Inflow-prediction-Bhakra | d440ec552032084991878877ba5154ea2c452264 | [
"MIT"
] | 8 | 2019-05-29T09:07:25.000Z | 2021-12-28T13:53:50.000Z | LSTM/graphs/graph1.py | Anurag14/Inflow-prediction-Bhakra | d440ec552032084991878877ba5154ea2c452264 | [
"MIT"
] | null | null | null | LSTM/graphs/graph1.py | Anurag14/Inflow-prediction-Bhakra | d440ec552032084991878877ba5154ea2c452264 | [
"MIT"
] | 2 | 2019-06-02T17:48:48.000Z | 2022-03-13T01:29:16.000Z | import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt

# Load the reservoir time series and work on the raw ndarray:
# column 0 = time axis, columns 1 and 2 = reservoir level series.
df = pd.read_csv('../data1.csv')
df = df.values

# Time series vs. reservoir levels (ft) graph.
sns.set_style('darkgrid')
plt.plot(df[:, 0], df[:, 1])
plt.plot(df[:, 0], df[:, 2])
plt.xlabel('Time Series')
plt.ylabel('Reservoir Levels(ft)')
# Fixed typo in the displayed title: "Dialy" -> "Daily".
plt.title('Daily Bhakhra Reservoir Levels for past 20 years')
plt.show()
| 24.5625 | 61 | 0.725191 | import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv('../data1.csv')
df=df.values
sns.set_style('darkgrid')
plt.plot(df[:,0],df[:,1],label="")
plt.plot(df[:,0],df[:,2])
plt.xlabel('Time Series')
plt.ylabel('Reservoir Levels(ft)')
plt.title('Dialy Bhakhra Reservoir Levels for past 20 years')
plt.show()
| true | true |
f7214e4b71ef6b1633236cc12a531f99e4afc41e | 7,828 | py | Python | Python_Discord_Bot_JE/venv/Lib/site-packages/discord/asset.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | Python_Discord_Bot_JE/venv/Lib/site-packages/discord/asset.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | Python_Discord_Bot_JE/venv/Lib/site-packages/discord/asset.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import io
from .errors import DiscordException
from .errors import InvalidArgument
from . import utils
VALID_STATIC_FORMATS = frozenset({"jpeg", "jpg", "webp", "png"})
VALID_AVATAR_FORMATS = VALID_STATIC_FORMATS | {"gif"}
class Asset:
"""Represents a CDN asset on Discord.
.. container:: operations
.. describe:: str(x)
Returns the URL of the CDN asset.
.. describe:: len(x)
Returns the length of the CDN asset's URL.
.. describe:: bool(x)
Checks if the Asset has a URL.
.. describe:: x == y
Checks if the asset is equal to another asset.
.. describe:: x != y
Checks if the asset is not equal to another asset.
.. describe:: hash(x)
Returns the hash of the asset.
"""
__slots__ = ('_state', '_url')
BASE = 'https://cdn.discordapp.com'
    def __init__(self, state, url=None):
        # state: internal connection state object.
        # url: CDN path; None means an "empty" asset (bool(asset) is False,
        # per the class docstring).
        self._state = state
        self._url = url
    @classmethod
    def _from_avatar(cls, state, user, *, format=None, static_format='webp', size=1024):
        """Build the CDN asset for ``user``'s avatar.

        ``format=None`` auto-selects: ``'gif'`` for animated avatars,
        otherwise ``static_format``.  ``size`` must be a power of 2 between
        16 and 4096.  Raises ``InvalidArgument`` on invalid combinations.
        """
        if not utils.valid_icon_size(size):
            raise InvalidArgument("size must be a power of 2 between 16 and 4096")
        if format is not None and format not in VALID_AVATAR_FORMATS:
            raise InvalidArgument("format must be None or one of {}".format(VALID_AVATAR_FORMATS))
        if format == "gif" and not user.is_avatar_animated():
            raise InvalidArgument("non animated avatars do not support gif format")
        if static_format not in VALID_STATIC_FORMATS:
            raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))

        if user.avatar is None:
            # No custom avatar: fall back to the user's default avatar asset.
            return user.default_avatar_url

        if format is None:
            format = 'gif' if user.is_avatar_animated() else static_format

        return cls(state, '/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(user, format, size))
@classmethod
def _from_icon(cls, state, object, path):
if object.icon is None:
return cls(state)
url = '/{0}-icons/{1.id}/{1.icon}.jpg'.format(path, object)
return cls(state, url)
@classmethod
def _from_cover_image(cls, state, obj):
if obj.cover_image is None:
return cls(state)
url = '/app-assets/{0.id}/store/{0.cover_image}.jpg'.format(obj)
return cls(state, url)
@classmethod
def _from_guild_image(cls, state, id, hash, key, *, format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format not in VALID_STATIC_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_STATIC_FORMATS))
if hash is None:
return cls(state)
url = '/{key}/{0}/{1}.{2}?size={3}'
return cls(state, url.format(id, hash, format, size, key=key))
@classmethod
def _from_guild_icon(cls, state, guild, *, format=None, static_format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format is not None and format not in VALID_AVATAR_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_AVATAR_FORMATS))
if format == "gif" and not guild.is_icon_animated():
raise InvalidArgument("non animated guild icons do not support gif format")
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))
if guild.icon is None:
return cls(state)
if format is None:
format = 'gif' if guild.is_icon_animated() else static_format
return cls(state, '/icons/{0.id}/{0.icon}.{1}?size={2}'.format(guild, format, size))
def __str__(self):
return self.BASE + self._url if self._url is not None else ''
def __len__(self):
if self._url:
return len(self.BASE + self._url)
return 0
def __bool__(self):
return self._url is not None
def __repr__(self):
return '<Asset url={0._url!r}>'.format(self)
def __eq__(self, other):
return isinstance(other, Asset) and self._url == other._url
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._url)
async def read(self):
"""|coro|
Retrieves the content of this asset as a :class:`bytes` object.
.. warning::
:class:`PartialEmoji` won't have a connection state if user created,
and a URL won't be present if a custom image isn't associated with
the asset, e.g. a guild with no custom icon.
.. versionadded:: 1.1
Raises
------
DiscordException
There was no valid URL or internal connection state.
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
Returns
-------
:class:`bytes`
The content of the asset.
"""
if not self._url:
raise DiscordException('Invalid asset (no URL provided)')
if self._state is None:
raise DiscordException('Invalid state (no ConnectionState provided)')
return await self._state.http.get_from_cdn(self.BASE + self._url)
async def save(self, fp, *, seek_begin=True):
"""|coro|
Saves this asset into a file-like object.
Parameters
----------
fp: Union[BinaryIO, :class:`os.PathLike`]
Same as in :meth:`Attachment.save`.
seek_begin: :class:`bool`
Same as in :meth:`Attachment.save`.
Raises
------
DiscordException
There was no valid URL or internal connection state.
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
data = await self.read()
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data)
| 33.452991 | 99 | 0.605646 |
import io
from .errors import DiscordException
from .errors import InvalidArgument
from . import utils
VALID_STATIC_FORMATS = frozenset({"jpeg", "jpg", "webp", "png"})
VALID_AVATAR_FORMATS = VALID_STATIC_FORMATS | {"gif"}
class Asset:
__slots__ = ('_state', '_url')
BASE = 'https://cdn.discordapp.com'
def __init__(self, state, url=None):
self._state = state
self._url = url
@classmethod
def _from_avatar(cls, state, user, *, format=None, static_format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format is not None and format not in VALID_AVATAR_FORMATS:
raise InvalidArgument("format must be None or one of {}".format(VALID_AVATAR_FORMATS))
if format == "gif" and not user.is_avatar_animated():
raise InvalidArgument("non animated avatars do not support gif format")
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))
if user.avatar is None:
return user.default_avatar_url
if format is None:
format = 'gif' if user.is_avatar_animated() else static_format
return cls(state, '/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(user, format, size))
@classmethod
def _from_icon(cls, state, object, path):
if object.icon is None:
return cls(state)
url = '/{0}-icons/{1.id}/{1.icon}.jpg'.format(path, object)
return cls(state, url)
@classmethod
def _from_cover_image(cls, state, obj):
if obj.cover_image is None:
return cls(state)
url = '/app-assets/{0.id}/store/{0.cover_image}.jpg'.format(obj)
return cls(state, url)
@classmethod
def _from_guild_image(cls, state, id, hash, key, *, format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format not in VALID_STATIC_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_STATIC_FORMATS))
if hash is None:
return cls(state)
url = '/{key}/{0}/{1}.{2}?size={3}'
return cls(state, url.format(id, hash, format, size, key=key))
@classmethod
def _from_guild_icon(cls, state, guild, *, format=None, static_format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format is not None and format not in VALID_AVATAR_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_AVATAR_FORMATS))
if format == "gif" and not guild.is_icon_animated():
raise InvalidArgument("non animated guild icons do not support gif format")
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))
if guild.icon is None:
return cls(state)
if format is None:
format = 'gif' if guild.is_icon_animated() else static_format
return cls(state, '/icons/{0.id}/{0.icon}.{1}?size={2}'.format(guild, format, size))
def __str__(self):
return self.BASE + self._url if self._url is not None else ''
def __len__(self):
if self._url:
return len(self.BASE + self._url)
return 0
def __bool__(self):
return self._url is not None
def __repr__(self):
return '<Asset url={0._url!r}>'.format(self)
def __eq__(self, other):
return isinstance(other, Asset) and self._url == other._url
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._url)
async def read(self):
if not self._url:
raise DiscordException('Invalid asset (no URL provided)')
if self._state is None:
raise DiscordException('Invalid state (no ConnectionState provided)')
return await self._state.http.get_from_cdn(self.BASE + self._url)
async def save(self, fp, *, seek_begin=True):
data = await self.read()
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data)
| true | true |
f7214e57978a52886d37352d317afc38f1a60349 | 3,932 | py | Python | tests/unit/dataactvalidator/test_c23_award_financial_1.py | chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | 3de8cedf69d5a0c9fad8239734bd6291cf583936 | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_c23_award_financial_1.py | chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | 3de8cedf69d5a0c9fad8239734bd6291cf583936 | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_c23_award_financial_1.py | chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | 3de8cedf69d5a0c9fad8239734bd6291cf583936 | [
"CC0-1.0"
] | null | null | null | from random import choice
from string import ascii_uppercase, ascii_lowercase, digits
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory, AwardProcurementFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c23_award_financial_1'
def test_column_headers(database):
expected_subset = {"row_number", "transaction_obligated_amou_sum", "federal_action_obligation_sum"}
actual = set(query_columns(_FILE, database))
assert expected_subset <= actual
def test_success(database):
""" Test that a four digit object class with no flag is a success, and a three digit object class with a flag is a success"""
# Create a 12 character random piid
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_three = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid,
allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid,
allocation_transfer_agency = None)
# And add a row for a different piid
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two,
allocation_transfer_agency = None)
third_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = 123)
third_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
third_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1)
second_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -9999)
third_piid_ap_row = AwardProcurementFactory(piid = piid_three, federal_action_obligation = -9999)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one,
third_piid_row_one, first_ap_row, second_ap_row, third_ap_row, second_piid_ap_row, third_piid_ap_row,
third_piid_row_two])
assert errors == 0
def test_failure(database):
""" Test that a three digit object class with no flag is an error"""
# Create a 12 character random piid
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid, allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid, allocation_transfer_agency = None)
# And add a row that shouldn't be included
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two, allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
other_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -1111)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one, first_ap_row, second_ap_row, other_piid_ap_row])
assert errors == 2
| 65.533333 | 164 | 0.739827 | from random import choice
from string import ascii_uppercase, ascii_lowercase, digits
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory, AwardProcurementFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c23_award_financial_1'
def test_column_headers(database):
expected_subset = {"row_number", "transaction_obligated_amou_sum", "federal_action_obligation_sum"}
actual = set(query_columns(_FILE, database))
assert expected_subset <= actual
def test_success(database):
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_three = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid,
allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid,
allocation_transfer_agency = None)
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two,
allocation_transfer_agency = None)
third_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = 123)
third_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
third_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1)
second_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -9999)
third_piid_ap_row = AwardProcurementFactory(piid = piid_three, federal_action_obligation = -9999)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one,
third_piid_row_one, first_ap_row, second_ap_row, third_ap_row, second_piid_ap_row, third_piid_ap_row,
third_piid_row_two])
assert errors == 0
def test_failure(database):
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid, allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid, allocation_transfer_agency = None)
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two, allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
other_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -1111)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one, first_ap_row, second_ap_row, other_piid_ap_row])
assert errors == 2
| true | true |
f7214fd6196b206a0fc7264a73b8e0fd22653169 | 1,766 | py | Python | src/main-ja.py | junjihashimoto/wav2vec-2-nix | f104280586cf78d0fc5f280ea013f6bc676cd05e | [
"BSD-3-Clause"
] | null | null | null | src/main-ja.py | junjihashimoto/wav2vec-2-nix | f104280586cf78d0fc5f280ea013f6bc676cd05e | [
"BSD-3-Clause"
] | null | null | null | src/main-ja.py | junjihashimoto/wav2vec-2-nix | f104280586cf78d0fc5f280ea013f6bc676cd05e | [
"BSD-3-Clause"
] | null | null | null |
# https://huggingface.co/vumichien/wav2vec2-large-xlsr-japanese
import torch
import torchaudio
import librosa
from datasets import load_dataset
import MeCab
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
# config
wakati = MeCab.Tagger("-Owakati")
chars_to_ignore_regex = '[\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\,\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\、\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\。\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\.\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\「\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\」\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\…\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\?\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\・]'
# load data, processor and model
test_dataset = load_dataset("common_voice", "ja", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
model = Wav2Vec2ForCTC.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
resampler = lambda sr, y: librosa.resample(y.numpy().squeeze(), sr, 16_000)
# Preprocessing the datasets.
def speech_file_to_array_fn(batch):
batch["sentence"] = wakati.parse(batch["sentence"]).strip()
batch["sentence"] = re.sub(chars_to_ignore_regex,'', batch["sentence"]).strip()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(sampling_rate, speech_array).squeeze()
print(batch["sentence"])
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
| 47.72973 | 325 | 0.64043 |
import torch
import torchaudio
import librosa
from datasets import load_dataset
import MeCab
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
wakati = MeCab.Tagger("-Owakati")
chars_to_ignore_regex = '[\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\,\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\、\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\。\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\.\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\「\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\」\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\…\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\?\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\・]'
test_dataset = load_dataset("common_voice", "ja", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
model = Wav2Vec2ForCTC.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
resampler = lambda sr, y: librosa.resample(y.numpy().squeeze(), sr, 16_000)
def speech_file_to_array_fn(batch):
batch["sentence"] = wakati.parse(batch["sentence"]).strip()
batch["sentence"] = re.sub(chars_to_ignore_regex,'', batch["sentence"]).strip()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(sampling_rate, speech_array).squeeze()
print(batch["sentence"])
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
| true | true |
f72150878c28e84523ea2167e57b4bc5ae34cb23 | 1,024 | py | Python | educative/TreesBFS/zigzagTraversal.py | j-dags/Algos | 8201171c983bf8464b1d25526a16493966eb426f | [
"MIT"
] | null | null | null | educative/TreesBFS/zigzagTraversal.py | j-dags/Algos | 8201171c983bf8464b1d25526a16493966eb426f | [
"MIT"
] | null | null | null | educative/TreesBFS/zigzagTraversal.py | j-dags/Algos | 8201171c983bf8464b1d25526a16493966eb426f | [
"MIT"
] | null | null | null | def zigzagTraversal(root):
queue = [root] # initialize queue to root node
result = []
while queue: # iterate through loop while queue is not empty
arr = []
# levelSize prevents us from looping pasts current level in queue
levelSize = len(queue)
for _ in range(levelSize):
# these two lines act as .shift() method
curr = queue[0]
queue = queue[1:]
# .unshift() curr.val on odd levels
if len(result) % 2 == 0: arr.append(curr.val)
else: arr = [curr.val] + arr
# add child nodes to queue
if curr.left: queue.append(curr.left)
if curr.right: queue.append(curr.right)
result.append(arr)
return result
class Node:
def __init__(self, val):
self.val = val
self.right = None
self.left = None
one = Node(1)
two = Node(2)
three = Node(3)
four = Node(4)
five = Node(5)
six = Node(6)
seven = Node(7)
one.left = two
one.right = three
two.left = four
two.right = five
three.left = six
three.right = seven
print(zigzagTraversal(one))
| 20.897959 | 69 | 0.636719 | def zigzagTraversal(root):
queue = [root]
result = []
while queue:
arr = []
levelSize = len(queue)
for _ in range(levelSize):
curr = queue[0]
queue = queue[1:]
if len(result) % 2 == 0: arr.append(curr.val)
else: arr = [curr.val] + arr
if curr.left: queue.append(curr.left)
if curr.right: queue.append(curr.right)
result.append(arr)
return result
class Node:
def __init__(self, val):
self.val = val
self.right = None
self.left = None
one = Node(1)
two = Node(2)
three = Node(3)
four = Node(4)
five = Node(5)
six = Node(6)
seven = Node(7)
one.left = two
one.right = three
two.left = four
two.right = five
three.left = six
three.right = seven
print(zigzagTraversal(one))
| true | true |
f7215088d8da5bd6c6c28282619051368571f5b9 | 1,260 | py | Python | fix_size.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | 1 | 2020-11-16T17:11:43.000Z | 2020-11-16T17:11:43.000Z | fix_size.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | null | null | null | fix_size.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
import os
from os import listdir, getcwd
from os.path import join
import argparse
import cv2
classes = []
def convert_annotation(image, args):
if args.anno_dir:
anno_file = join(args.anno_dir, image.split('.')[0]) + '.xml'
if not os.path.isfile(anno_file):
return False
in_file = open(anno_file)
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
if (w <= 0 or h <= 0):
print('Fixing: '+anno_file)
img_file = anno_file.replace('Annotations', 'JPEGImages').replace('annotations', 'JPEGImages').replace('.xml','.jpg')
img = cv2.imread(img_file)
h, w, _ = img.shape
size.find('width').text=str(w)
size.find('height').text=str(h)
tree.write(anno_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--anno_dir",
required=True,
help="Directory for VOC annotation xml files")
args = parser.parse_args()
anno_files = listdir(args.anno_dir)
anno_files.sort()
for anno in anno_files:
convert_annotation(anno, args)
| 28.636364 | 125 | 0.618254 | import xml.etree.ElementTree as ET
import os
from os import listdir, getcwd
from os.path import join
import argparse
import cv2
classes = []
def convert_annotation(image, args):
if args.anno_dir:
anno_file = join(args.anno_dir, image.split('.')[0]) + '.xml'
if not os.path.isfile(anno_file):
return False
in_file = open(anno_file)
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
if (w <= 0 or h <= 0):
print('Fixing: '+anno_file)
img_file = anno_file.replace('Annotations', 'JPEGImages').replace('annotations', 'JPEGImages').replace('.xml','.jpg')
img = cv2.imread(img_file)
h, w, _ = img.shape
size.find('width').text=str(w)
size.find('height').text=str(h)
tree.write(anno_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--anno_dir",
required=True,
help="Directory for VOC annotation xml files")
args = parser.parse_args()
anno_files = listdir(args.anno_dir)
anno_files.sort()
for anno in anno_files:
convert_annotation(anno, args)
| true | true |
f721509f3f35df7cb0d1888befa5bc53be9b4653 | 422 | py | Python | tmm/apps/translation_management_tool/migrations/0006_auto_20211105_1444.py | 2567910/tmm | c36bbb508ed8ea4fa8e814af817c5d4f4ae69d4c | [
"MIT"
] | 3 | 2022-03-02T19:30:26.000Z | 2022-03-04T10:55:10.000Z | tmm/apps/translation_management_tool/migrations/0006_auto_20211105_1444.py | 2567910/tmm | c36bbb508ed8ea4fa8e814af817c5d4f4ae69d4c | [
"MIT"
] | 3 | 2022-03-08T12:25:16.000Z | 2022-03-16T22:30:55.000Z | tmm/apps/translation_management_tool/migrations/0006_auto_20211105_1444.py | 2567910/tmm | c36bbb508ed8ea4fa8e814af817c5d4f4ae69d4c | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-11-05 14:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translation_management_tool', '0005_auto_20211105_1418'),
]
operations = [
migrations.AlterField(
model_name='language',
name='languages',
field=models.CharField(blank=True, max_length=7),
),
]
| 22.210526 | 67 | 0.625592 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translation_management_tool', '0005_auto_20211105_1418'),
]
operations = [
migrations.AlterField(
model_name='language',
name='languages',
field=models.CharField(blank=True, max_length=7),
),
]
| true | true |
f7215171b2591e8446240124dc5466d1022604c1 | 631 | py | Python | locations/items.py | wessport/allthecolleges | 7741ed0bef7359d6d871963c527a3d9e31303c7c | [
"MIT"
] | null | null | null | locations/items.py | wessport/allthecolleges | 7741ed0bef7359d6d871963c527a3d9e31303c7c | [
"MIT"
] | null | null | null | locations/items.py | wessport/allthecolleges | 7741ed0bef7359d6d871963c527a3d9e31303c7c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# class AddressItem(scrapy.Item):
# # define the fields for your item here like:
# # name = scrapy.Field()
# pass
class AddressItem(scrapy.Item):
# define the fields for your item here:
school_id = scrapy.Field()
name = scrapy.Field()
street_address = scrapy.Field()
city = scrapy.Field()
state = scrapy.Field()
postcode = scrapy.Field()
ref = scrapy.Field()
website = scrapy.Field()
extras = scrapy.Field()
| 22.535714 | 52 | 0.652932 |
import scrapy
eld()
name = scrapy.Field()
street_address = scrapy.Field()
city = scrapy.Field()
state = scrapy.Field()
postcode = scrapy.Field()
ref = scrapy.Field()
website = scrapy.Field()
extras = scrapy.Field()
| true | true |
f72152194d185954a947a244a1109ed7161112ed | 26,894 | py | Python | tensorflow_estimator/contrib/estimator/python/estimator/rnn.py | CheukNgai/estimator | 673a50bd5ffa70d0672ce47e40f5075f1cbe0a62 | [
"Apache-2.0"
] | null | null | null | tensorflow_estimator/contrib/estimator/python/estimator/rnn.py | CheukNgai/estimator | 673a50bd5ffa70d0672ce47e40f5075f1cbe0a62 | [
"Apache-2.0"
] | null | null | null | tensorflow_estimator/contrib/estimator/python/estimator/rnn.py | CheukNgai/estimator | 673a50bd5ffa70d0672ce47e40f5075f1cbe0a62 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow_estimator.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as seq_fc
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator.canned import head as head_lib
from tensorflow_estimator.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import training_util
# The defaults are historical artifacts of the initial implementation, but seem
# reasonable choices.
_DEFAULT_LEARNING_RATE = 0.05
_DEFAULT_CLIP_NORM = 5.0
_CELL_TYPES = {'basic_rnn': rnn_cell.BasicRNNCell,
'lstm': rnn_cell.BasicLSTMCell,
'gru': rnn_cell.GRUCell}
# Indicates no value was provided by the user to a kwarg.
USE_DEFAULT = object()
def _single_rnn_cell(num_units, cell_type):
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, rnn_cell.RNNCell):
raise ValueError('Supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def _make_rnn_cell_fn(num_units, cell_type='basic_rnn'):
"""Convenience function to create `rnn_cell_fn` for canned RNN Estimators.
Args:
num_units: Iterable of integer number of hidden units per RNN layer.
cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
`'gru'`.
Returns:
A function that takes a single argument, an instance of
`tf.estimator.ModeKeys`, and returns an instance derived from
`tf.nn.rnn_cell.RNNCell`.
Raises:
ValueError: If cell_type is not supported.
"""
def rnn_cell_fn(mode):
# Unused. Part of the rnn_cell_fn interface since user specified functions
# may need different behavior across modes (e.g. dropout).
del mode
cells = [_single_rnn_cell(n, cell_type) for n in num_units]
if len(cells) == 1:
return cells[0]
return rnn_cell.MultiRNNCell(cells)
return rnn_cell_fn
def _concatenate_context_input(sequence_input, context_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
def _select_last_activations(activations, sequence_lengths):
"""Selects the nth set of activations for each n in `sequence_length`.
Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
`None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
`sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.
Args:
activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.
Returns:
A `Tensor` of shape `[batch_size, k]`.
"""
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
output_units = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
start_indices = math_ops.to_int64(
math_ops.range(batch_size) * padded_length)
last_indices = start_indices + sequence_lengths - 1
reshaped_activations = array_ops.reshape(
activations, [batch_size * padded_length, output_units])
last_activations = array_ops.gather(reshaped_activations, last_indices)
last_activations.set_shape([activations.shape[0], activations.shape[2]])
return last_activations
def _rnn_logit_fn_builder(output_units, rnn_cell_fn, sequence_feature_columns,
context_feature_columns, input_layer_partitioner,
return_sequences=False):
"""Function builder for a rnn logit_fn.
Args:
output_units: An int indicating the dimension of the logit layer.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell`.
sequence_feature_columns: An iterable containing the `FeatureColumn`s
that represent sequential input.
context_feature_columns: An iterable containing the `FeatureColumn`s
that represent contextual input.
input_layer_partitioner: Partitioner for input layer.
return_sequences: A boolean indicating whether to return the last output
in the output sequence, or the full sequence.
Returns:
A logit_fn (see below).
Raises:
ValueError: If output_units is not an int.
"""
if not isinstance(output_units, int):
raise ValueError('output_units must be an int. Given type: {}'.format(
type(output_units)))
def rnn_logit_fn(features, mode):
"""Recurrent Neural Network logit_fn.
Args:
features: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
mode: Optional. Specifies if this training, evaluation or prediction. See
`ModeKeys`.
Returns:
A `Tensor` representing the logits.
"""
with variable_scope.variable_scope(
'sequence_input_layer',
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner):
sequence_input, sequence_length = seq_fc.sequence_input_layer(
features=features, feature_columns=sequence_feature_columns)
summary.histogram('sequence_length', sequence_length)
if context_feature_columns:
context_input = feature_column_lib.input_layer(
features=features,
feature_columns=context_feature_columns)
sequence_input = _concatenate_context_input(sequence_input,
context_input)
cell = rnn_cell_fn(mode)
# Ignore output state.
rnn_outputs, _ = rnn.dynamic_rnn(
cell=cell,
inputs=sequence_input,
sequence_length=sequence_length,
dtype=dtypes.float32,
time_major=False)
if not return_sequences:
rnn_outputs = _select_last_activations(rnn_outputs, sequence_length)
with variable_scope.variable_scope('logits', values=(rnn_outputs,)):
logits = core_layers.dense(
rnn_outputs,
units=output_units,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer())
return logits
return rnn_logit_fn
def _rnn_model_fn(features,
labels,
mode,
head,
rnn_cell_fn,
sequence_feature_columns,
context_feature_columns,
return_sequences=False,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
"""Recurrent Neural Net model_fn.
Args:
features: dict of `Tensor` and `SparseTensor` objects returned from
`input_fn`.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] with labels.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `head_lib._Head` instance.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell`.
sequence_feature_columns: Iterable containing `FeatureColumn`s that
represent sequential model inputs.
context_feature_columns: Iterable containing `FeatureColumn`s that
represent model inputs not associated with a specific timestep.
return_sequences: A boolean indicating whether to return the last output
in the output sequence, or the full sequence.
optimizer: String, `tf.Optimizer` object, or callable that creates the
optimizer to use for training. If not specified, will use the Adagrad
optimizer with a default learning rate of 0.05 and gradient clip norm of
5.0.
input_layer_partitioner: Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Returns:
An `EstimatorSpec` instance.
Raises:
ValueError: If mode or optimizer is invalid, or features has the wrong type.
"""
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
# If user does not provide an optimizer instance, use the optimizer specified
# by the string with default learning rate and gradient clipping.
if not isinstance(optimizer, optimizer_lib.Optimizer):
optimizer = optimizers.get_optimizer_instance(
optimizer, learning_rate=_DEFAULT_LEARNING_RATE)
optimizer = extenders.clip_gradients_by_norm(optimizer, _DEFAULT_CLIP_NORM)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
'rnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
logit_fn = _rnn_logit_fn_builder(
output_units=head.logits_dimension,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
input_layer_partitioner=input_layer_partitioner,
return_sequences=return_sequences)
logits = logit_fn(features=features, mode=mode)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type):
"""Assert arguments are valid and return rnn_cell_fn."""
if rnn_cell_fn and (num_units or cell_type != USE_DEFAULT):
raise ValueError(
'num_units and cell_type must not be specified when using rnn_cell_fn'
)
if not rnn_cell_fn:
if cell_type == USE_DEFAULT:
cell_type = 'basic_rnn'
rnn_cell_fn = _make_rnn_cell_fn(num_units, cell_type)
return rnn_cell_fn
class RNNClassifier(estimator.Estimator):
"""A classifier for TensorFlow RNN models.
Trains a recurrent neural network model to classify instances into one of
multiple classes.
Example:
```python
token_sequence = sequence_categorical_column_with_hash_bucket(...)
token_emb = embedding_column(categorical_column=token_sequence, ...)
estimator = RNNClassifier(
sequence_feature_columns=[token_emb],
num_units=[32, 16], cell_type='lstm')
# Input builders
def input_fn_train: # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval: # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict: # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column` is not `None`, a feature with
`key=weight_column` whose value is a `Tensor`.
* for each `column` in `sequence_feature_columns`:
- a feature with `key=column.name` whose `value` is a `SparseTensor`.
* for each `column` in `context_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using softmax cross entropy.
@compatibility(eager)
Estimators are not compatible with eager execution.
@end_compatibility
"""
def __init__(self,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
input_layer_partitioner=None,
config=None):
"""Initializes a `RNNClassifier` instance.
Args:
sequence_feature_columns: An iterable containing the `FeatureColumn`s
that represent sequential input. All items in the set should either be
sequence columns (e.g. `sequence_numeric_column`) or constructed from
one (e.g. `embedding_column` with `sequence_categorical_column_*` as
input).
context_feature_columns: An iterable containing the `FeatureColumn`s
for contextual input. The data represented by these columns will be
replicated and given to the RNN at each timestep. These columns must be
instances of classes derived from `_DenseColumn` such as
`numeric_column`, not the sequential variants.
num_units: Iterable of integer number of hidden units per RNN layer. If
set, `cell_type` must also be specified and `rnn_cell_fn` must be
`None`.
cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
`'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`
must be `None`.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to
construct the RNN. If set, `num_units` and `cell_type` cannot be set.
This is for advanced users who need additional customization beyond
`num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is
needed for stacked RNNs.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
label_vocabulary: A list of strings represents possible label values. If
given, labels must be string type and have any value in
`label_vocabulary`. If it is not given, that means labels are
already encoded as integer or float within [0, 1] for `n_classes=2` and
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
Also there will be errors if vocabulary is not provided and labels are
string.
optimizer: An instance of `tf.Optimizer` or string specifying optimizer
type. Defaults to Adagrad optimizer.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Raises:
ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not
compatible.
"""
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=False,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class RNNEstimator(estimator.Estimator):
"""An Estimator for TensorFlow RNN models with user-specified head.
Example:
```python
token_sequence = sequence_categorical_column_with_hash_bucket(...)
token_emb = embedding_column(categorical_column=token_sequence, ...)
estimator = RNNEstimator(
head=tf.contrib.estimator.regression_head(),
sequence_feature_columns=[token_emb],
num_units=[32, 16], cell_type='lstm')
# Or with custom RNN cell:
def rnn_cell_fn(mode):
cells = [ tf.contrib.rnn.LSTMCell(size) for size in [32, 16] ]
if mode == tf.estimator.ModeKeys.TRAIN:
cells = [ tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=0.5)
for cell in cells ]
return tf.contrib.rnn.MultiRNNCell(cells)
estimator = RNNEstimator(
head=tf.contrib.estimator.regression_head(),
sequence_feature_columns=[token_emb],
rnn_cell_fn=rnn_cell_fn)
# Input builders
def input_fn_train: # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval: # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict: # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if the head's `weight_column` is not `None`, a feature with
`key=weight_column` whose value is a `Tensor`.
* for each `column` in `sequence_feature_columns`:
- a feature with `key=column.name` whose `value` is a `SparseTensor`.
* for each `column` in `context_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss and predicted output are determined by the specified head.
@compatibility(eager)
Estimators are not compatible with eager execution.
@end_compatibility
"""
def __init__(self,
head,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
return_sequences=False,
model_dir=None,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
"""Initializes a `RNNEstimator` instance.
Args:
head: A `_Head` instance constructed with a method such as
`tf.contrib.estimator.multi_label_head`. This specifies the model's
output and loss function to be optimized.
sequence_feature_columns: An iterable containing the `FeatureColumn`s
that represent sequential input. All items in the set should either be
sequence columns (e.g. `sequence_numeric_column`) or constructed from
one (e.g. `embedding_column` with `sequence_categorical_column_*` as
input).
context_feature_columns: An iterable containing the `FeatureColumn`s
for contextual input. The data represented by these columns will be
replicated and given to the RNN at each timestep. These columns must be
instances of classes derived from `_DenseColumn` such as
`numeric_column`, not the sequential variants.
num_units: Iterable of integer number of hidden units per RNN layer. If
set, `cell_type` must also be specified and `rnn_cell_fn` must be
`None`.
cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
`'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`
must be `None`.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to
construct the RNN. If set, `num_units` and `cell_type` cannot be set.
This is for advanced users who need additional customization beyond
`num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is
needed for stacked RNNs.
return_sequences: A boolean indicating whether to return the last output
in the output sequence, or the full sequence.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
optimizer: An instance of `tf.Optimizer` or string specifying optimizer
type. Defaults to Adagrad optimizer.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Raises:
ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not
compatible.
"""
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=return_sequences,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
| 41.696124 | 112 | 0.704618 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow_estimator.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as seq_fc
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator.canned import head as head_lib
from tensorflow_estimator.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import training_util
_DEFAULT_LEARNING_RATE = 0.05
_DEFAULT_CLIP_NORM = 5.0
_CELL_TYPES = {'basic_rnn': rnn_cell.BasicRNNCell,
'lstm': rnn_cell.BasicLSTMCell,
'gru': rnn_cell.GRUCell}
USE_DEFAULT = object()
def _single_rnn_cell(num_units, cell_type):
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, rnn_cell.RNNCell):
raise ValueError('Supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def _make_rnn_cell_fn(num_units, cell_type='basic_rnn'):
def rnn_cell_fn(mode):
del mode
cells = [_single_rnn_cell(n, cell_type) for n in num_units]
if len(cells) == 1:
return cells[0]
return rnn_cell.MultiRNNCell(cells)
return rnn_cell_fn
def _concatenate_context_input(sequence_input, context_input):
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
def _select_last_activations(activations, sequence_lengths):
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
output_units = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
start_indices = math_ops.to_int64(
math_ops.range(batch_size) * padded_length)
last_indices = start_indices + sequence_lengths - 1
reshaped_activations = array_ops.reshape(
activations, [batch_size * padded_length, output_units])
last_activations = array_ops.gather(reshaped_activations, last_indices)
last_activations.set_shape([activations.shape[0], activations.shape[2]])
return last_activations
def _rnn_logit_fn_builder(output_units, rnn_cell_fn, sequence_feature_columns,
context_feature_columns, input_layer_partitioner,
return_sequences=False):
if not isinstance(output_units, int):
raise ValueError('output_units must be an int. Given type: {}'.format(
type(output_units)))
def rnn_logit_fn(features, mode):
with variable_scope.variable_scope(
'sequence_input_layer',
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner):
sequence_input, sequence_length = seq_fc.sequence_input_layer(
features=features, feature_columns=sequence_feature_columns)
summary.histogram('sequence_length', sequence_length)
if context_feature_columns:
context_input = feature_column_lib.input_layer(
features=features,
feature_columns=context_feature_columns)
sequence_input = _concatenate_context_input(sequence_input,
context_input)
cell = rnn_cell_fn(mode)
rnn_outputs, _ = rnn.dynamic_rnn(
cell=cell,
inputs=sequence_input,
sequence_length=sequence_length,
dtype=dtypes.float32,
time_major=False)
if not return_sequences:
rnn_outputs = _select_last_activations(rnn_outputs, sequence_length)
with variable_scope.variable_scope('logits', values=(rnn_outputs,)):
logits = core_layers.dense(
rnn_outputs,
units=output_units,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer())
return logits
return rnn_logit_fn
def _rnn_model_fn(features,
labels,
mode,
head,
rnn_cell_fn,
sequence_feature_columns,
context_feature_columns,
return_sequences=False,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
if not isinstance(optimizer, optimizer_lib.Optimizer):
optimizer = optimizers.get_optimizer_instance(
optimizer, learning_rate=_DEFAULT_LEARNING_RATE)
optimizer = extenders.clip_gradients_by_norm(optimizer, _DEFAULT_CLIP_NORM)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
'rnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
logit_fn = _rnn_logit_fn_builder(
output_units=head.logits_dimension,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
input_layer_partitioner=input_layer_partitioner,
return_sequences=return_sequences)
logits = logit_fn(features=features, mode=mode)
def _train_op_fn(loss):
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type):
if rnn_cell_fn and (num_units or cell_type != USE_DEFAULT):
raise ValueError(
'num_units and cell_type must not be specified when using rnn_cell_fn'
)
if not rnn_cell_fn:
if cell_type == USE_DEFAULT:
cell_type = 'basic_rnn'
rnn_cell_fn = _make_rnn_cell_fn(num_units, cell_type)
return rnn_cell_fn
class RNNClassifier(estimator.Estimator):
def __init__(self,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
input_layer_partitioner=None,
config=None):
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=False,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class RNNEstimator(estimator.Estimator):
def __init__(self,
head,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
return_sequences=False,
model_dir=None,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=return_sequences,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
| true | true |
f72153ee7a89b3a20eafa2cc82f8c626ef1eda68 | 688 | py | Python | uniseg/graphemeclustertest.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | 2 | 2019-12-28T09:28:43.000Z | 2021-05-11T02:01:47.000Z | uniseg/graphemeclustertest.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | null | null | null | uniseg/graphemeclustertest.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | 2 | 2019-07-23T09:11:55.000Z | 2019-10-02T17:13:53.000Z | #!/usr/bin/env python
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import doctest
import unittest
from . import graphemecluster
from .db import iter_grapheme_cluster_break_tests
from .test import implement_break_tests
@implement_break_tests(graphemecluster.grapheme_cluster_boundaries,
iter_grapheme_cluster_break_tests())
class GraphemeClusterTest(unittest.TestCase):
pass
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(graphemecluster))
return tests
if __name__ == '__main__':
unittest.main()
| 22.933333 | 67 | 0.694767 |
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import doctest
import unittest
from . import graphemecluster
from .db import iter_grapheme_cluster_break_tests
from .test import implement_break_tests
@implement_break_tests(graphemecluster.grapheme_cluster_boundaries,
iter_grapheme_cluster_break_tests())
class GraphemeClusterTest(unittest.TestCase):
pass
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(graphemecluster))
return tests
if __name__ == '__main__':
unittest.main()
| true | true |
f721549c945034a707dcd61c8eb272e55a908d06 | 6,155 | py | Python | tests/pytest_extension/meta/test_all.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 213 | 2018-07-05T21:21:21.000Z | 2022-03-22T04:54:53.000Z | tests/pytest_extension/meta/test_all.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 259 | 2018-06-22T16:46:33.000Z | 2022-03-23T19:39:15.000Z | tests/pytest_extension/meta/test_all.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 27 | 2019-03-26T12:46:49.000Z | 2022-02-21T16:56:23.000Z | import ast
import os
import shlex
import re
from os.path import join, dirname, isdir, exists
import pytest
from pytest_cases.common_mini_six import string_types
# Make the list of all tests that we will have to execute (each in an independent pytest runner)
THIS_DIR = dirname(__file__)
tests_raw_folder = join(THIS_DIR, 'raw')
test_files = [f for f in os.listdir(tests_raw_folder) if not f.startswith('_')]
META_REGEX = re.compile(
"""^(# META
# )(?P<asserts_dct>.*)(
# END META)
.*""")
@pytest.mark.parametrize('test_to_run', test_files, ids=str)
def test_run_all_tests(test_to_run, testdir):
"""
This is a meta-test. It is executed for each test file in the 'raw' folder.
For each of them, the file is retrieved and the expected test results are read from its first lines.
Then a dedicated pytest runner is run on this file, and the results are compared with the expected ones.
See https://docs.pytest.org/en/latest/writing_plugins.html
:param test_to_run:
:param testdir:
:return:
"""
cmdargs = []
conf_file_path = None
test_to_run_path = join(tests_raw_folder, test_to_run)
if isdir(test_to_run_path):
test_folder_path = test_to_run_path
# check if there is a cmdargs file
cmdargs_file_path = join(test_folder_path, "cmdargs.txt")
if exists(cmdargs_file_path):
with open(cmdargs_file_path) as c:
cmdargs = c.read()
cmdargs = process_cmdargs(cmdargs)
# check if there is a conf file
conf_file_path = join(test_folder_path, "conf.py")
if exists(conf_file_path):
with open(conf_file_path) as c:
cfg_contents = c.read()
# Create a temporary conftest.py file
print("\nConfig contents: %s" % cfg_contents)
testdir.makeconftest(cfg_contents)
# the test file should have the same name than the dir
test_to_run = test_to_run + ".py"
test_to_run_path = join(test_folder_path, test_to_run)
if not exists(test_to_run_path):
raise ValueError("Test file %s not found in folder %s" % (test_to_run, test_folder_path))
with open(test_to_run_path) as f:
# create a temporary pytest test file
test_file_contents = f.read()
testdir.makepyfile(test_file_contents)
# Grab the expected things to check when this is executed
m = META_REGEX.match(test_file_contents)
if m is None:
raise ValueError("test file '%s' does not contain the META-header" % test_to_run)
asserts_dct_str = m.groupdict()['asserts_dct']
asserts_dct = ast.literal_eval(asserts_dct_str)
# Here we run pytest
print("\nTesting that running pytest on file %s with config file %s results in %s."
"" % (test_to_run, conf_file_path, str(asserts_dct)))
print("For debug, temp dir is: %s" % testdir.tmpdir)
# protect against pycharm fiddling with the config
from _pytest import config
jb_prepareconfig = config._prepareconfig
if jb_prepareconfig.__module__ != config.get_config.__module__:
# we are in pycharm ! Fix that
config._prepareconfig = get_pytest_prepare_config()
# run
# first = testdir.runpytest("--collect-only", "-p", "no:cacheprovider") # ("-q")
# outfirst = "\n".join(first.outlines)
# assert "collected 1 items" in outfirst
# ********* RUN *********
result = testdir.runpytest(*cmdargs) # ("-q")
# put back the PyCharm hack
config._prepareconfig = jb_prepareconfig
# Here we check that everything is ok
try:
result.assert_outcomes(**asserts_dct)
except Exception as e:
err = Exception("Error while asserting that %s results in %s. Actual results: %s"
"" % (test_to_run, str(asserts_dct), result.parseoutcomes()))
err.__cause__ = e
raise err
def get_pytest_prepare_config(dynamic=False):
    """Return a replacement for ``_pytest.config._prepareconfig``.

    Used to undo PyCharm's monkeypatching of pytest's config preparation
    (see the caller, which detects the patched module and swaps this in).

    :param dynamic: if True, the source of the genuine ``_prepareconfig`` is
        scraped from the installed ``_pytest/config`` module and re-executed;
        otherwise a vendored static copy of that function is returned.
    :return: a callable with the ``_prepareconfig(args=None, plugins=None)``
        signature.
    """
    import py
    import shlex
    if dynamic:
        from _pytest import config
        # Scrape the source lines of the real ``_prepareconfig`` from the
        # installed pytest: everything from its ``def`` line up to (but not
        # including) the next top-level, non-indented line.
        with open(config.__file__) as cfg_file_original:
            _capture = False
            all_lines = []
            for l in cfg_file_original.readlines():
                if l.startswith("def _prepareconfig"):
                    _capture = True
                    all_lines.append(l)
                elif _capture:
                    if l.startswith(" "):
                        all_lines.append(l)
                    else:
                        break
        from _pytest.config import get_config
        g = globals()
        l = locals()  # NOTE: rebinds ``l``; the loop variable above is discarded here
        prepare_cfg_code = "".join(all_lines)
        # print(prepare_cfg_code)
        # NOTE(review): the globals/locals arguments look swapped (locals() is
        # passed where exec expects globals); it still works because the
        # function object is fetched from ``g`` — exec's locals mapping — below.
        exec(prepare_cfg_code, l, g)
        real_prepare_config = g['_prepareconfig']
    else:
        import sys
        from _pytest.config import get_config
        # Static copy of pytest's ``_prepareconfig`` (pytest 3.x era —
        # presumably matches the scraped version above; TODO confirm on upgrade).
        def real_prepare_config(args=None, plugins=None):
            if args is None:
                args = sys.argv[1:]
            elif isinstance(args, py.path.local):
                args = [str(args)]
            elif not isinstance(args, (tuple, list)):
                if not isinstance(args, string_types):
                    raise ValueError("not a string or argument list: %r" % (args,))
                args = shlex.split(args, posix=sys.platform != "win32")
            config = get_config()
            pluginmanager = config.pluginmanager
            try:
                if plugins:
                    for plugin in plugins:
                        if isinstance(plugin, py.builtin._basestring):
                            pluginmanager.consider_pluginarg(plugin)
                        else:
                            pluginmanager.register(plugin)
                return pluginmanager.hook.pytest_cmdline_parse(
                    pluginmanager=pluginmanager, args=args)
            except BaseException:
                # make sure a half-configured pytest is torn down again
                config._ensure_unconfigure()
                raise
    return real_prepare_config
def process_cmdargs(cmdargs):
    """Tokenize the raw contents of a ``cmdargs.txt`` file into an argv-style list."""
    tokens = shlex.split(cmdargs)
    return tokens
import ast
import os
import shlex
import re
from os.path import join, dirname, isdir, exists
import pytest
from pytest_cases.common_mini_six import string_types
# Folder containing the raw meta-test files/folders
THIS_DIR = dirname(__file__)
tests_raw_folder = join(THIS_DIR, 'raw')
# Every entry in 'raw' that does not start with an underscore is a meta-test
test_files = [f for f in os.listdir(tests_raw_folder) if not f.startswith('_')]
# Matches the '# META ... # END META' header at the top of each raw test file;
# the expected pytest outcomes are captured in the 'asserts_dct' group as a
# python dict literal.
META_REGEX = re.compile(
    """^(# META
# )(?P<asserts_dct>.*)(
# END META)
.*""")
@pytest.mark.parametrize('test_to_run', test_files, ids=str)
def test_run_all_tests(test_to_run, testdir):
    """
    This is a meta-test. It is executed for each test file in the 'raw' folder.
    For each of them, the file is retrieved and the expected test results are read from its first lines.
    Then a dedicated pytest runner is run on this file, and the results are compared with the expected ones.
    See https://docs.pytest.org/en/latest/writing_plugins.html

    :param test_to_run: file or folder name inside the 'raw' folder
    :param testdir: pytest's testdir fixture (isolated temporary pytest session)
    :return:
    """
    cmdargs = []
    conf_file_path = None
    test_to_run_path = join(tests_raw_folder, test_to_run)
    if isdir(test_to_run_path):
        test_folder_path = test_to_run_path
        # check if there is a cmdargs file with extra command line arguments
        cmdargs_file_path = join(test_folder_path, "cmdargs.txt")
        if exists(cmdargs_file_path):
            with open(cmdargs_file_path) as c:
                cmdargs = c.read()
            cmdargs = process_cmdargs(cmdargs)
        # check if there is a conf file to use as a temporary conftest.py
        conf_file_path = join(test_folder_path, "conf.py")
        if exists(conf_file_path):
            with open(conf_file_path) as c:
                cfg_contents = c.read()
            # Create a temporary conftest.py file
            print("\nConfig contents: %s" % cfg_contents)
            testdir.makeconftest(cfg_contents)
        # the test file should have the same name than the dir
        test_to_run = test_to_run + ".py"
        test_to_run_path = join(test_folder_path, test_to_run)
        if not exists(test_to_run_path):
            raise ValueError("Test file %s not found in folder %s" % (test_to_run, test_folder_path))
    with open(test_to_run_path) as f:
        # create a temporary pytest test file
        test_file_contents = f.read()
        testdir.makepyfile(test_file_contents)
    # Grab the expected things to check from the META header
    m = META_REGEX.match(test_file_contents)
    if m is None:
        raise ValueError("test file '%s' does not contain the META-header" % test_to_run)
    asserts_dct_str = m.groupdict()['asserts_dct']
    asserts_dct = ast.literal_eval(asserts_dct_str)
    # Here we run pytest
    print("\nTesting that running pytest on file %s with config file %s results in %s."
          "" % (test_to_run, conf_file_path, str(asserts_dct)))
    print("For debug, temp dir is: %s" % testdir.tmpdir)
    # protect against pycharm fiddling with the pytest config
    from _pytest import config
    jb_prepareconfig = config._prepareconfig
    if jb_prepareconfig.__module__ != config.get_config.__module__:
        # we are in pycharm ! Fix that
        config._prepareconfig = get_pytest_prepare_config()
    # ********* RUN *********
    result = testdir.runpytest(*cmdargs)
    # put back the PyCharm hack
    config._prepareconfig = jb_prepareconfig
    # Here we check that everything is ok
    try:
        result.assert_outcomes(**asserts_dct)
    except Exception as e:
        err = Exception("Error while asserting that %s results in %s. Actual results: %s"
                        "" % (test_to_run, str(asserts_dct), result.parseoutcomes()))
        err.__cause__ = e
        raise err
def get_pytest_prepare_config(dynamic=False):
    """Return a replacement for ``_pytest.config._prepareconfig``.

    Used to undo PyCharm's monkeypatching of pytest's config preparation.

    :param dynamic: if True, the source of the genuine ``_prepareconfig`` is
        scraped from the installed ``_pytest/config`` module and re-executed;
        otherwise a vendored static copy of that function is returned.
    :return: a callable with the ``_prepareconfig(args=None, plugins=None)``
        signature.
    """
    import py
    import shlex
    if dynamic:
        from _pytest import config
        # Scrape the source lines of the real ``_prepareconfig`` from the
        # installed pytest: everything from its ``def`` line up to (but not
        # including) the next top-level, non-indented line.
        with open(config.__file__) as cfg_file_original:
            _capture = False
            all_lines = []
            for l in cfg_file_original.readlines():
                if l.startswith("def _prepareconfig"):
                    _capture = True
                    all_lines.append(l)
                elif _capture:
                    if l.startswith(" "):
                        all_lines.append(l)
                    else:
                        break
        from _pytest.config import get_config
        g = globals()
        l = locals()  # NOTE: rebinds ``l``; the loop variable above is discarded here
        prepare_cfg_code = "".join(all_lines)
        # NOTE(review): the globals/locals arguments look swapped (locals() is
        # passed where exec expects globals); it still works because the
        # function object is fetched from ``g`` — exec's locals mapping — below.
        exec(prepare_cfg_code, l, g)
        real_prepare_config = g['_prepareconfig']
    else:
        import sys
        from _pytest.config import get_config
        # Static copy of pytest's ``_prepareconfig`` (pytest 3.x era —
        # presumably matches the scraped version above; TODO confirm on upgrade).
        def real_prepare_config(args=None, plugins=None):
            if args is None:
                args = sys.argv[1:]
            elif isinstance(args, py.path.local):
                args = [str(args)]
            elif not isinstance(args, (tuple, list)):
                if not isinstance(args, string_types):
                    raise ValueError("not a string or argument list: %r" % (args,))
                args = shlex.split(args, posix=sys.platform != "win32")
            config = get_config()
            pluginmanager = config.pluginmanager
            try:
                if plugins:
                    for plugin in plugins:
                        if isinstance(plugin, py.builtin._basestring):
                            pluginmanager.consider_pluginarg(plugin)
                        else:
                            pluginmanager.register(plugin)
                return pluginmanager.hook.pytest_cmdline_parse(
                    pluginmanager=pluginmanager, args=args)
            except BaseException:
                # make sure a half-configured pytest is torn down again
                config._ensure_unconfigure()
                raise
    return real_prepare_config
def process_cmdargs(cmdargs):
    """Tokenize the raw contents of a ``cmdargs.txt`` file into an argv-style list."""
    return shlex.split(cmdargs)
| true | true |
f72155b9712b48098172994163d1909c6bd06e2b | 959 | py | Python | django_mc2p/__init__.py | mc2p/mc2p-django | a8a245d0a2783a0199e74d2d0396c397c056f0f6 | [
"BSD-2-Clause"
] | null | null | null | django_mc2p/__init__.py | mc2p/mc2p-django | a8a245d0a2783a0199e74d2d0396c397c056f0f6 | [
"BSD-2-Clause"
] | null | null | null | django_mc2p/__init__.py | mc2p/mc2p-django | a8a245d0a2783a0199e74d2d0396c397c056f0f6 | [
"BSD-2-Clause"
] | null | null | null | from mc2p import MC2PClient as MC2PClientPython
# Package metadata
__title__ = 'MyChoice2Pay Django'
__version__ = '0.1.3'
__author__ = 'MyChoice2Pay'
__license__ = 'BSD 2-Clause'
__copyright__ = 'Copyright 2017 MyChoice2Pay'
# Version synonym
VERSION = __version__
# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'
# Default datetime input and output formats
ISO_8601 = 'iso-8601'
# Dotted path to the AppConfig Django should load for this package
default_app_config = 'django_mc2p.apps.DjangoMC2PConfig'
class MC2PClient(MC2PClientPython):
    """
    Wrapper of the Python MC2PClient that reads its credentials from the DB.
    """
    def __init__(self):
        """
        Initializes a MC2PClient getting key and secret key from the
        MC2PConfig row stored in the database.

        If the configuration cannot be read (row missing, table not migrated
        yet, ...) the client is created with empty credentials instead of
        failing at instantiation time.
        """
        from .models import MC2PConfig
        try:
            mc2p_config = MC2PConfig.objects.get()
            key = mc2p_config.key
            secret_key = mc2p_config.secret_key
        except Exception:
            # Was a bare ``except:`` — narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed; any DB/config error
            # still falls back to empty credentials.
            key = ''
            secret_key = ''
        super(MC2PClient, self).__init__(key, secret_key)
from mc2p import MC2PClient as MC2PClientPython
# Package metadata
__title__ = 'MyChoice2Pay Django'
__version__ = '0.1.3'
__author__ = 'MyChoice2Pay'
__license__ = 'BSD 2-Clause'
__copyright__ = 'Copyright 2017 MyChoice2Pay'
# Version synonym
VERSION = __version__
# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'
# Default datetime input and output format identifier
ISO_8601 = 'iso-8601'
# Dotted path to the AppConfig Django should load for this package
default_app_config = 'django_mc2p.apps.DjangoMC2PConfig'
class MC2PClient(MC2PClientPython):
    """
    Wrapper of the Python MC2PClient that reads its credentials from the DB.
    """
    def __init__(self):
        """
        Initializes a MC2PClient getting key and secret key from the
        MC2PConfig row stored in the database.

        If the configuration cannot be read (row missing, table not migrated
        yet, ...) the client is created with empty credentials instead of
        failing at instantiation time.
        """
        from .models import MC2PConfig
        try:
            mc2p_config = MC2PConfig.objects.get()
            key = mc2p_config.key
            secret_key = mc2p_config.secret_key
        except Exception:
            # Was a bare ``except:`` — narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed; any DB/config error
            # still falls back to empty credentials.
            key = ''
            secret_key = ''
        super(MC2PClient, self).__init__(key, secret_key)
| true | true |
f721568c58cfd884ff05085466a0d5440678835f | 19,025 | py | Python | nnunet/inference/pretrained_models/download_pretrained_model.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 12 | 2021-07-22T15:08:13.000Z | 2022-03-10T08:15:56.000Z | nnunet/inference/pretrained_models/download_pretrained_model.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 1 | 2022-03-07T13:21:42.000Z | 2022-03-07T13:21:42.000Z | nnunet/inference/pretrained_models/download_pretrained_model.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 3 | 2021-11-26T06:26:24.000Z | 2022-02-14T01:23:44.000Z | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile
from time import time
import requests
from batchgenerators.utilities.file_and_folder_operations import join, isfile
from nnunet.paths import network_training_output_dir
def get_available_models():
    """Return the registry of downloadable pretrained nnU-Net models.

    :return: dict mapping task name -> {'description': str, 'url': str}. The
        'url' may be an empty string for models that have not been uploaded yet
        (callers check for that and raise).
    """
    available_models = {
        "Task001_BrainTumour": {
            'description': "Brain Tumor Segmentation. \n"
                           "Segmentation targets are edema, enhancing tumor and necrosis, \n"
                           "input modalities are 0: FLAIR, 1: T1, 2: T1 with contrast agent, 3: T2. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task001_BrainTumour.zip?download=1"
        },
        "Task002_Heart": {
            'description': "Left Atrium Segmentation. \n"
                           "Segmentation target is the left atrium, \n"
                           "input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task002_Heart.zip?download=1"
        },
        "Task003_Liver": {
            'description': "Liver and Liver Tumor Segmentation. \n"
                           "Segmentation targets are liver and tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task003_Liver.zip?download=1"
        },
        "Task004_Hippocampus": {
            'description': "Hippocampus Segmentation. \n"
                           "Segmentation targets posterior and anterior parts of the hippocampus, \n"
                           "input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task004_Hippocampus.zip?download=1"
        },
        "Task005_Prostate": {
            'description': "Prostate Segmentation. \n"
                           "Segmentation targets are peripheral and central zone, \n"
                           "input modalities are 0: T2, 1: ADC. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4485926/files/Task005_Prostate.zip?download=1"
        },
        "Task006_Lung": {
            'description': "Lung Nodule Segmentation. \n"
                           "Segmentation target are lung nodules, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task006_Lung.zip?download=1"
        },
        "Task007_Pancreas": {
            'description': "Pancreas Segmentation. \n"
                           "Segmentation targets are pancras and pancreas tumor, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task007_Pancreas.zip?download=1"
        },
        "Task008_HepaticVessel": {
            'description': "Hepatic Vessel Segmentation. \n"
                           "Segmentation targets are hepatic vesels and liver tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task008_HepaticVessel.zip?download=1"
        },
        "Task009_Spleen": {
            'description': "Spleen Segmentation. \n"
                           "Segmentation target is the spleen, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task009_Spleen.zip?download=1"
        },
        "Task010_Colon": {
            'description': "Colon Cancer Segmentation. \n"
                           "Segmentation target are colon caner primaries, \n"
                           "input modalities are 0: CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task010_Colon.zip?download=1"
        },
        "Task017_AbdominalOrganSegmentation": {
            'description': "Multi-Atlas Labeling Beyond the Cranial Vault - Abdomen. \n"
                           "Segmentation targets are thirteen different abdominal organs, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see https://www.synapse.org/#!Synapse:syn3193805/wiki/217754",
            'url': "https://zenodo.org/record/4003545/files/Task017_AbdominalOrganSegmentation.zip?download=1"
        },
        "Task024_Promise": {
            'description': "Prostate MR Image Segmentation 2012. \n"
                           "Segmentation target is the prostate, \n"
                           "input modalities are 0: T2. \n"
                           "Also see https://promise12.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task024_Promise.zip?download=1"
        },
        "Task027_ACDC": {
            'description': "Automatic Cardiac Diagnosis Challenge. \n"
                           "Segmentation targets are right ventricle, left ventricular cavity and left myocardium, \n"
                           "input modalities are 0: cine MRI. \n"
                           "Also see https://acdc.creatis.insa-lyon.fr/",
            'url': "https://zenodo.org/record/4003545/files/Task027_ACDC.zip?download=1"
        },
        "Task029_LiTS": {
            'description': "Liver and Liver Tumor Segmentation Challenge. \n"
                           "Segmentation targets are liver and liver tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/17094",
            'url': "https://zenodo.org/record/4003545/files/Task029_LITS.zip?download=1"
        },
        "Task035_ISBILesionSegmentation": {
            'description': "Longitudinal multiple sclerosis lesion segmentation Challenge. \n"
                           "Segmentation target is MS lesions, \n"
                           "input modalities are 0: FLAIR, 1: MPRAGE, 2: proton density, 3: T2. \n"
                           "Also see https://smart-stats-tools.org/lesion-challenge",
            'url': "https://zenodo.org/record/4003545/files/Task035_ISBILesionSegmentation.zip?download=1"
        },
        "Task038_CHAOS_Task_3_5_Variant2": {
            'description': "CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge (Task 3 & 5). \n"
                           "Segmentation targets are left and right kidney, liver, spleen, \n"
                           "input modalities are 0: T1 in-phase, T1 out-phase, T2 (can be any of those)\n"
                           "Also see https://chaos.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task038_CHAOS_Task_3_5_Variant2.zip?download=1"
        },
        "Task048_KiTS_clean": {
            'description': "Kidney and Kidney Tumor Segmentation Challenge. "
                           "Segmentation targets kidney and kidney tumors, "
                           "input modalities are 0: abdominal CT scan. "
                           "Also see https://kits19.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task048_KiTS_clean.zip?download=1"
        },
        "Task055_SegTHOR": {
            'description': "SegTHOR: Segmentation of THoracic Organs at Risk in CT images. \n"
                           "Segmentation targets are aorta, esophagus, heart and trachea, \n"
                           "input modalities are 0: CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/21145",
            'url': "https://zenodo.org/record/4003545/files/Task055_SegTHOR.zip?download=1"
        },
        "Task061_CREMI": {
            'description': "MICCAI Challenge on Circuit Reconstruction from Electron Microscopy Images (Synaptic Cleft segmentation task). \n"
                           "Segmentation target is synaptic clefts, \n"
                           "input modalities are 0: serial section transmission electron microscopy of neural tissue. \n"
                           "Also see https://cremi.org/",
            'url': "https://zenodo.org/record/4003545/files/Task061_CREMI.zip?download=1"
        },
        "Task075_Fluo_C3DH_A549_ManAndSim": {
            'description': "Fluo-C3DH-A549-SIM and Fluo-C3DH-A549 datasets of the cell tracking challenge. Segmentation target are C3DH cells in fluorescence microscopy images.\n"
                           "input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task075_Fluo_C3DH_A549_ManAndSim.zip?download=1"
        },
        "Task076_Fluo_N3DH_SIM": {
            # BUGFIX: a misplaced comma previously ended the description early,
            # so the two note strings and the 'url' literal concatenated into a
            # single stray dict key and this entry had NO 'url' key at all
            # (entry['url'] raised KeyError). The notes now belong to the
            # description and 'url' is a proper key.
            'description': "Fluo-N3DH-SIM dataset of the cell tracking challenge. Segmentation target are N3DH cells and cell borders in fluorescence microscopy images.\n"
                           "input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py",
            'url': "https://zenodo.org/record/4003545/files/Task076_Fluo_N3DH_SIM.zip?download=1"
        },
        "Task089_Fluo-N2DH-SIM_thickborder_time": {
            'description': "Fluo-N2DH-SIM dataset of the cell tracking challenge. Segmentation target are nuclei of N2DH cells and cell borders in fluorescence microscopy images.\n"
                           "input modalities are 0: t minus 4, 0: t minus 3, 0: t minus 2, 0: t minus 1, 0: frame of interest\n"
                           "Note that the input channels are different time steps from a time series acquisition\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task089_Fluo-N2DH-SIM_thickborder_time.zip?download=1"
        },
        "Task114_heart_MNMs": {
            'description': "Cardiac MRI short axis images from the M&Ms challenge 2020.\n"
                           "input modalities are 0: MRI \n"
                           "See also https://www.ub.edu/mnms/ \n"
                           "Note: Labels of the M&Ms Challenge are not in the same order as for the ACDC challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task114_heart_mnms.py",
            'url': "https://zenodo.org/record/4288464/files/Task114_heart_MNMs.zip?download=1"
        },
    }
    return available_models
def print_available_pretrained_models():
    """Print the task name and description of every downloadable pretrained model."""
    print('The following pretrained models are available:\n')
    for task_name, info in get_available_models().items():
        print('')
        print(task_name)
        print(info['description'])
def download_and_install_pretrained_model_by_name(taskname):
    """Download and install the pretrained model registered under *taskname*.

    :param taskname: key into the registry returned by get_available_models
    :raises RuntimeError: if the task name is unknown or the model has no
        download url yet.
    """
    available = get_available_models()
    if taskname not in available:
        raise RuntimeError("\nThe requested pretrained model ('%s') is not available." % taskname)
    url = available[taskname]['url']
    if not url:
        raise RuntimeError("The requested model has not been uploaded yet. Please check back in a few days")
    download_and_install_from_url(url)
def download_and_install_from_url(url):
    """Download a pretrained-model zip from *url* into a temporary file,
    install it into network_training_output_dir and always remove the
    temporary file again.

    :param url: direct download link to a model zip archive
    :raises AssertionError: if network_training_output_dir is not configured
        (RESULTS_FOLDER environment variable missing)
    """
    assert network_training_output_dir is not None, "Cannot install model because network_training_output_dir is not " \
                                                    "set (RESULTS_FOLDER missing as environment variable, see " \
                                                    "Installation instructions)"
    import http.client
    # Force HTTP/1.0 for all subsequent connections — presumably a workaround
    # for flaky chunked-transfer downloads from the hosting server (TODO confirm).
    # NOTE(review): this global monkeypatch is never restored.
    http.client.HTTPConnection._http_vsn = 10
    http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

    import os
    home = os.path.expanduser('~')
    # Pseudo-unique temporary file name in the user's home directory.
    random_number = int(time() * 1e7)
    tempfile = join(home, '.nnunetdownload_%s' % str(random_number))

    # The pointless ``except Exception as e: raise e`` was removed; the
    # ``finally`` alone guarantees cleanup on both success and failure.
    try:
        with open(tempfile, 'wb') as f:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                for chunk in r.iter_content(chunk_size=8192 * 16):
                    f.write(chunk)
        print("Download finished. Extracting...")
        install_model_from_zip_file(tempfile)
        print("Done")
    finally:
        if isfile(tempfile):
            os.remove(tempfile)
def download_file(url, local_filename):
    """Stream *url* into *local_filename* and return that path.

    Borrowed from https://stackoverflow.com/questions/16694907.
    """
    # stream=True keeps the response body out of memory until iterated
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        # the file is only created once we know the request succeeded
        with open(local_filename, 'wb') as out_file:
            for piece in response.iter_content(chunk_size=None):
                out_file.write(piece)
    return local_filename
def install_model_from_zip_file(zip_file: str):
    """Unpack a pretrained-model archive into network_training_output_dir."""
    archive = zipfile.ZipFile(zip_file, 'r')
    with archive:
        archive.extractall(network_training_output_dir)
def print_license_warning():
    """Print a prominent reminder that pretrained weights inherit the license
    of the dataset they were trained on."""
    banner = '######################################################'
    body = ("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some "
            "allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use "
            "nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!")
    for line in ('', banner, '!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!', banner, body, banner, ''):
        print(line)
def download_by_name():
    """CLI entry point: download one of the official pretrained models by its
    task name (after printing the license warning)."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to download pretrained models. CAREFUL: This script will overwrite existing models "
                    "(if they share the same trainer class and plans as the pretrained model")
    parser.add_argument("task_name", type=str,
                        help='Task name of the pretrained model. To see available task names, '
                             'run nnUNet_print_available_pretrained_models')
    taskname = parser.parse_args().task_name
    print_license_warning()
    download_and_install_pretrained_model_by_name(taskname)
def download_by_url():
    """CLI entry point: install a pretrained model zip from an arbitrary URL."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to download pretrained models. This script is intended to download models via url "
                    "only. If you want to download one of our pretrained models, please use "
                    "nnUNet_download_pretrained_model. CAREFUL: This script will overwrite existing models (if they "
                    "share the same trainer class and plans as the pretrained model.")
    parser.add_argument("url", type=str, help='URL of the pretrained model')
    download_and_install_from_url(parser.parse_args().url)
def install_from_zip_entry_point():
    """CLI entry point: install a pretrained model from a local zip file.

    The positional ``zip`` argument is the path to the archive; extraction is
    delegated to install_model_from_zip_file.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to install a zip file containing a pretrained model.")
    parser.add_argument("zip", type=str, help='zip file')
    args = parser.parse_args()
    # Renamed local variable (was ``zip``) so the builtin is not shadowed.
    zip_path = args.zip
    install_model_from_zip_file(zip_path)
def print_pretrained_model_requirements():
    """CLI entry point: show a pretrained model's description, including the
    input modalities it requires."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to see the properties of a pretrained model, especially what input modalities "
                    "it requires")
    parser.add_argument("task_name", type=str,
                        help='Task name of the pretrained model. To see available task names, '
                             'run nnUNet_print_available_pretrained_models')
    taskname = parser.parse_args().task_name
    available = get_available_models()
    if taskname not in available:
        raise RuntimeError("Invalid task name. This pretrained model does not exist. To see available task names, "
                           "run nnUNet_print_available_pretrained_models")
    print(available[taskname]['description'])
if __name__ == '__main__':
    url = 'https://www.dropbox.com/s/ft54q1gi060vm2x/Task004_Hippocampus.zip?dl=1'
import zipfile
from time import time
import requests
from batchgenerators.utilities.file_and_folder_operations import join, isfile
from nnunet.paths import network_training_output_dir
def get_available_models():
    """Return the registry of downloadable pretrained nnU-Net models.

    :return: dict mapping task name -> {'description': str, 'url': str}. The
        'url' may be an empty string for models that have not been uploaded yet
        (callers check for that and raise).
    """
    available_models = {
        "Task001_BrainTumour": {
            'description': "Brain Tumor Segmentation. \n"
                           "Segmentation targets are edema, enhancing tumor and necrosis, \n"
                           "input modalities are 0: FLAIR, 1: T1, 2: T1 with contrast agent, 3: T2. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task001_BrainTumour.zip?download=1"
        },
        "Task002_Heart": {
            'description': "Left Atrium Segmentation. \n"
                           "Segmentation target is the left atrium, \n"
                           "input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task002_Heart.zip?download=1"
        },
        "Task003_Liver": {
            'description': "Liver and Liver Tumor Segmentation. \n"
                           "Segmentation targets are liver and tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task003_Liver.zip?download=1"
        },
        "Task004_Hippocampus": {
            'description': "Hippocampus Segmentation. \n"
                           "Segmentation targets posterior and anterior parts of the hippocampus, \n"
                           "input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task004_Hippocampus.zip?download=1"
        },
        "Task005_Prostate": {
            'description': "Prostate Segmentation. \n"
                           "Segmentation targets are peripheral and central zone, \n"
                           "input modalities are 0: T2, 1: ADC. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4485926/files/Task005_Prostate.zip?download=1"
        },
        "Task006_Lung": {
            'description': "Lung Nodule Segmentation. \n"
                           "Segmentation target are lung nodules, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task006_Lung.zip?download=1"
        },
        "Task007_Pancreas": {
            'description': "Pancreas Segmentation. \n"
                           "Segmentation targets are pancras and pancreas tumor, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task007_Pancreas.zip?download=1"
        },
        "Task008_HepaticVessel": {
            'description': "Hepatic Vessel Segmentation. \n"
                           "Segmentation targets are hepatic vesels and liver tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task008_HepaticVessel.zip?download=1"
        },
        "Task009_Spleen": {
            'description': "Spleen Segmentation. \n"
                           "Segmentation target is the spleen, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task009_Spleen.zip?download=1"
        },
        "Task010_Colon": {
            'description': "Colon Cancer Segmentation. \n"
                           "Segmentation target are colon caner primaries, \n"
                           "input modalities are 0: CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task010_Colon.zip?download=1"
        },
        "Task017_AbdominalOrganSegmentation": {
            'description': "Multi-Atlas Labeling Beyond the Cranial Vault - Abdomen. \n"
                           "Segmentation targets are thirteen different abdominal organs, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see https://www.synapse.org/#!Synapse:syn3193805/wiki/217754",
            'url': "https://zenodo.org/record/4003545/files/Task017_AbdominalOrganSegmentation.zip?download=1"
        },
        "Task024_Promise": {
            'description': "Prostate MR Image Segmentation 2012. \n"
                           "Segmentation target is the prostate, \n"
                           "input modalities are 0: T2. \n"
                           "Also see https://promise12.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task024_Promise.zip?download=1"
        },
        "Task027_ACDC": {
            'description': "Automatic Cardiac Diagnosis Challenge. \n"
                           "Segmentation targets are right ventricle, left ventricular cavity and left myocardium, \n"
                           "input modalities are 0: cine MRI. \n"
                           "Also see https://acdc.creatis.insa-lyon.fr/",
            'url': "https://zenodo.org/record/4003545/files/Task027_ACDC.zip?download=1"
        },
        "Task029_LiTS": {
            'description': "Liver and Liver Tumor Segmentation Challenge. \n"
                           "Segmentation targets are liver and liver tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/17094",
            'url': "https://zenodo.org/record/4003545/files/Task029_LITS.zip?download=1"
        },
        "Task035_ISBILesionSegmentation": {
            'description': "Longitudinal multiple sclerosis lesion segmentation Challenge. \n"
                           "Segmentation target is MS lesions, \n"
                           "input modalities are 0: FLAIR, 1: MPRAGE, 2: proton density, 3: T2. \n"
                           "Also see https://smart-stats-tools.org/lesion-challenge",
            'url': "https://zenodo.org/record/4003545/files/Task035_ISBILesionSegmentation.zip?download=1"
        },
        "Task038_CHAOS_Task_3_5_Variant2": {
            'description': "CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge (Task 3 & 5). \n"
                           "Segmentation targets are left and right kidney, liver, spleen, \n"
                           "input modalities are 0: T1 in-phase, T1 out-phase, T2 (can be any of those)\n"
                           "Also see https://chaos.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task038_CHAOS_Task_3_5_Variant2.zip?download=1"
        },
        "Task048_KiTS_clean": {
            'description': "Kidney and Kidney Tumor Segmentation Challenge. "
                           "Segmentation targets kidney and kidney tumors, "
                           "input modalities are 0: abdominal CT scan. "
                           "Also see https://kits19.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task048_KiTS_clean.zip?download=1"
        },
        "Task055_SegTHOR": {
            'description': "SegTHOR: Segmentation of THoracic Organs at Risk in CT images. \n"
                           "Segmentation targets are aorta, esophagus, heart and trachea, \n"
                           "input modalities are 0: CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/21145",
            'url': "https://zenodo.org/record/4003545/files/Task055_SegTHOR.zip?download=1"
        },
        "Task061_CREMI": {
            'description': "MICCAI Challenge on Circuit Reconstruction from Electron Microscopy Images (Synaptic Cleft segmentation task). \n"
                           "Segmentation target is synaptic clefts, \n"
                           "input modalities are 0: serial section transmission electron microscopy of neural tissue. \n"
                           "Also see https://cremi.org/",
            'url': "https://zenodo.org/record/4003545/files/Task061_CREMI.zip?download=1"
        },
        "Task075_Fluo_C3DH_A549_ManAndSim": {
            'description': "Fluo-C3DH-A549-SIM and Fluo-C3DH-A549 datasets of the cell tracking challenge. Segmentation target are C3DH cells in fluorescence microscopy images.\n"
                           "input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task075_Fluo_C3DH_A549_ManAndSim.zip?download=1"
        },
        "Task076_Fluo_N3DH_SIM": {
            # BUGFIX: a misplaced comma previously ended the description early,
            # so the two note strings and the 'url' literal concatenated into a
            # single stray dict key and this entry had NO 'url' key at all
            # (entry['url'] raised KeyError). The notes now belong to the
            # description and 'url' is a proper key.
            'description': "Fluo-N3DH-SIM dataset of the cell tracking challenge. Segmentation target are N3DH cells and cell borders in fluorescence microscopy images.\n"
                           "input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py",
            'url': "https://zenodo.org/record/4003545/files/Task076_Fluo_N3DH_SIM.zip?download=1"
        },
        "Task089_Fluo-N2DH-SIM_thickborder_time": {
            'description': "Fluo-N2DH-SIM dataset of the cell tracking challenge. Segmentation target are nuclei of N2DH cells and cell borders in fluorescence microscopy images.\n"
                           "input modalities are 0: t minus 4, 0: t minus 3, 0: t minus 2, 0: t minus 1, 0: frame of interest\n"
                           "Note that the input channels are different time steps from a time series acquisition\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task089_Fluo-N2DH-SIM_thickborder_time.zip?download=1"
        },
        "Task114_heart_MNMs": {
            'description': "Cardiac MRI short axis images from the M&Ms challenge 2020.\n"
                           "input modalities are 0: MRI \n"
                           "See also https://www.ub.edu/mnms/ \n"
                           "Note: Labels of the M&Ms Challenge are not in the same order as for the ACDC challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task114_heart_mnms.py",
            'url': "https://zenodo.org/record/4288464/files/Task114_heart_MNMs.zip?download=1"
        },
    }
    return available_models
def print_available_pretrained_models():
    """Print the task name and description of every downloadable pretrained model."""
    print('The following pretrained models are available:\n')
    available = get_available_models()
    for task_name, model_info in available.items():
        print('')
        print(task_name)
        print(model_info['description'])
def download_and_install_pretrained_model_by_name(taskname):
    """Look up *taskname* in the model registry, then download and install its weights.

    Raises:
        RuntimeError: if the task is unknown, or its download URL has not
            been published yet.
    """
    available = get_available_models()
    if taskname not in available:
        raise RuntimeError("\nThe requested pretrained model ('%s') is not available." % taskname)
    url = available[taskname]['url']
    if not url:
        raise RuntimeError("The requested model has not been uploaded yet. Please check back in a few days")
    download_and_install_from_url(url)
def download_and_install_from_url(url):
    """Download a zipped pretrained model from *url* into a temporary file,
    install it into ``network_training_output_dir`` and remove the temp file.

    The temporary file is deleted even when the download or extraction fails.
    """
    assert network_training_output_dir is not None, "Cannot install model because network_training_output_dir is not " \
                                                    "set (RESULTS_FOLDER missing as environment variable, see " \
                                                    "Installation instructions)"
    import http.client
    # Force HTTP/1.0 so the connection avoids chunked transfer encoding;
    # presumably a workaround for truncated downloads from zenodo -- TODO confirm.
    http.client.HTTPConnection._http_vsn = 10
    http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

    import os
    home = os.path.expanduser('~')
    # Unique scratch file in the user's home directory. Renamed from
    # 'tempfile' so the local no longer shadows the stdlib module name.
    tmp_zip_path = join(home, '.nnunetdownload_%s' % str(int(time() * 1e7)))
    try:
        with open(tmp_zip_path, 'wb') as f:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                # Stream in 128 KiB chunks to keep memory usage flat.
                for chunk in r.iter_content(chunk_size=8192 * 16):
                    f.write(chunk)
        print("Download finished. Extracting...")
        install_model_from_zip_file(tmp_zip_path)
        print("Done")
    finally:
        # The redundant 'except Exception as e: raise e' clause was removed;
        # errors propagate unchanged while this cleanup still runs.
        if isfile(tmp_zip_path):
            os.remove(tmp_zip_path)
def download_file(url, local_filename):
    """Stream the content of *url* into *local_filename* and return that path.

    Raises an HTTP error (via raise_for_status) on a non-success status code.
    """
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(local_filename, 'wb') as target:
            for part in response.iter_content(chunk_size=None):
                target.write(part)
    return local_filename
def install_model_from_zip_file(zip_file: str):
    """Extract a zipped pretrained model into ``network_training_output_dir``
    (the RESULTS_FOLDER), preserving the archive's internal folder layout."""
    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
        zip_ref.extractall(network_training_output_dir)
def print_license_warning():
    """Print a banner reminding the user that pretrained weights inherit the
    license of the dataset they were trained on."""
    print('')
    print('######################################################')
    print('!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!')
    print('######################################################')
    print("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some "
          "allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use "
          "nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!")
    # The last line of this function was corrupted in the source
    # ("print('ownload_and_install_from_url(url)" -- an unterminated string
    # literal); restored to the closing banner.
    print('######################################################')
    print('')
def install_from_zip_entry_point():
    """Console entry point: install a pretrained model from a local zip file.

    Expects a single positional command line argument: the path to the zip.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to install a zip file containing a pretrained model.")
    parser.add_argument("zip", type=str, help='zip file')
    args = parser.parse_args()
    # Renamed from 'zip' so the local does not shadow the builtin zip().
    zip_file = args.zip
    install_model_from_zip_file(zip_file)
def print_pretrained_model_requirements():
    """Console entry point: print the description of one pretrained model
    (including its required input modalities), selected by task name."""
    import argparse
    parser = argparse.ArgumentParser(description="Use this to see the properties of a pretrained model, especially "
                                                 "what input modalities it requires")
    parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
                                                    'available task names, run nnUNet_print_available_'
                                                    'pretrained_models')
    task_name = parser.parse_args().task_name
    registry = get_available_models()
    if task_name not in registry:
        raise RuntimeError("Invalid task name. This pretrained model does not exist. To see available task names, "
                           "run nnUNet_print_available_pretrained_models")
    print(registry[task_name]['description'])
if __name__ == '__main__':
url = 'https://www.dropbox.com/s/ft54q1gi060vm2x/Task004_Hippocampus.zip?dl=1' | true | true |
f72157380ef02e33e2ef0f6f19e81eebfeeb2a1a | 5,318 | py | Python | optimizers/bohb_one_shot/plots/util.py | Mirofil/nasbench-1shot1 | 46637e259691ea2b1ab3b2f1cbbd309068f02cde | [
"Apache-2.0"
] | 65 | 2019-12-20T12:20:22.000Z | 2022-03-12T07:34:08.000Z | optimizers/bohb_one_shot/plots/util.py | crwhite14/nasbench-1shot1 | c34bf9c0222f07a30ba1518b3e52e120a3560aa4 | [
"Apache-2.0"
] | 8 | 2020-01-29T07:49:31.000Z | 2021-10-20T08:58:29.000Z | optimizers/bohb_one_shot/plots/util.py | crwhite14/nasbench-1shot1 | c34bf9c0222f07a30ba1518b3e52e120a3560aa4 | [
"Apache-2.0"
] | 18 | 2020-01-26T08:40:18.000Z | 2021-09-20T15:13:00.000Z | import os
import pickle
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import embed
# Fixed line colour per optimizer name so every figure uses a consistent
# palette; looked up via colors.get(method, None) in plot_losses.
colors={
    'BOHB-PC-DARTS': 'darkorange',
    'BOHB-DARTS': 'dodgerblue',
    'BOHB-GDAS' : 'forestgreen',
    'RE': 'crimson',
    'RS': 'darkorchid',
    'RL': 'sienna',
    'TPE': 'deepskyblue',
    'SMAC': 'violet',
    'HB': 'darkgray',
    'BOHB': 'gold'
}

# Matplotlib marker symbol per optimizer name, paired with the colors above.
markers={
    'BOHB-DARTS': '^',
    'BOHB-PC-DARTS': 'v',
    'BOHB-GDAS' : 'x',
    'RS': 'D',
    'RE': 'o',
    'RL': 's',
    'SMAC': 'h',
    'HB': '>',
    'BOHB': '*',
    'TPE': '<'
}
def get_incumbent(losses, time_stamps):
    """Turn a sequence of observed losses into a running-minimum (incumbent) trajectory.

    Parameters
    ----------
    losses : iterable of float
        Loss of each evaluated configuration, in evaluation order.
    time_stamps : iterable of float
        Time stamp of each evaluation (same length as *losses*).

    Returns
    -------
    tuple(list, list)
        ``(time_stamps, incumbent_losses)`` where entry i of the second list
        is the best loss seen up to and including evaluation i. Callers
        unpack this as ``times, losses = get_incumbent(...)``, exactly as
        with the previous dict_values return.
    """
    incumbent_times = []
    incumbent_losses = []
    # Running best; the unused 'incumbent_budget' variable was removed.
    best = float('inf')
    for loss, t in zip(losses, time_stamps):
        if loss < best:
            best = loss
        incumbent_times.append(t)
        incumbent_losses.append(best)
    return incumbent_times, incumbent_losses
def get_trajectories(args, global_min, path='regularized_evolution',
                     methods=['RE', 'RS']):
    """Load per-seed result pickles for each optimizer in *methods* and build
    incumbent (running-best regret) trajectories on a common time axis.

    Files are read from
    ``<path>/<method>/algo_<method>_0_ssp_<args.space>_seed_<seed>.obj`` and
    must unpickle to a list of records with ``test_accuracy``,
    ``training_time`` and (for HB/BOHB) ``budget`` attributes.
    *global_min* is subtracted so the stored losses become regrets.

    NOTE(review): the mutable default ``methods=['RE', 'RS']`` is only
    iterated, never mutated, so it is harmless here.
    """
    all_trajectories = {}
    for m in methods:
        dfs = []
        # Probe seeds 0..499; the first missing file ends the scan for this method.
        for seed in range(500):
            filename = os.path.join(path, m,
                                    'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,
                                                                          seed))
            try:
                with open(filename, 'rb') as f:
                    data = pickle.load(f)
                    # 1 - accuracy - global_min: the regret of each evaluation.
                    losses = [1 - x.test_accuracy - global_min for x in data]
                    # Cumulative wall-clock time of the evaluations.
                    times = np.array([x.training_time for x in data])
                    times = [np.sum(times[:i+1]) for i in range(len(times))]
                    if m in ['HB', 'BOHB']:
                        # Multi-fidelity methods: truncate once the cumulative
                        # budget exceeds 280*108 (presumably 280 full-budget
                        # evaluations of 108 epochs each -- TODO confirm).
                        costs = np.array([x.budget for x in data])
                        costs = np.array(
                            [np.sum(costs[:i+1]) for i in range(len(costs))]
                        )
                        n = len(np.where(costs <= 280*108)[0])
                        times, losses = get_incumbent(losses[:n], times[:n])
                    else:
                        times, losses = get_incumbent(losses, times)
                    print(seed, ' MIN: ', min(losses))
                    # One single-column frame per seed, indexed by time.
                    df = pd.DataFrame({str(seed): losses}, index=times)
                    # embed()  # IPython debugging hook, intentionally disabled
                    dfs.append(df)
            except FileNotFoundError:
                break
        df = merge_and_fill_trajectories(dfs, default_value=None)
        if df.empty:
            # No result files found for this method; skip it.
            continue
        print(m, df.shape)
        all_trajectories[m] = {
            'time_stamps': np.array(df.index),
            'losses': np.array(df.T)
        }
    return all_trajectories
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
# merge all tracjectories keeping all time steps
df = pd.DataFrame().join(pandas_data_frames, how='outer')
# forward fill to make it a propper step function
df=df.fillna(method='ffill')
if default_value is None:
# backward fill to replace the NaNs for the early times by
# the performance of a random configuration
df=df.fillna(method='bfill')
else:
df=df.fillna(default_value)
return(df)
def plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,
                incumbent=None, show=True, linewidth=3, marker_size=10,
                xscale='log', xlabel='wall clock time [s]', yscale='log',
                ylabel=None, legend_loc = 'best', xlim=None, ylim=None,
                plot_mean=True, labels={}, markers=markers, colors=colors,
                figsize=(16,9)):
    """Plot mean (or median) incumbent trajectories with uncertainty bands onto *ax*.

    *incumbent_trajectories* maps method name -> {'time_stamps': 1d array,
    'losses': 2d array of shape (runs, steps)}. When *regret* is True the
    global incumbent (lowest final loss over all methods, unless given via
    *incumbent*) is subtracted from every trajectory.

    NOTE(review): show, xscale, xlabel, yscale, legend_loc, xlim, ylim and
    figsize are accepted but never used inside this function -- presumably
    applied by the caller; confirm before removing them.
    """
    if regret:
        if ylabel is None: ylabel = 'regret'
        # find lowest performance in the data to update incumbent
        if incumbent is None:
            incumbent = np.inf
            for tr in incumbent_trajectories.values():
                incumbent = min(tr['losses'][:,-1].min(), incumbent)
            print('incumbent value: ', incumbent)
    for m,tr in incumbent_trajectories.items():
        trajectory = np.copy(tr['losses'])
        # Skip methods without any finished runs.
        if (trajectory.shape[0] == 0): continue
        if regret: trajectory -= incumbent
        # Standard error of the mean across runs.
        sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])
        if plot_mean:
            mean = trajectory.mean(axis=0)
        else:
            mean = np.median(trajectory,axis=0)
            # 1.253 ~= sqrt(pi/2): scales the SEM to the approximate
            # standard error of the median.
            sem *= 1.253
        if 'DARTS' in m or 'GDAS' in m:
            # Shaded 2-SEM uncertainty band, only for the one-shot methods.
            ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,
                            color=colors[m], alpha=0.2)
        ax.plot(tr['time_stamps'],mean,
                label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
        if axins is not None:
            # Mirror the curve into the inset axes, if one was provided.
            axins.plot(tr['time_stamps'],mean,
                    label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                    marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
    return (fig, ax)
| 33.446541 | 95 | 0.531403 | import os
import pickle
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import embed
colors={
'BOHB-PC-DARTS': 'darkorange',
'BOHB-DARTS': 'dodgerblue',
'BOHB-GDAS' : 'forestgreen',
'RE': 'crimson',
'RS': 'darkorchid',
'RL': 'sienna',
'TPE': 'deepskyblue',
'SMAC': 'violet',
'HB': 'darkgray',
'BOHB': 'gold'
}
markers={
'BOHB-DARTS': '^',
'BOHB-PC-DARTS': 'v',
'BOHB-GDAS' : 'x',
'RS': 'D',
'RE': 'o',
'RL': 's',
'SMAC': 'h',
'HB': '>',
'BOHB': '*',
'TPE': '<'
}
def get_incumbent(losses, time_stamps):
return_dict = {'time_stamps': [],
'losses': [],
}
current_incumbent = float('inf')
incumbent_budget = -float('inf')
for l, t in zip(losses, time_stamps):
if l < current_incumbent:
current_incumbent = l
return_dict['losses'].append(l)
return_dict['time_stamps'].append(t)
else:
return_dict['losses'].append(return_dict['losses'][-1])
return_dict['time_stamps'].append(t)
return return_dict.values()
def get_trajectories(args, global_min, path='regularized_evolution',
methods=['RE', 'RS']):
all_trajectories = {}
for m in methods:
dfs = []
for seed in range(500):
filename = os.path.join(path, m,
'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,
seed))
try:
with open(filename, 'rb') as f:
data = pickle.load(f)
losses = [1 - x.test_accuracy - global_min for x in data]
times = np.array([x.training_time for x in data])
times = [np.sum(times[:i+1]) for i in range(len(times))]
if m in ['HB', 'BOHB']:
costs = np.array([x.budget for x in data])
costs = np.array(
[np.sum(costs[:i+1]) for i in range(len(costs))]
)
n = len(np.where(costs <= 280*108)[0])
times, losses = get_incumbent(losses[:n], times[:n])
else:
times, losses = get_incumbent(losses, times)
print(seed, ' MIN: ', min(losses))
df = pd.DataFrame({str(seed): losses}, index=times)
dfs.append(df)
except FileNotFoundError:
break
df = merge_and_fill_trajectories(dfs, default_value=None)
if df.empty:
continue
print(m, df.shape)
all_trajectories[m] = {
'time_stamps': np.array(df.index),
'losses': np.array(df.T)
}
return all_trajectories
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
df = pd.DataFrame().join(pandas_data_frames, how='outer')
df=df.fillna(method='ffill')
if default_value is None:
df=df.fillna(method='bfill')
else:
df=df.fillna(default_value)
return(df)
def plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,
incumbent=None, show=True, linewidth=3, marker_size=10,
xscale='log', xlabel='wall clock time [s]', yscale='log',
ylabel=None, legend_loc = 'best', xlim=None, ylim=None,
plot_mean=True, labels={}, markers=markers, colors=colors,
figsize=(16,9)):
if regret:
if ylabel is None: ylabel = 'regret'
if incumbent is None:
incumbent = np.inf
for tr in incumbent_trajectories.values():
incumbent = min(tr['losses'][:,-1].min(), incumbent)
print('incumbent value: ', incumbent)
for m,tr in incumbent_trajectories.items():
trajectory = np.copy(tr['losses'])
if (trajectory.shape[0] == 0): continue
if regret: trajectory -= incumbent
sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])
if plot_mean:
mean = trajectory.mean(axis=0)
else:
mean = np.median(trajectory,axis=0)
sem *= 1.253
if 'DARTS' in m or 'GDAS' in m:
ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,
color=colors[m], alpha=0.2)
ax.plot(tr['time_stamps'],mean,
label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
if axins is not None:
axins.plot(tr['time_stamps'],mean,
label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
return (fig, ax)
| true | true |
f721587995d4a908abf3416954f163ffd9986c6f | 5,391 | py | Python | tools/PolicyAnalysis/Businesses.py | Randal1936/FinancialSupervision | 3d78b1cc662a2c0675ace880a772cc38eaf7672f | [
"MIT"
] | 1 | 2021-08-16T08:47:53.000Z | 2021-08-16T08:47:53.000Z | tools/PolicyAnalysis/Businesses.py | Randal1936/FSML | 3d78b1cc662a2c0675ace880a772cc38eaf7672f | [
"MIT"
] | 16 | 2021-08-02T14:34:52.000Z | 2021-08-04T12:48:06.000Z | tools/PolicyAnalysis/Businesses.py | Randal1936/FinancialSupervision | 3d78b1cc662a2c0675ace880a772cc38eaf7672f | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import xlwings as xw
from PolicyAnalysis import cptj as cj
"""
————————————————————
以下是使用 re 检索+ DFC 映射的数据处理写法
————————————————————
"""
class businesses_re:
    """Count regulated-business mentions in policy documents via regex keyword
    search (cj.words_docs_freq) over three views of each document: full body,
    first ten sentences, and title only. Results are exposed as DTM_* attributes.
    """

    def __init__(self, Data, userdict):
        self.Data = Data
        self.userdict = userdict

        data = Data.copy()

        # First load the keyword list from the user dictionary file.
        n = cj.txt_to_list(self.userdict)

        # Classify the regulated businesses by building a dict mapping.
        # First, a list marking the slice boundaries of each keyword group.
        loc = [(0, 4), (4, 10), (10, 15), (15, 19), (19, 22), (22, 26), (26, 29), (29, 31), (31, 40),
               (40, 41), (41, 42), (42, 43), (43, 44), (44, 45)]
        # Then walk the list, slice the keywords at the marked positions and
        # map keywords of the same group to the same numeric class id.
        i = 0
        keymap = {}
        for rank in loc:
            lst = n[rank[0]: rank[1]]
            for item in lst:
                keymap[item] = i
            i += 1

        # Case 1: search the full body text.
        result1 = cj.words_docs_freq(n, data)
        dfc1 = result1['DFC']
        dtm1_class = result1['DTM']
        dtm1_final = cj.dfc_sort_filter(dfc1, keymap, '被监管业务-正文分类统计.xlsx')

        # Case 2: search only the first ten sentences of the body.
        # Build a sample matrix whose body column keeps just those sentences.
        # NOTE(review): tf aliases data (no .copy()), so the body column is
        # truncated in place; business_jieba uses .copy() here -- confirm
        # the aliasing is intended.
        tf = data
        for i in range(0, data.shape[0]):
            tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])

        result2 = cj.words_docs_freq(n, tf)
        dfc2 = result2['DFC']
        dtm2_class = result2['DTM']
        dtm2_final = cj.dfc_sort_filter(dfc2, keymap, '被监管业务-前十句话分类统计.xlsx')

        # Case 3: search the title only.
        # First normalise the sample into the same layout. Assign and then
        # fill via iloc in a loop; slicing the original DataFrame instead
        # would raise SettingWithCopy-style errors.
        tf3 = data
        for i in range(0, data.shape[0]):
            tf3.iloc[i, 2] = data.iloc[i, 1]

        # Compute the word-frequency statistics.
        result3 = cj.words_docs_freq(n, tf3)
        dfc3 = result3['DFC']
        dtm3_class = result3['DTM']
        dtm3_final = cj.dfc_sort_filter(dfc3, keymap, '被监管业务-标题分类统计.xlsx')

        dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
        dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
        dtm_aver_class = dtm_final.agg(np.mean, axis=1)
        dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务数'])

        self.DTM_aver = dtm_aver_class  # mean of the business counts over DTM 1/2/3
        self.DTM_final = dtm_final  # business-category counts of DTM 1/2/3, side by side
        self.DTM1_class = dtm1_class  # Doc-Term Matrix from the full-body search
        self.DTM2_class = dtm2_class  # Doc-Term Matrix from the first-ten-sentences search
        self.DTM3_class = dtm3_class  # Doc-Term Matrix from the title-only search
"""
——————————————————————
以下是使用 jieba 检索+ DTM 映射的数据处理写法
——————————————————————
"""
class business_jieba:
    """Count regulated-business mentions via jieba tokenization
    (cj.jieba_vectorizer) over three views of each document: full body,
    first ten sentences, and title only. Keyword groups are read from an
    Excel indicator workbook. Results are exposed as DTM_* attributes.
    """

    def __init__(self, Data, userdict, indifile, indisheet, stopwords):
        self.Data = Data
        self.userdict = userdict
        self.indifile = indifile
        self.indisheet = indisheet
        self.stopwords = stopwords

        data = Data.copy()

        # Load the indicator workbook via a headless Excel instance;
        # try/finally guarantees the Excel process is closed.
        app = xw.App(visible=False, add_book=False)
        app.screen_updating = False
        app.display_alerts = False
        try:
            wb = app.books.open(self.indifile)
            sht = wb.sheets[self.indisheet]
            df_indi = sht.used_range.value
            df_indi = pd.DataFrame(df_indi)
            # First row of the sheet holds the column headers.
            df_indi.columns = df_indi.loc[0]
            df_indi.drop(0, axis=0, inplace=True)
            df_indi.dropna(axis=0, how='all', inplace=True)
        finally:
            app.quit()

        # Build the business classification dict,
        # {'Institution': [keyword1, keyword2, keyword3, ....], ....}
        # NOTE(review): dropna('') passes '' as the axis argument -- looks
        # unintended (plain dropna() was probably meant); confirm it works
        # on the pinned pandas version.
        keymap = {}
        for i in range(df_indi.shape[1]):
            keymap[df_indi.columns[i]] = list(df_indi.iloc[:, i].dropna(''))

        # Case 1: search the full body text.
        dtm1 = cj.jieba_vectorizer(data, self.userdict, self.stopwords).DTM
        dtm1_result = cj.dtm_sort_filter(dtm1, keymap, '被监管业务-正文分类统计.xlsx')
        dtm1_class = dtm1_result['DTM_class']
        dtm1_final = dtm1_result['DTM_final']

        # Case 2: search only the first ten sentences of the body.
        # Build a sample matrix whose body column keeps just those sentences.
        tf = data.copy()
        for i in range(0, data.shape[0]):
            tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])

        dtm2 = cj.jieba_vectorizer(tf, self.userdict, self.stopwords).DTM
        dtm2_result = cj.dtm_sort_filter(dtm2, keymap, '被监管业务-前十句话分类统计.xlsx')
        dtm2_class = dtm2_result['DTM_class']
        dtm2_final = dtm2_result['DTM_final']

        # Case 3: search the title only.
        # First normalise the sample into the same layout. Assign and then
        # fill via iloc in a loop; slicing the original DataFrame instead
        # would raise SettingWithCopy-style errors.
        tf3 = data.copy()
        for i in range(0, data.shape[0]):
            tf3.iloc[i, 2] = data.iloc[i, 1]

        # Compute the word-frequency statistics.
        dtm3 = cj.jieba_vectorizer(tf3, self.userdict, self.stopwords).DTM
        dtm3_result = cj.dtm_sort_filter(dtm3, keymap)
        dtm3_class = dtm3_result['DTM_class']
        dtm3_final = dtm3_result['DTM_final']

        dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
        dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
        dtm_aver_class = dtm_final.agg(np.mean, axis=1)
        dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务种类数'])

        self.DTM_aver = dtm_aver_class  # mean of the business counts over DTM 1/2/3
        self.DTM_final = dtm_final  # business-category counts of DTM 1/2/3, side by side
        self.DTM1_class = dtm1_class  # Doc-Term Matrix from the full-body search
        self.DTM2_class = dtm2_class  # Doc-Term Matrix from the first-ten-sentences search
        self.DTM3_class = dtm3_class  # Doc-Term Matrix from the title-only search
| 34.120253 | 101 | 0.584122 | import pandas as pd
import numpy as np
import xlwings as xw
from PolicyAnalysis import cptj as cj
class businesses_re:
def __init__(self, Data, userdict):
self.Data = Data
self.userdict = userdict
data = Data.copy()
n = cj.txt_to_list(self.userdict)
loc = [(0, 4), (4, 10), (10, 15), (15, 19), (19, 22), (22, 26), (26, 29), (29, 31), (31, 40),
(40, 41), (41, 42), (42, 43), (43, 44), (44, 45)]
i = 0
keymap = {}
for rank in loc:
lst = n[rank[0]: rank[1]]
for item in lst:
keymap[item] = i
i += 1
result1 = cj.words_docs_freq(n, data)
dfc1 = result1['DFC']
dtm1_class = result1['DTM']
dtm1_final = cj.dfc_sort_filter(dfc1, keymap, '被监管业务-正文分类统计.xlsx')
tf = data
for i in range(0, data.shape[0]):
tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])
result2 = cj.words_docs_freq(n, tf)
dfc2 = result2['DFC']
dtm2_class = result2['DTM']
dtm2_final = cj.dfc_sort_filter(dfc2, keymap, '被监管业务-前十句话分类统计.xlsx')
tf3 = data
for i in range(0, data.shape[0]):
tf3.iloc[i, 2] = data.iloc[i, 1]
result3 = cj.words_docs_freq(n, tf3)
dfc3 = result3['DFC']
dtm3_class = result3['DTM']
dtm3_final = cj.dfc_sort_filter(dfc3, keymap, '被监管业务-标题分类统计.xlsx')
dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
dtm_aver_class = dtm_final.agg(np.mean, axis=1)
dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务数'])
self.DTM_aver = dtm_aver_class
self.DTM_final = dtm_final
self.DTM1_class = dtm1_class
self.DTM2_class = dtm2_class
self.DTM3_class = dtm3_class
class business_jieba:
def __init__(self, Data, userdict, indifile, indisheet, stopwords):
self.Data = Data
self.userdict = userdict
self.indifile = indifile
self.indisheet = indisheet
self.stopwords = stopwords
data = Data.copy()
app = xw.App(visible=False, add_book=False)
app.screen_updating = False
app.display_alerts = False
try:
wb = app.books.open(self.indifile)
sht = wb.sheets[self.indisheet]
df_indi = sht.used_range.value
df_indi = pd.DataFrame(df_indi)
df_indi.columns = df_indi.loc[0]
df_indi.drop(0, axis=0, inplace=True)
df_indi.dropna(axis=0, how='all', inplace=True)
finally:
app.quit()
keymap = {}
for i in range(df_indi.shape[1]):
keymap[df_indi.columns[i]] = list(df_indi.iloc[:, i].dropna(''))
dtm1 = cj.jieba_vectorizer(data, self.userdict, self.stopwords).DTM
dtm1_result = cj.dtm_sort_filter(dtm1, keymap, '被监管业务-正文分类统计.xlsx')
dtm1_class = dtm1_result['DTM_class']
dtm1_final = dtm1_result['DTM_final']
tf = data.copy()
for i in range(0, data.shape[0]):
tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])
dtm2 = cj.jieba_vectorizer(tf, self.userdict, self.stopwords).DTM
dtm2_result = cj.dtm_sort_filter(dtm2, keymap, '被监管业务-前十句话分类统计.xlsx')
dtm2_class = dtm2_result['DTM_class']
dtm2_final = dtm2_result['DTM_final']
tf3 = data.copy()
for i in range(0, data.shape[0]):
tf3.iloc[i, 2] = data.iloc[i, 1]
dtm3 = cj.jieba_vectorizer(tf3, self.userdict, self.stopwords).DTM
dtm3_result = cj.dtm_sort_filter(dtm3, keymap)
dtm3_class = dtm3_result['DTM_class']
dtm3_final = dtm3_result['DTM_final']
dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
dtm_aver_class = dtm_final.agg(np.mean, axis=1)
dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务种类数'])
self.DTM_aver = dtm_aver_class
self.DTM_final = dtm_final
self.DTM1_class = dtm1_class
self.DTM2_class = dtm2_class
self.DTM3_class = dtm3_class
| true | true |
f7215945d58449184be2ce4c38342a88d9dfe3a5 | 1,847 | py | Python | accounts/migrations/0002_auto_20200522_2023.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0002_auto_20200522_2023.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | 15 | 2020-03-09T11:56:13.000Z | 2022-02-10T15:03:01.000Z | accounts/migrations/0002_auto_20200522_2023.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.8 on 2020-05-22 20:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.2.8).

    Adds the FK fields linking ClubUserProfile to the management app's Club,
    StudentClass and Unit models, and attaches Django's standard auth
    groups / user_permissions M2M fields to the custom StudentClubUser.
    """

    initial = True

    dependencies = [
        ('management', '0001_initial'),
        ('accounts', '0001_initial'),
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        # Optional organisational links on the club user profile.
        migrations.AddField(
            model_name='clubuserprofile',
            name='club',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Club', verbose_name='社团'),
        ),
        migrations.AddField(
            model_name='clubuserprofile',
            name='student_class',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.StudentClass', verbose_name='班级'),
        ),
        migrations.AddField(
            model_name='clubuserprofile',
            name='unit',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Unit', verbose_name='单位'),
        ),
        # Standard Django permission plumbing for the custom user model.
        migrations.AddField(
            model_name='studentclubuser',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
        ),
        migrations.AddField(
            model_name='studentclubuser',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
    ]
| 41.977273 | 256 | 0.650785 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('management', '0001_initial'),
('accounts', '0001_initial'),
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.AddField(
model_name='clubuserprofile',
name='club',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Club', verbose_name='社团'),
),
migrations.AddField(
model_name='clubuserprofile',
name='student_class',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.StudentClass', verbose_name='班级'),
),
migrations.AddField(
model_name='clubuserprofile',
name='unit',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Unit', verbose_name='单位'),
),
migrations.AddField(
model_name='studentclubuser',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='studentclubuser',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
| true | true |
f72159a07dd981d68fbdfa405294da73b86fbb56 | 593 | py | Python | hashmaps/hash_map_tests.py | informramiz/data-structures-and-algorithms | 7038c8becc4cbad82867c9c8bca42637ca27c8d7 | [
"Apache-2.0"
] | null | null | null | hashmaps/hash_map_tests.py | informramiz/data-structures-and-algorithms | 7038c8becc4cbad82867c9c8bca42637ca27c8d7 | [
"Apache-2.0"
] | null | null | null | hashmaps/hash_map_tests.py | informramiz/data-structures-and-algorithms | 7038c8becc4cbad82867c9c8bca42637ca27c8d7 | [
"Apache-2.0"
] | 1 | 2020-09-24T22:54:52.000Z | 2020-09-24T22:54:52.000Z | from hash_map import HashMap
from asserts.asserts import assert_
def test_hash_map():
    """Exercise HashMap put/get, size, delete and rehashing end to end."""
    table = HashMap(2)

    # put followed by get must round-trip the stored value
    table.put("abcde", "ramiz")
    assert_("ramiz", table.get("abcde"))

    # exactly one entry stored so far
    assert_(1, table.size())

    # deleting the only entry empties the table
    table.delete("abcde")
    assert_(0, table.size())

    # filling past the initial capacity of 2 should trigger a rehash
    table.put("mine", "mine")
    table.put("hi", "hi")
    assert_(2, table.size())

    print("All Tests Passed!")


test_hash_map()
| 19.129032 | 35 | 0.637437 | from hash_map import HashMap
from asserts.asserts import assert_
def test_hash_map():
hash_map = HashMap(2)
key = "abcde"
value = "ramiz"
hash_map.put(key, value)
output = hash_map.get(key)
assert_(value, output)
assert_(1, hash_map.size())
hash_map.delete("abcde")
assert_(0, hash_map.size())
hash_map.put("mine", "mine")
hash_map.put("hi", "hi")
assert_(2, hash_map.size())
print("All Tests Passed!")
test_hash_map()
| true | true |
f7215b16f3948f7d90fe03e4471250973c15ca0c | 639 | py | Python | davarocr/davarocr/davar_rcg/tools/__init__.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 387 | 2021-01-02T07:50:15.000Z | 2022-03-31T04:30:03.000Z | davarocr/davarocr/davar_rcg/tools/__init__.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 70 | 2021-05-04T18:28:18.000Z | 2022-03-31T14:14:52.000Z | davarocr/davarocr/davar_rcg/tools/__init__.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 83 | 2021-01-05T08:28:26.000Z | 2022-03-31T07:14:03.000Z | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : __init__.py
# Abstract :
# Current Version: 1.0.0
# Date : 2021-05-01
##################################################################################################
"""
from .test_utils import filter_punctuation, make_paths, show_result_table, results2json, eval_json
__all__ = [
"filter_punctuation",
"make_paths",
"show_result_table",
"results2json",
"eval_json"
]
| 31.95 | 98 | 0.450704 | from .test_utils import filter_punctuation, make_paths, show_result_table, results2json, eval_json
__all__ = [
"filter_punctuation",
"make_paths",
"show_result_table",
"results2json",
"eval_json"
]
| true | true |
f7215baa85a5f4afcd2f4643ea78bfc425bfefa5 | 2,586 | py | Python | app/back/settings/routes/api/__init__.py | jgphilpott/polyplot | c46861174ee5881dadffbfb2278d555462523547 | [
"MIT"
] | 5 | 2021-05-17T14:17:14.000Z | 2021-12-14T12:54:32.000Z | app/back/settings/routes/api/__init__.py | jgphilpott/iGraph | 2a91ba57e4950856a83d3a109753f8f2badee829 | [
"MIT"
] | 8 | 2020-02-09T02:48:41.000Z | 2021-05-16T04:57:02.000Z | app/back/settings/routes/api/__init__.py | jgphilpott/iGraph | 2a91ba57e4950856a83d3a109753f8f2badee829 | [
"MIT"
] | 2 | 2016-09-12T03:48:16.000Z | 2019-05-04T14:15:19.000Z | from front.tree.home.api.route import register_api_route
from front.tree.home.api.airports.route import register_api_airports_route
from front.tree.home.api.airports.airport.route import register_api_airport_route
from front.tree.home.api.cities.route import register_api_cities_route
from front.tree.home.api.cities.city.route import register_api_city_route
from front.tree.home.api.countries.route import register_api_countries_route
from front.tree.home.api.countries.country.route import register_api_country_route
from front.tree.home.api.graticules.route import register_api_graticules_route
from front.tree.home.api.graticules.graticule.route import register_api_graticule_route
from front.tree.home.api.indicators.route import register_api_indicators_route
from front.tree.home.api.indicators.indicator.route import register_api_indicator_route
from front.tree.home.api.lakes.route import register_api_lakes_route
from front.tree.home.api.lakes.lake.route import register_api_lake_route
from front.tree.home.api.maps.route import register_api_maps_route
from front.tree.home.api.maps.map.route import register_api_map_route
from front.tree.home.api.ports.route import register_api_ports_route
from front.tree.home.api.ports.port.route import register_api_port_route
from front.tree.home.api.railroads.route import register_api_railroads_route
from front.tree.home.api.railroads.railroad.route import register_api_railroad_route
from front.tree.home.api.rivers.route import register_api_rivers_route
from front.tree.home.api.rivers.river.route import register_api_river_route
from front.tree.home.api.roads.route import register_api_roads_route
from front.tree.home.api.roads.road.route import register_api_road_route
def register_api_routes(app):
    """Attach every API endpoint (collection and single-item routes) to *app*."""
    registrars = (
        register_api_route,
        register_api_airports_route,
        register_api_airport_route,
        register_api_cities_route,
        register_api_city_route,
        register_api_countries_route,
        register_api_country_route,
        register_api_graticules_route,
        register_api_graticule_route,
        register_api_indicators_route,
        register_api_indicator_route,
        register_api_lakes_route,
        register_api_lake_route,
        register_api_maps_route,
        register_api_map_route,
        register_api_ports_route,
        register_api_port_route,
        register_api_railroads_route,
        register_api_railroad_route,
        register_api_rivers_route,
        register_api_river_route,
        register_api_roads_route,
        register_api_road_route,
    )
    # Same registration order as the original explicit call list.
    for register in registrars:
        register(app)
| 35.916667 | 87 | 0.842614 | from front.tree.home.api.route import register_api_route
from front.tree.home.api.airports.route import register_api_airports_route
from front.tree.home.api.airports.airport.route import register_api_airport_route
from front.tree.home.api.cities.route import register_api_cities_route
from front.tree.home.api.cities.city.route import register_api_city_route
from front.tree.home.api.countries.route import register_api_countries_route
from front.tree.home.api.countries.country.route import register_api_country_route
from front.tree.home.api.graticules.route import register_api_graticules_route
from front.tree.home.api.graticules.graticule.route import register_api_graticule_route
from front.tree.home.api.indicators.route import register_api_indicators_route
from front.tree.home.api.indicators.indicator.route import register_api_indicator_route
from front.tree.home.api.lakes.route import register_api_lakes_route
from front.tree.home.api.lakes.lake.route import register_api_lake_route
from front.tree.home.api.maps.route import register_api_maps_route
from front.tree.home.api.maps.map.route import register_api_map_route
from front.tree.home.api.ports.route import register_api_ports_route
from front.tree.home.api.ports.port.route import register_api_port_route
from front.tree.home.api.railroads.route import register_api_railroads_route
from front.tree.home.api.railroads.railroad.route import register_api_railroad_route
from front.tree.home.api.rivers.route import register_api_rivers_route
from front.tree.home.api.rivers.river.route import register_api_river_route
from front.tree.home.api.roads.route import register_api_roads_route
from front.tree.home.api.roads.road.route import register_api_road_route
def register_api_routes(app):
    """Attach every /api sub-route (collection and single-item) to *app*."""
    registrars = (
        register_api_route,
        register_api_airports_route,
        register_api_airport_route,
        register_api_cities_route,
        register_api_city_route,
        register_api_countries_route,
        register_api_country_route,
        register_api_graticules_route,
        register_api_graticule_route,
        register_api_indicators_route,
        register_api_indicator_route,
        register_api_lakes_route,
        register_api_lake_route,
        register_api_maps_route,
        register_api_map_route,
        register_api_ports_route,
        register_api_port_route,
        register_api_railroads_route,
        register_api_railroad_route,
        register_api_rivers_route,
        register_api_river_route,
        register_api_roads_route,
        register_api_road_route,
    )
    # Same registration order as before, expressed as a single loop.
    for register in registrars:
        register(app)
| true | true |
f7215cea245000030d3205fd74b6800d175f2836 | 1,540 | py | Python | app.py | panubo/sensu-result-proxy | e7b8ee72bfbfadaaa798020630af6f6c5ed36e37 | [
"MIT"
] | null | null | null | app.py | panubo/sensu-result-proxy | e7b8ee72bfbfadaaa798020630af6f6c5ed36e37 | [
"MIT"
] | null | null | null | app.py | panubo/sensu-result-proxy | e7b8ee72bfbfadaaa798020630af6f6c5ed36e37 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import web
import requests
import json
from config import CONFIG_FILE, DEBUG, SENSU_API_URI, SENSU_API_USER, SENSU_API_PASS, load_config, validate_api_key
# SHA2
urls = (
'/', 'Index',
'/results/([A-Fa-f0-9]{64})', 'CheckCollector'
)
api_config = load_config(CONFIG_FILE)
class Index(object):
    """Root URL handler; answers with a static greeting string."""
    def GET(self):
        greeting = 'Welcome to the Sensu Check Collector!'
        return greeting
class CheckCollector(object):
    """Accepts check results at /results/<api_key> and forwards them to Sensu.

    The URL pattern captures a 64-hex-character key (SHA-256-sized); only
    POST is supported.
    """
    def GET(self, api_key):
        # Results can only be pushed, never read back.
        return web.nomethod()
    def POST(self, api_key):
        # Parse the JSON body first so malformed payloads fail fast.
        try:
            data = json.loads(web.data())
            if DEBUG: print(json.dumps(data))
        except ValueError as e:
            raise web.badrequest('Invalid JSON request data')
        # Key is checked against the loaded config and the payload itself
        # (see config.validate_api_key).
        if not validate_api_key(api_key, api_config, data):
            raise web.forbidden('Invalid API Key')
        try:
            headers = {'Content-type': 'application/json'}
            if SENSU_API_USER and SENSU_API_PASS:
                # Python 2 print statement; the password is deliberately
                # masked as XXX in the debug output.
                if DEBUG: print "AUTH: SENSU_API_USER, XXX"
                auth=(SENSU_API_USER, SENSU_API_PASS)
            else:
                auth=None
            r = requests.post(SENSU_API_URI, json=data, headers=headers, auth=auth)
            r.raise_for_status()
            # 202 Accepted: Sensu now owns the result.
            return web.accepted()
        except requests.exceptions.RequestException as e:
            print(e)
            raise web.internalerror('RequestException calling Sensu')
if __name__ == "__main__":
    # Run the web.py development server with the routes declared in `urls`.
    app = web.application(urls, globals())
    web.config.debug = DEBUG
    app.run()
| 25.245902 | 115 | 0.619481 |
import os
import web
import requests
import json
from config import CONFIG_FILE, DEBUG, SENSU_API_URI, SENSU_API_USER, SENSU_API_PASS, load_config, validate_api_key
urls = (
'/', 'Index',
'/results/([A-Fa-f0-9]{64})', 'CheckCollector'
)
api_config = load_config(CONFIG_FILE)
class Index(object):
def GET(self):
return 'Welcome to the Sensu Check Collector!'
class CheckCollector(object):
def GET(self, api_key):
return web.nomethod()
def POST(self, api_key):
try:
data = json.loads(web.data())
if DEBUG: print(json.dumps(data))
except ValueError as e:
raise web.badrequest('Invalid JSON request data')
if not validate_api_key(api_key, api_config, data):
raise web.forbidden('Invalid API Key')
try:
headers = {'Content-type': 'application/json'}
if SENSU_API_USER and SENSU_API_PASS:
if DEBUG: print "AUTH: SENSU_API_USER, XXX"
auth=(SENSU_API_USER, SENSU_API_PASS)
else:
auth=None
r = requests.post(SENSU_API_URI, json=data, headers=headers, auth=auth)
r.raise_for_status()
return web.accepted()
except requests.exceptions.RequestException as e:
print(e)
raise web.internalerror('RequestException calling Sensu')
if __name__ == "__main__":
app = web.application(urls, globals())
web.config.debug = DEBUG
app.run()
| false | true |
f7215cee8919dee9ba60b53f2fdaa5fd496bb91f | 656 | py | Python | testing.py | vuthalab/spectrum-awg | 5edd7eb3b06f877bb6f77359773c9ba2d727c52d | [
"MIT"
] | 1 | 2022-02-28T15:38:57.000Z | 2022-02-28T15:38:57.000Z | testing.py | vuthalab/spectrum-awg | 5edd7eb3b06f877bb6f77359773c9ba2d727c52d | [
"MIT"
] | null | null | null | testing.py | vuthalab/spectrum-awg | 5edd7eb3b06f877bb6f77359773c9ba2d727c52d | [
"MIT"
] | null | null | null | import time
from M4i6622 import *
from Functions.functions import *
#4 functions to be used
def f0(x):
    # Channel-0 waveform. `sin` comes from Functions.functions (project
    # helper); presumably a sine evaluated at sample phase x — TODO confirm.
    return sin(x)#sin_for_time(60000000, 40000000, 20000,10000, x)
def f1(x):
    # Channel-1 waveform: same default sine as channel 0.
    return sin(x)
def f2(x):
    # Channel-2 waveform: sine with an explicit f=1000 argument.
    return sin(x,f=1000)
def f3(x):
    # Identity ramp; defined but not wired into setupCard() below.
    return x
t0 = time.perf_counter()
# Bring up the Spectrum M4i.6622 AWG: 3 channels, sampleRate=625 (units per
# the M4i6622 driver — TODO confirm), clock output on, no external reference.
M4i = M4i6622(channelNum=3,sampleRate=625,clockOut=True,referenceClock=False)
r = M4i.setSoftwareBuffer()
# Load one waveform function per channel; f3 is intentionally unused here.
M4i.setupCard( (f0,f1,f2) )
tf = time.perf_counter() - t0
print("Done")
print("Time elapsed: {0: 10f} s".format(tf))
M4i.startCard()
# stop() returns the driver error code (0 expected on clean shutdown).
r = M4i.stop()
print("Card has been stopped with error code: ",str(r))
| 15.255814 | 78 | 0.637195 | import time
from M4i6622 import *
from Functions.functions import *
def f0(x):
return sin(x)
def f1(x):
return sin(x)
def f2(x):
return sin(x,f=1000)
def f3(x):
return x
t0 = time.perf_counter()
M4i = M4i6622(channelNum=3,sampleRate=625,clockOut=True,referenceClock=False)
r = M4i.setSoftwareBuffer()
M4i.setupCard( (f0,f1,f2) )
tf = time.perf_counter() - t0
print("Done")
print("Time elapsed: {0: 10f} s".format(tf))
M4i.startCard()
r = M4i.stop()
print("Card has been stopped with error code: ",str(r))
| true | true |
f7215cf985916a431b4772d83a2ecae8b5f0c458 | 4,513 | py | Python | experiments/exp_movie_5and7.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | experiments/exp_movie_5and7.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | experiments/exp_movie_5and7.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | import group_frequency_oracle as freq
import linecache
import random
def query_on_adult_dim2(oraclePath,oracleInterval,queryPath,trueOraclePath,aggregation="count"):
    """Evaluate 500 range queries against sampled noisy frequency oracles.

    For every query, 10 randomly chosen noisy oracles are queried and their
    answers averaged, then compared against the ground-truth oracle.

    Args:
        oraclePath: file holding the serialized noisy oracles.
        oracleInterval: group interval forwarded to the oracle loader.
        queryPath: file whose first line eval()s to the list of 500 queries.
        trueOraclePath: file whose first line eval()s to the true oracle
            (dict: group value -> {query key: count}).
        aggregation: "count" for plain counts, "sum" for value-weighted sums;
            any other value leaves all answers at zero (as before).

    Returns:
        (answer, TrueAnswer, mean relative error, mean signed error).
    """
    # SECURITY: eval() executes arbitrary code read from these files; only
    # use with trusted, locally generated inputs (consider ast.literal_eval).
    queries = eval(linecache.getline(queryPath, 1))
    trueOracle = eval(linecache.getline(trueOraclePath, 1))
    # Total population size; floors the relative-error denominator below.
    n = sum(sum(group.values()) for group in trueOracle.values())
    answer = [0] * 500
    TrueAnswer = [0] * 500
    relativeError = 0
    averageError = 0
    for i in range(1, 501):
        query = queries[i - 1]
        for _ in range(10):
            # One of the 500 repeated noisy oracles, uniformly at random.
            kthoracle = random.randint(1, 500)
            oracle = freq.group_frequency_oracle(oraclePath, oracleInterval,
                                                 k_th_oracle=kthoracle)
            if aggregation == "count":
                # Plain count: add each group's frequency for this query.
                answer[i - 1] += sum(oracle[j][query] for j in oracle)
                TrueAnswer[i - 1] += sum(trueOracle[j][query] for j in oracle)
            elif aggregation == "sum":
                # Value-weighted sum: weight each group by its group value j.
                answer[i - 1] += sum(j * oracle[j][query] for j in oracle)
                TrueAnswer[i - 1] += sum(j * trueOracle[j][query]
                                         for j in oracle)
        # Average the 10 sampled oracle answers.
        answer[i - 1] /= 10.0
        TrueAnswer[i - 1] /= 10.0
        # Denominator floored at 0.1% of the population to avoid blow-ups
        # when the true answer is tiny.
        relativeError += (answer[i - 1] - TrueAnswer[i - 1]) \
            / max(0.001 * n, float(TrueAnswer[i - 1]))
        averageError += answer[i - 1] - TrueAnswer[i - 1]
    return answer, TrueAnswer, relativeError / 500, averageError / 500
if __name__ == '__main__':
    # Experiment driver: run the movie-attribute (5 and 7) query workload
    # once for "count" and once for "sum" aggregation, writing results to
    # per-aggregation report files.
    oraclePath = "experiments//movie_2_results.txt"
    oracleInterval = 18
    queryPath = "experiments//movie_query_5_7_9.txt"
    trueOraclePath = "movie//movie5.txt"
    ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
                                                                   queryPath,
                                                                   trueOraclePath,
                                                                   aggregation="count")
    print(relativeError)
    with open("experiments//final_movie_2_count.txt", "w+") as f:
        f.write(str(ans) + "\n")
        f.write("true ans"+str(Trueans)+"\n")
        f.write("relativeError:" + str(relativeError) + "\n")
        f.write("averageError:" + str(averageError) + "\n")
    # Same workload with value-weighted (sum) aggregation.
    ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
                                                                   queryPath,
                                                                   trueOraclePath,
                                                                   aggregation="sum")
    print(relativeError)
    with open("experiments//final_movie_2_sum.txt", "w+") as f:
        f.write(str(ans) + "\n")
        f.write("true ans" + str(Trueans) + "\n")
        f.write("relativeError:" + str(relativeError) + "\n")
        f.write("averageError:" + str(averageError) + "\n")
| 48.010638 | 114 | 0.534899 | import group_frequency_oracle as freq
import linecache
import random
def query_on_adult_dim2(oraclePath,oracleInterval,queryPath,trueOraclePath,aggregation="count"):
queriesStr=linecache.getline(queryPath,1)
queries=eval(queriesStr)
answer=[0]*500
trueOracleStr=linecache.getline(trueOraclePath,1)
trueOracle= eval(trueOracleStr)
n=sum([sum(trueOracle[k].values()) for k in trueOracle.keys()])
TrueAnswer=[0]*500
relativeError = 0
averageError=0
absrelativeError=0
absaverageError=0
for i in range(1,501):
for _ in range(10):
kthoracle=random.randint(1,500)
oracle=freq.group_frequency_oracle(oraclePath, oracleInterval,k_th_oracle=kthoracle)
if aggregation=="count":
count_value=0
true_count_value=0
for j in oracle.keys():
count_value+=oracle[j][queries[i-1]]
true_count_value += trueOracle[j][queries[i - 1]]
answer[i-1]+=count_value
TrueAnswer[i-1]+=true_count_value
elif aggregation=="sum":
sum_value = 0
true_sum_value = 0
for j in oracle.keys():
sum_value += j*oracle[j][queries[i-1]]
true_sum_value += j*trueOracle[j][queries[i - 1]]
answer[i-1]+=sum_value
TrueAnswer[i-1]+=true_sum_value
answer[i - 1] /= 10.0
TrueAnswer[i - 1] /= 10.0
relativeError += (answer[i - 1] - TrueAnswer[i - 1]) / max(0.001 * n, float(TrueAnswer[i - 1]))
averageError += answer[i - 1] - TrueAnswer[i - 1]
return answer,TrueAnswer,relativeError/500,averageError/500
if __name__ == '__main__':
oraclePath = "experiments//movie_2_results.txt"
oracleInterval = 18
queryPath = "experiments//movie_query_5_7_9.txt"
trueOraclePath = "movie//movie5.txt"
ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
queryPath,
trueOraclePath,
aggregation="count")
print(relativeError)
with open("experiments//final_movie_2_count.txt", "w+") as f:
f.write(str(ans) + "\n")
f.write("true ans"+str(Trueans)+"\n")
f.write("relativeError:" + str(relativeError) + "\n")
f.write("averageError:" + str(averageError) + "\n")
ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
queryPath,
trueOraclePath,
aggregation="sum")
print(relativeError)
with open("experiments//final_movie_2_sum.txt", "w+") as f:
f.write(str(ans) + "\n")
f.write("true ans" + str(Trueans) + "\n")
f.write("relativeError:" + str(relativeError) + "\n")
f.write("averageError:" + str(averageError) + "\n")
| true | true |
f7215e1e2a0a47dea2178844ff22feb957540229 | 7,863 | py | Python | v2ex/babel/ext/upyun.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | 161 | 2019-07-23T06:53:45.000Z | 2022-03-24T01:07:19.000Z | v2ex/babel/ext/upyun.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | 1 | 2015-01-19T07:05:54.000Z | 2015-06-02T05:01:38.000Z | v2ex/babel/ext/upyun.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | 26 | 2019-08-05T06:09:38.000Z | 2021-07-08T02:05:13.000Z | # -*- coding: utf8 -*-
import httplib
import md5 as imd5
import base64
import time
import re
METADATA_PREFIX = 'x-upyun-meta-'
DL = '/'
def md5(src):
    """Return the hex MD5 digest of the byte string *src*.

    Uses hashlib (available since Python 2.5) instead of the long-deprecated
    ``md5`` module, so this helper also works on Python 3.
    """
    import hashlib  # local import keeps the fix self-contained
    digest = hashlib.md5()
    digest.update(src)
    return digest.hexdigest()
def md5file(fobj):
    """Return the hex MD5 of *fobj*'s contents, read in 8096-byte chunks.

    The file object is rewound to offset 0 afterwards so callers can
    re-read (e.g. upload) the same data. Uses hashlib instead of the
    deprecated ``md5`` module (works on Python 2.5+ and Python 3).
    """
    import hashlib  # local import keeps the fix self-contained
    digest = hashlib.md5()
    while True:
        chunk = fobj.read(8096)
        if not chunk:
            break
        digest.update(chunk)
    fobj.seek(0)
    return digest.hexdigest()
def merge_meta(headers, metadata):
    """Return a copy of *headers* plus metadata entries as x-upyun-meta-* keys."""
    merged = dict(headers)
    merged.update((METADATA_PREFIX + key, value)
                  for key, value in metadata.items())
    return merged
class UpYunException(Exception):
    '''Raised when an UpYun API method fails.

    More specific details will be included in the exception message
    when thrown.
    '''
#目录条目类
class FolderItem(object):
    """One directory-listing entry: name, type, size and timestamp."""
    def __init__(self, filename, filetype, size, number):
        # Plain record: keep the four listing fields as attributes.
        self.filename, self.filetype = filename, filetype
        self.size, self.number = size, number
class UpYun(object):
    """Thin client for the UpYun cloud-storage REST API.

    Every operation goes through _net_worker(), which issues a plain
    httplib request against self.thehost and authenticates with either
    HTTP Basic auth or the UpYun signature scheme (see setAuthType).
    """
    def __init__(self, bucket, username, password):
        self.thehost = 'v0.api.upyun.com'
        self.username = username
        self.password = password
        self.bucket = bucket
        self.upAuth = False
        self.debug = False
        self._tmp_info = None
        self.content_md5 = ''
        self.file_secret = ''
    # Client library version.
    def version(self):
        return '1.0.1'
    # Set the Content-MD5 of the file about to be uploaded (if the MD5 the
    # UpYun server computes differs from this value it replies
    # 406 Not Acceptable). Consumed once by the next _net_worker() call.
    def setContentMD5(self, vaule):
        self.content_md5 = vaule
    # Set the access secret of the file about to be uploaded (picture
    # buckets only). Once set, the plain URL stops working; append the
    # thumbnail separator plus the secret instead — e.g. with separator '!'
    # and secret 'bac', /folder/test.jpg is served as /folder/test.jpg!bac.
    def setFileSecret(self, vaule):
        self.file_secret = vaule
    # Pick the API endpoint domain (carrier-specific mirrors — Telecom,
    # Unicom, CNC, Mobile, Tietong — or the automatic default).
    def setApiDomain(self,thehost):
        self.thehost = thehost
    # Toggle UpYun signature authentication (False = HTTP Basic auth).
    def setAuthType(self,upAuth):
        self.upAuth = upAuth
    # GET /<bucket>/<path>; returns the raw httplib response.
    def getList(self, path='', headers={}, metadata={}):
        resp = self._net_worker( 'GET', DL+self.bucket+DL+path, '', headers, metadata)
        return resp
    # DELETE /<bucket>/<path>; returns the raw httplib response.
    def delete(self, path, headers={}, metadata={}):
        resp = self._net_worker('DELETE',DL+self.bucket+DL+path, '',headers,metadata)
        return resp
    # Space used by the bucket, as an int, or None on any error.
    def getBucketUsage(self, path='', headers={}, metadata={}):
        resp = self.getList(path+'?usage', headers, metadata)
        try:
            resp = int(resp.read())
        except Exception, e:
            resp = None
        return resp
    # Space used by one directory (path = directory path).
    def getFolderUsage(self, path='', headers={}, metadata={}):
        resp = self.getBucketUsage(path, headers, metadata)
        return resp
    # Create a directory.
    #   path: directory path
    #   auto: also create missing parent directories (up to 10 levels)
    # Returns True on HTTP 200, False otherwise.
    def mkDir(self, path, auto=False, headers={}, metadata={}):
        headers['folder'] = 'create'
        if auto == True :
            headers['mkdir'] = 'true'
        resp = self._net_worker('POST', DL+self.bucket+DL+path, '', headers, metadata)
        if resp.status == 200 :
            return True
        else :
            return False
    # Remove a directory; True on HTTP 200.
    def rmDir(self, path, headers={}, metadata={}):
        resp = self.delete(path,headers,metadata)
        if resp.status == 200 :
            return True
        else :
            return False
    # List a directory: returns a list of FolderItem on success, False on
    # error. The listing body is tab/newline separated, 4 fields per entry.
    def readDir(self, path='', headers={}, metadata={}):
        resp = self.getList(path, headers, metadata)
        if resp.status == 200 :
            result = re.sub('\t', '\/', resp.read())
            result = re.sub('\n', '\/', result)
            b = result.split('\/')
            i=0
            fis = []
            while i+1<len(b):
                fi = FolderItem(b[i],b[i+1],b[i+2],b[i+3])
                fis.append(fi)
                i+=4
            return fis
        else :
            return False
    # Upload a file.
    #   path: remote file location
    #   data: the file data, or an open file object
    #   auto: also create missing parent directories (up to 10 levels)
    # Returns True on HTTP 200, False otherwise.
    def writeFile(self, path, data, auto = False, headers={}, metadata={}):
        if auto == True :
            headers['mkdir'] = 'true'
        if type(data) != file :
            headers['Content-Length'] = len(data)
        resp = self._net_worker('PUT',DL+self.bucket+DL+path, data,headers,metadata)
        self._tmp_info = None
        if resp.status == 200 :
            # Picture buckets echo image info back in response headers.
            self._tmp_info = {}
            self._tmp_info['x-upyun-width'] = resp.getheader('x-upyun-width')
            self._tmp_info['x-upyun-height'] = resp.getheader('x-upyun-height')
            self._tmp_info['x-upyun-frames'] = resp.getheader('x-upyun-frames')
            self._tmp_info['x-upyun-file-type'] = resp.getheader('x-upyun-file-type')
            return True
        else :
            return False
    # Info recorded by the last writeFile() (picture buckets only).
    #   key: one of x-upyun-width / x-upyun-height / x-upyun-frames /
    #        x-upyun-file-type
    # Returns the value, or None when no info is available.
    def getWritedFileInfo(self, key):
        if self._tmp_info != None and self._tmp_info['x-upyun-width'] :
            return self._tmp_info[key]
        return None
    # Download a file; returns its body, or None on error.
    def readFile(self, path, headers={}, metadata={}):
        resp = self.getList(path, headers, metadata)
        if resp.status == 200 :
            return resp.read()
        else :
            return None
    # Delete a remote file; True on HTTP 200.
    def deleteFile(self, path, headers={}, metadata={}):
        resp = self.delete(path,headers,metadata)
        if resp.status == 200 :
            return True
        else :
            return False
    # Stat a remote path. Returns
    # {'date': unix time, 'type': file | folder, 'size': file size} or None.
    def getFileInfo(self, path, headers={}, metadata={}):
        resp = self._net_worker( 'HEAD', DL+self.bucket+DL+path, '', headers, metadata)
        if resp.status == 200 :
            rs = {}
            rs['type'] = resp.getheader('x-upyun-file-type')
            rs['size'] = resp.getheader('x-upyun-file-size')
            rs['date'] = resp.getheader('x-upyun-file-date')
            return rs
        else :
            return None
    # Shared HTTP plumbing: applies the pending one-shot MD5/secret headers,
    # merges metadata, authenticates, sends the request and returns the raw
    # response. Raises UpYunException on non-200 only when self.debug is on.
    def _net_worker(self, method, path, data='', headers={}, metadata={}):
        connection = httplib.HTTPConnection(self.thehost)
        if self.content_md5 != '':
            headers['Content-MD5'] = self.content_md5
            self.content_md5 = ''
        if self.file_secret != '':
            headers['Content-Secret'] = self.file_secret
            self.file_secret = ''
        final_headers = merge_meta(headers, metadata)
        if self.upAuth:
            self._add_upyun_auth_header(final_headers,method,path)
        else :
            self._basicAuth(final_headers,self.username,self.password)
        connection.request(method, path , data, final_headers)
        resp = connection.getresponse()
        if self.debug and resp.status != 200 and method != "HEAD" :
            raise UpYunException(u'ERROR: Code:%d,Message:%s'%(resp.status,resp.read()))
        return resp
    # UpYun signature auth:
    # Authorization = "UpYun user:md5(METHOD&uri&date&length&md5(password))"
    # where length falls back to 0 when no Content-Length header is present.
    def _add_upyun_auth_header(self, headers, method, uri):
        headers['Date'] = time.strftime("%a, %d %b %Y %X GMT", time.gmtime())
        if 'Content-Length' in headers:
            scr = md5(method+'&'+uri+'&'+headers['Date']+'&'
                      +str(headers['Content-Length'])+'&'+md5(self.password))
        else :
            scr = md5(method+'&'+uri+'&'+headers['Date']+'&'
                      +'0'+'&'+md5(self.password))
        headers['Authorization'] = "UpYun %s:%s" % (self.username, scr)
    def _basicAuth(self,headers, username, password):
        encode = base64.encodestring(username+':'+password)
        headers['Authorization'] = "Basic %s" % encode.strip()
| 31.452 | 104 | 0.56645 |
import httplib
import md5 as imd5
import base64
import time
import re
METADATA_PREFIX = 'x-upyun-meta-'
DL = '/'
def md5(src):
m1 = imd5.new()
m1.update(src)
dest1 = m1.hexdigest()
return dest1
def md5file(fobj):
m = imd5.new()
while True:
d = fobj.read(8096)
if not d:
break
m.update(d)
fobj.seek(0)
return m.hexdigest()
def merge_meta(headers, metadata):
final_headers = headers.copy()
for k in metadata.keys():
final_headers[METADATA_PREFIX + k] = metadata[k]
return final_headers
class UpYunException(Exception):
'''Raised when a Yupoo method fails.
More specific details will be included in the exception message
when thrown.
'''
class FolderItem(object):
def __init__(self, filename, filetype, size, number):
self.filename = filename
self.filetype = filetype
self.size = size
self.number = number
class UpYun(object):
def __init__(self, bucket, username, password):
self.thehost = 'v0.api.upyun.com'
self.username = username
self.password = password
self.bucket = bucket
self.upAuth = False
self.debug = False
self._tmp_info = None
self.content_md5 = ''
self.file_secret = ''
def version(self):
return '1.0.1'
def setContentMD5(self, vaule):
self.content_md5 = vaule
def setFileSecret(self, vaule):
self.file_secret = vaule
def setApiDomain(self,thehost):
self.thehost = thehost
def setAuthType(self,upAuth):
self.upAuth = upAuth
def getList(self, path='', headers={}, metadata={}):
resp = self._net_worker( 'GET', DL+self.bucket+DL+path, '', headers, metadata)
return resp
def delete(self, path, headers={}, metadata={}):
resp = self._net_worker('DELETE',DL+self.bucket+DL+path, '',headers,metadata)
return resp
def getBucketUsage(self, path='', headers={}, metadata={}):
resp = self.getList(path+'?usage', headers, metadata)
try:
resp = int(resp.read())
except Exception, e:
resp = None
return resp
def getFolderUsage(self, path='', headers={}, metadata={}):
resp = self.getBucketUsage(path, headers, metadata)
return resp
def mkDir(self, path, auto=False, headers={}, metadata={}):
headers['folder'] = 'create'
if auto == True :
headers['mkdir'] = 'true'
resp = self._net_worker('POST', DL+self.bucket+DL+path, '', headers, metadata)
if resp.status == 200 :
return True
else :
return False
def rmDir(self, path, headers={}, metadata={}):
resp = self.delete(path,headers,metadata)
if resp.status == 200 :
return True
else :
return False
def readDir(self, path='', headers={}, metadata={}):
resp = self.getList(path, headers, metadata)
if resp.status == 200 :
result = re.sub('\t', '\/', resp.read())
result = re.sub('\n', '\/', result)
b = result.split('\/')
i=0
fis = []
while i+1<len(b):
fi = FolderItem(b[i],b[i+1],b[i+2],b[i+3])
fis.append(fi)
i+=4
return fis
else :
return False
def writeFile(self, path, data, auto = False, headers={}, metadata={}):
if auto == True :
headers['mkdir'] = 'true'
if type(data) != file :
headers['Content-Length'] = len(data)
resp = self._net_worker('PUT',DL+self.bucket+DL+path, data,headers,metadata)
self._tmp_info = None
if resp.status == 200 :
self._tmp_info = {}
self._tmp_info['x-upyun-width'] = resp.getheader('x-upyun-width')
self._tmp_info['x-upyun-height'] = resp.getheader('x-upyun-height')
self._tmp_info['x-upyun-frames'] = resp.getheader('x-upyun-frames')
self._tmp_info['x-upyun-file-type'] = resp.getheader('x-upyun-file-type')
return True
else :
return False
def getWritedFileInfo(self, key):
if self._tmp_info != None and self._tmp_info['x-upyun-width'] :
return self._tmp_info[key]
return None
def readFile(self, path, headers={}, metadata={}):
resp = self.getList(path, headers, metadata)
if resp.status == 200 :
return resp.read()
else :
return None
def deleteFile(self, path, headers={}, metadata={}):
resp = self.delete(path,headers,metadata)
if resp.status == 200 :
return True
else :
return False
def getFileInfo(self, path, headers={}, metadata={}):
resp = self._net_worker( 'HEAD', DL+self.bucket+DL+path, '', headers, metadata)
if resp.status == 200 :
rs = {}
rs['type'] = resp.getheader('x-upyun-file-type')
rs['size'] = resp.getheader('x-upyun-file-size')
rs['date'] = resp.getheader('x-upyun-file-date')
return rs
else :
return None
def _net_worker(self, method, path, data='', headers={}, metadata={}):
connection = httplib.HTTPConnection(self.thehost)
if self.content_md5 != '':
headers['Content-MD5'] = self.content_md5
self.content_md5 = ''
if self.file_secret != '':
headers['Content-Secret'] = self.file_secret
self.file_secret = ''
final_headers = merge_meta(headers, metadata)
if self.upAuth:
self._add_upyun_auth_header(final_headers,method,path)
else :
self._basicAuth(final_headers,self.username,self.password)
connection.request(method, path , data, final_headers)
resp = connection.getresponse()
if self.debug and resp.status != 200 and method != "HEAD" :
raise UpYunException(u'ERROR: Code:%d,Message:%s'%(resp.status,resp.read()))
return resp
def _add_upyun_auth_header(self, headers, method, uri):
headers['Date'] = time.strftime("%a, %d %b %Y %X GMT", time.gmtime())
if 'Content-Length' in headers:
scr = md5(method+'&'+uri+'&'+headers['Date']+'&'
+str(headers['Content-Length'])+'&'+md5(self.password))
else :
scr = md5(method+'&'+uri+'&'+headers['Date']+'&'
+'0'+'&'+md5(self.password))
headers['Authorization'] = "UpYun %s:%s" % (self.username, scr)
def _basicAuth(self,headers, username, password):
encode = base64.encodestring(username+':'+password)
headers['Authorization'] = "Basic %s" % encode.strip()
| false | true |
f7215e55cf136f7e2d8b5021a9fd804c6a6a0820 | 42,837 | py | Python | pytests/epengine/bucket_level_durability.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | pytests/epengine/bucket_level_durability.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | pytests/epengine/bucket_level_durability.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
from random import sample, choice
from BucketLib.bucket import Bucket
from cb_tools.cb_cli import CbCli
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import BucketDurability
from epengine.durability_base import BucketDurabilityBase
from error_simulation.cb_error import CouchbaseError
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
class CreateBucketTests(BucketDurabilityBase):
    """Bucket-creation tests for the per-bucket minimum durability level.

    Covers creation through both cb-cli and the REST API, verifying that
    unsupported durability levels are rejected for the bucket type under
    test and that accepted levels are honoured by subsequent doc CRUDs.
    """
    def setUp(self):
        super(CreateBucketTests, self).setUp()
    def tearDown(self):
        super(CreateBucketTests, self).tearDown()
    def test_create_bucket_using_cli(self):
        """
        Create Bucket with all possible durability_levels and make sure
        durability levels are honored for document CRUDs
        - Will test for all bucket types (Couchbase, Ephemeral, Memcached)
        - With all possible d_levels for bucket_durability
        - Perform doc insert for each bucket to validate the sync_writes
        """
        # Create cb_cli session object
        shell = self.vbs_in_node[self.cluster.master]["shell"]
        cb_cli = CbCli(shell)
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Remove unsupported replica string in case if MC bucket
            if self.bucket_type == Bucket.Type.MEMCACHED:
                del bucket_dict[Bucket.replicaNumber]
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            output = cb_cli.create_bucket(bucket_dict, wait=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            # Create may legitimately fail: only flag it when this bucket
            # type is supposed to support the requested durability level.
            if "SUCCESS: Bucket created" not in str(output):
                create_failed = True
                if d_level in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create failed for %s bucket "
                                     "with min_durability_level %s"
                                     % (self.bucket_type, d_level))
            self.bucket_util.buckets = [bucket_obj]
            self.bucket_util.print_bucket_stats()
            self.summary.add_step(test_step)
            # Perform CRUDs to validate bucket_creation with durability
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate_CRUD_operation")
                # Cbstats vbucket-details validation
                self.cb_stat_verify(verification_dict)
            # Delete must report "not found" if the create failed earlier.
            output = cb_cli.delete_bucket(bucket_obj.name)
            if create_failed:
                if "ERROR: Bucket not found" not in str(output):
                    self.log_failure("Mismatch in bucket-delete output")
            elif "SUCCESS: Bucket deleted" not in str(output):
                self.log_failure("Mismatch in bucket-delete output")
            self.summary.add_step("Delete bucket")
    def test_create_bucket_using_rest(self):
        """
        REST variant of the CLI test above: create buckets with every
        supported durability level, expect an exception for levels the
        bucket type does not support, and validate doc CRUD durability
        on each successful create.
        """
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            try:
                self.bucket_util.create_bucket(bucket_obj,
                                               wait_for_warmup=True)
                self.get_vbucket_type_mapping(bucket_obj.name)
                if d_level not in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create succeeded for %s bucket for "
                                     "unsupported durability %s"
                                     % (self.bucket_type, d_level))
            except Exception as rest_exception:
                # REST rejects unsupported levels with an exception.
                create_failed = True
                self.log.info(rest_exception)
            self.bucket_util.print_bucket_stats()
            self.summary.add_step(test_step)
            # Perform CRUDs to validate bucket_creation with durability
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate CRUD operation")
                # Cbstats vbucket-details validation
                self.cb_stat_verify(verification_dict)
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Bucket deletion")
class BucketDurabilityTests(BucketDurabilityBase):
    def setUp(self):
        # Cluster/bucket environment setup lives in the shared base class.
        super(BucketDurabilityTests, self).setUp()
    def tearDown(self):
        # Base class handles bucket cleanup and cluster teardown.
        super(BucketDurabilityTests, self).tearDown()
    def test_durability_with_bucket_level_none(self):
        """
        Create Buckets with NONE durability level.
        Attempts sync_write with different durability_levels and validate
        CRUDs are honored with respective durability_levels set from clients
        """
        create_desc = "Creating %s bucket with level 'None'" % self.bucket_type
        b_durability = Bucket.DurabilityLevel.NONE
        verification_dict = self.get_cb_stat_verification_dict()
        bucket_dict = self.get_bucket_dict(self.bucket_type, b_durability)
        self.log.info(create_desc)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        # Index for doc_gen to avoid creating/deleting same docs across d_level
        index = 0
        for d_level in self.get_supported_durability_for_bucket():
            # Bucket durability stays NONE; only the per-document durability
            # requested by the client varies per iteration.
            self.validate_durability_with_crud(bucket_obj, b_durability,
                                               verification_dict,
                                               doc_durability=d_level,
                                               doc_start_index=index)
            self.summary.add_step("CRUD with doc_durability %s" % d_level)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            index += 10
    def test_ops_only_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        CRUDs from client without explicitly setting the durability and
        validate the ops to make sure respective durability is honored
        """
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(step_desc)
            # Async (no client-side durability) writes should still be
            # committed as sync-writes due to the bucket-level setting.
            self.validate_durability_with_crud(bucket_obj, d_level,
                                               verification_dict)
            self.summary.add_step("Async write with bucket durability %s"
                                  % d_level)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_sub_doc_op_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        Sub_doc CRUDs from client without durability settings and
        validate the ops to make sure respective durability is honored
        """
        # NOTE: doc_generator(...).next() is the Python 2 iterator protocol.
        key, value = doc_generator("test_key", 0, 1).next()
        sub_doc_key = "sub_doc_key"
        sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(step_desc)
            # SDK client to perform sub_doc ops
            client = SDKClient([self.cluster.master], bucket_obj)
            result = client.crud("create", key, value)
            verification_dict["ops_create"] += 1
            verification_dict["sync_write_committed_count"] += 1
            if result["status"] is False:
                self.log_failure("Doc insert failed for key: %s" % key)
            # Perform sub_doc CRUD
            for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
                               "subdoc_replace"]:
                sub_doc_val = choice(sub_doc_vals)
                _, fail = client.crud(sub_doc_op, key,
                                      [sub_doc_key, sub_doc_val])
                if fail:
                    self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                     % (sub_doc_op, key,
                                        sub_doc_key, sub_doc_val, result))
                else:
                    verification_dict["ops_update"] += 1
                    verification_dict["sync_write_committed_count"] += 1
                # Read the path back to confirm the mutation took effect.
                success, fail = client.crud("subdoc_read", key, sub_doc_key)
                if fail or str(success[key]["value"].get(0)) != sub_doc_val:
                    self.log_failure("%s failed. Expected: %s, Actual: %s"
                                     % (sub_doc_op, sub_doc_val,
                                        success[key]["value"].get(0)))
                self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Subdoc_delete and verify
            sub_doc_op = "subdoc_delete"
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if fail:
                self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                 % (sub_doc_op, key,
                                    sub_doc_key, sub_doc_val, result))
            verification_dict["ops_update"] += 1
            verification_dict["sync_write_committed_count"] += 1
            # Deleting the same sub-doc path again must fail with
            # PathNotFound.
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if SDKException.PathNotFoundException \
                    not in str(fail[key]["error"]):
                self.log_failure("Invalid error after sub_doc_delete")
            self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(1)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Close SDK client
            client.close()
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_higher_durability_level_from_client(self):
"""
Create bucket with durability_levels set and perform CRUDs using
durability_level > the bucket's durability_level and validate
"""
d_level_order_len = len(self.d_level_order)
supported_d_levels = self.get_supported_durability_for_bucket()
for d_level in supported_d_levels:
create_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, d_level)
verification_dict = self.get_cb_stat_verification_dict()
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
# Object to support performing CRUDs and create Bucket
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
# Perform doc_ops using all possible higher durability levels
index = 0
op_type = "create"
durability_index = self.d_level_order.index(d_level) + 1
while durability_index < d_level_order_len:
# Ephemeral case
if self.d_level_order[durability_index] not in supported_d_levels:
durability_index += 1
continue
self.validate_durability_with_crud(
bucket_obj,
d_level,
verification_dict,
op_type=op_type,
doc_durability=self.d_level_order[durability_index],
doc_start_index=index)
self.summary.add_step("%s with doc_level_durability %s"
% (op_type,
self.d_level_order[durability_index]))
durability_index += 1
index += 10
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
# Delete the bucket on server
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_lower_durability_level_from_client(self):
"""
Create bucket with durability_levels set and perform CRUDs using
durability_level > the bucket's d_level and validate
"""
for d_level in self.get_supported_durability_for_bucket():
create_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, d_level)
verification_dict = self.get_cb_stat_verification_dict()
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
# Object to support performing CRUDs and create Bucket
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
# Perform doc_ops using all possible higher durability levels
index = 0
op_type = "create"
durability_index = self.d_level_order.index(d_level) - 1
while durability_index >= 0:
self.validate_durability_with_crud(
bucket_obj,
d_level,
verification_dict,
op_type=op_type,
doc_durability=self.d_level_order[durability_index],
doc_start_index=index)
self.summary.add_step("%s with doc_level_durability %s"
% (op_type,
self.d_level_order[durability_index]))
durability_index -= 1
index += 10
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
# Delete the bucket on server
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_update_durability_level(self):
        """
        Create buckets with None durability levels and perform doc_ops.
        Update bucket_durability using diag-eval with/without doc_ops in
        parallel and validate the doc_ops results.
        """
        # When True, bucket durability is changed while the load task is
        # still running; otherwise the task completes before the update
        update_during_ops = self.input.param("update_during_ops", False)
        supported_d_levels = self.get_supported_durability_for_bucket()
        supported_bucket_d_levels = self.possible_d_levels[self.bucket_type]
        # Generators: initial load, extra creates on new keys, updates on
        # the first half of the initial keys, deletes on the second half
        create_gen_1 = doc_generator(self.key, 0, self.num_items)
        create_gen_2 = doc_generator("random_keys", self.num_items,
                                     self.num_items*2)
        update_gen = doc_generator(self.key, 0, self.num_items/2)
        delete_gen = doc_generator(self.key, self.num_items/2, self.num_items)
        # Override sdk_timeout to max value to avoid TimeoutExceptions
        self.sdk_timeout = 60
        # sample(seq, len(seq)) iterates the levels in a random order
        for bucket_durability in sample(supported_bucket_d_levels,
                                        len(supported_bucket_d_levels)):
            # NOTE(review): set(bucket_durability) builds a set of
            # *characters* (bucket_durability is a string), so the current
            # level is never actually removed from this pool. Probably
            # meant set([bucket_durability]) - but the larger pool is also
            # what guarantees enough .pop() values for the four tasks
            # below, so confirm before changing.
            b_durability_to_update = list(set(supported_bucket_d_levels)
                                          - set(bucket_durability))
            create_desc = "Create %s bucket with durability level '%s'" \
                          % (self.bucket_type, bucket_durability)
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type,
                                               bucket_durability)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            self.bucket_util.print_bucket_stats()
            # Load basic docs to support other CRUDs
            self.log.info("Performing initial doc_load")
            create_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, create_gen_1, "create",
                exp=self.maxttl,
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=8,
                batch_size=200,
                sdk_client_pool=self.sdk_client_pool)
            self.task_manager.get_task_result(create_task)
            if create_task.fail:
                self.log_failure("Failures seen during initial creates")
            self.summary.add_step("Initial doc_loading")
            # Initiate CRUD task objects (start_task=False: they are
            # started one at a time in the loop further down)
            create_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, create_gen_2, "create",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            update_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, update_gen, "update",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            read_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, update_gen, "read",
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            delete_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, delete_gen, "delete",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            # Start CRUD and update bucket-durability as specified
            # by config param 'update_during_ops'
            tasks_to_run = [create_task, update_task,
                            read_task, delete_task]
            if self.bucket_type == Bucket.Type.EPHEMERAL:
                # Ephemeral runs only create plus one of update/delete
                tasks_to_run = [create_task,
                                choice([update_task, delete_task])]
                clients = read_task.clients
                # Close clients in unused tasks
                if tasks_to_run[1].op_type == "delete":
                    clients += update_task.clients
                else:
                    clients += delete_task.clients
                for client in clients:
                    client.close()
            for task in tasks_to_run:
                # One durability level is consumed per task run
                new_d_level = BucketDurability[b_durability_to_update.pop()]
                self.log.info("Starting %s task" % task.op_type)
                self.task_manager.add_new_task(task)
                if update_during_ops:
                    self.sleep(5, "Wait for load_task to start before "
                                  "setting durability=%s" % new_d_level)
                else:
                    self.task_manager.get_task_result(task)
                # Update bucket durability
                self.bucket_util.update_bucket_property(
                    bucket_obj,
                    bucket_durability=new_d_level)
                buckets = self.bucket_util.get_all_buckets()
                if buckets[0].durability_level != new_d_level:
                    self.log_failure("Failed to update bucket_d_level to %s"
                                     % new_d_level)
                self.summary.add_step("Set bucket-durability=%s"
                                      % new_d_level)
                self.bucket_util.print_bucket_stats()
                if update_during_ops:
                    # Task was left running during the update; join it now
                    self.task_manager.get_task_result(task)
                if task.fail:
                    self.log_failure("Failures seen during %s"
                                     % task.op_type)
                self.summary.add_step("Doc op %s during bucket durability"
                                      % task.op_type)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_update_durability_between_doc_op(self):
        """
        1. Create Bucket with durability level set.
        2. Bring down a node such that durability CRUD will wait
        3. Perform doc_op and update bucket_level_durability
        4. Revert scenario induced in step#2, such that doc_op will complete
        5. Make sure doc_ops in step#3 went through using prev. d-level
        """
        # Starting from max_durability levels because to iterate
        # all lower levels for doc_ops with level update
        supported_d_levels = deepcopy(self.d_level_order)
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            # Ephemeral buckets support only the first two levels
            supported_d_levels = supported_d_levels[0:2]
        supported_d_levels.reverse()
        # Re-append the highest level so the final transition is exercised
        supported_d_levels += [supported_d_levels[0]]
        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, supported_d_levels[0])
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type,
                                           supported_d_levels[0])
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        self.bucket_util.print_bucket_stats()
        # Loop to update all other durability levels
        prev_d_level = supported_d_levels[0]
        for bucket_durability in supported_d_levels[1:]:
            # vbucket type to target and error to inject for this level
            target_vb_type, simulate_error = \
                self.durability_helper.get_vb_and_error_type(bucket_durability)
            # Pick a random node to perform error sim and load
            random_node = choice(self.vbs_in_node.keys())
            error_sim = CouchbaseError(
                self.log,
                self.vbs_in_node[random_node]["shell"])
            target_vbs = self.vbs_in_node[random_node][target_vb_type]
            # Single doc targeting a vbucket on the affected node
            doc_gen = doc_generator(self.key, 0, 1,
                                    target_vbucket=target_vbs)
            doc_load_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, doc_gen, "update",
                durability=Bucket.DurabilityLevel.NONE,
                timeout_secs=60,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)
            # Simulate target error condition
            error_sim.create(simulate_error)
            self.sleep(5, "Wait before starting doc_op")
            self.task_manager.add_new_task(doc_load_task)
            # Update bucket durability while the doc_op is stuck
            new_d_level = BucketDurability[bucket_durability]
            self.sleep(5, "Wait before updating bucket level "
                          "durability=%s" % new_d_level)
            self.bucket_util.update_bucket_property(
                bucket_obj,
                bucket_durability=new_d_level)
            self.bucket_util.print_bucket_stats()
            buckets = self.bucket_util.get_all_buckets()
            if buckets[0].durability_level != new_d_level:
                self.log_failure("Failed to update bucket_d_level to %s"
                                 % new_d_level)
            self.summary.add_step("Set bucket-durability=%s" % new_d_level)
            # With previous level NONE the op needs no durability ack and
            # must have completed; otherwise it must still be waiting
            if prev_d_level == Bucket.DurabilityLevel.NONE:
                if not doc_load_task.completed:
                    self.log_failure("Doc-op still pending for d_level 'NONE'")
            elif doc_load_task.completed:
                self.log_failure("Doc-op completed before reverting the "
                                 "error condition: %s" % simulate_error)
            # Revert the induced error condition
            error_sim.revert(simulate_error)
            self.task_manager.get_task_result(doc_load_task)
            if doc_load_task.fail:
                self.log_failure("Doc_op failed")
            self.summary.add_step("Doc_op with previous d_level %s"
                                  % prev_d_level)
            prev_d_level = bucket_durability
        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_sync_write_in_progress(self):
        """
        Test to simulate sync_write_in_progress error and validate the behavior
        This will validate failure in majority of nodes, where durability will
        surely fail for all CRUDs
        1. Select nodes to simulate the error which will affect the durability
        2. Enable the specified error_scenario on the selected nodes
        3. Perform individual CRUDs and verify sync_write_in_progress errors
        4. Validate the end results
        """
        def test_scenario(bucket, doc_ops,
                          with_sync_write_val=None):
            # Run one [op_1, op_2, init_flag] variation: start op_1 as a
            # pending sync_write, then issue op_2 on the same keys and
            # verify the SDK reports sync_write_in_progress.
            # NOTE(review): this closure also reads 'doc_op' from the
            # enclosing loop in a few places; it equals the 'doc_ops'
            # parameter at the only call site, but the mixed usage is
            # fragile - verify before reusing this helper.
            # Set crud_batch_size
            crud_batch_size = 4
            simulate_error = CouchbaseError.STOP_MEMCACHED
            # Fetch target_vbs for CRUDs: replica vbuckets common to all
            # affected nodes, so every op hits a stopped replica
            node_vb_info = self.vbs_in_node
            target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
            if len(target_nodes) > 1:
                index = 1
                while index < len(target_nodes):
                    target_vbuckets = list(
                        set(target_vbuckets).intersection(
                            set(node_vb_info[target_nodes[index]]["replica"]))
                    )
                    index += 1
            # Variable to hold one of the doc_generator objects
            gen_loader_1 = None
            gen_loader_2 = None
            # Initialize doc_generators to use for testing
            self.log.info("Creating doc_generators")
            gen_create = doc_generator(
                self.key, self.num_items, crud_batch_size,
                vbuckets=self.cluster_util.vbuckets,
                target_vbucket=target_vbuckets)
            gen_update = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster_util.vbuckets,
                target_vbucket=target_vbuckets, mutate=1)
            gen_delete = doc_generator(
                self.key, 0, crud_batch_size,
                vbuckets=self.cluster_util.vbuckets,
                target_vbucket=target_vbuckets)
            self.log.info("Done creating doc_generators")
            # Start CRUD operation based on the given 'doc_op' type
            if doc_ops[0] == "create":
                self.num_items += crud_batch_size
                gen_loader_1 = gen_create
            elif doc_ops[0] in ["update", "replace", "touch"]:
                gen_loader_1 = gen_update
            elif doc_ops[0] == "delete":
                gen_loader_1 = gen_delete
                self.num_items -= crud_batch_size
            if doc_ops[1] == "create":
                gen_loader_2 = gen_create
            elif doc_ops[1] in ["update", "replace", "touch"]:
                gen_loader_2 = gen_update
            elif doc_ops[1] == "delete":
                gen_loader_2 = gen_delete
            # Load required docs for doc_op_1 in case of type != create
            if doc_op[2] == "load_initial_docs":
                doc_loading_task = self.task.async_load_gen_docs(
                    self.cluster, bucket, gen_loader_1, "create", 0,
                    batch_size=crud_batch_size, process_concurrency=1,
                    timeout_secs=10,
                    print_ops_rate=False,
                    sdk_client_pool=self.sdk_client_pool)
                self.task_manager.get_task_result(doc_loading_task)
                if doc_loading_task.fail:
                    self.log_failure("Failure while loading initial docs")
                self.summary.add_step("Create docs for %s" % doc_op[0])
                verification_dict["ops_create"] += crud_batch_size
                verification_dict["sync_write_committed_count"] \
                    += crud_batch_size
            # Initialize tasks and store the task objects
            doc_loader_task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
                batch_size=crud_batch_size, process_concurrency=8,
                timeout_secs=60,
                print_ops_rate=False,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)
            # SDK client for performing individual ops
            client = SDKClient([self.cluster.master], bucket)
            # Perform specified action
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.create(simulate_error,
                                 bucket_name=bucket.name)
            self.sleep(5, "Wait for error simulation to take effect")
            self.task_manager.add_new_task(doc_loader_task)
            self.sleep(5, "Wait for task_1 CRUDs to reach server")
            # Perform specified CRUD operation on sync_write docs
            tem_gen = deepcopy(gen_loader_2)
            while tem_gen.has_next():
                key, value = tem_gen.next()
                for fail_fast in [True, False]:
                    if with_sync_write_val:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           durability=with_sync_write_val,
                                           timeout=3, time_unit="seconds",
                                           fail_fast=fail_fast)
                    else:
                        fail = client.crud(doc_ops[1], key, value=value,
                                           exp=0,
                                           timeout=3, time_unit="seconds",
                                           fail_fast=fail_fast)
                    # fail_fast=True is expected to cancel the request
                    # instead of retrying until the timeout
                    expected_exception = SDKException.AmbiguousTimeoutException
                    retry_reason = \
                        SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                    if fail_fast:
                        expected_exception = \
                            SDKException.RequestCanceledException
                        retry_reason = \
                            SDKException.RetryReason \
                            .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES
                    # Validate the returned error from the SDK
                    if expected_exception not in str(fail["error"]):
                        self.log_failure("Invalid exception for {0}: {1}"
                                         .format(key, fail["error"]))
                    if retry_reason not in str(fail["error"]):
                        self.log_failure("Invalid retry reason for {0}: {1}"
                                         .format(key, fail["error"]))
                    # Try reading the value in SyncWrite in-progress state
                    fail = client.crud("read", key)
                    if doc_ops[0] == "create":
                        # Expected KeyNotFound in case of CREATE operation
                        if fail["status"] is True:
                            self.log_failure(
                                "%s returned value during SyncWrite state: %s"
                                % (key, fail))
                    else:
                        # Expects prev value in case of other operations
                        if fail["status"] is False:
                            self.log_failure(
                                "Key %s read failed for previous value: %s"
                                % (key, fail))
            # Revert the introduced error condition
            for node in target_nodes:
                error_sim = CouchbaseError(self.log,
                                           self.vbs_in_node[node]["shell"])
                error_sim.revert(simulate_error,
                                 bucket_name=bucket.name)
            # Wait for doc_loader_task to complete
            self.task.jython_task_manager.get_task_result(doc_loader_task)
            verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
            verification_dict["sync_write_committed_count"] \
                += crud_batch_size
            # Disconnect the client
            client.close()
        # [pending_op, concurrent_op, initial-load flag] variations
        crud_variations = [
            ["create", "create", ""],
            ["update", "update", "load_initial_docs"],
            ["update", "delete", ""],
            ["update", "touch", ""],
            ["update", "replace", ""],
            ["delete", "delete", ""],
            ["delete", "update", "load_initial_docs"],
            ["delete", "touch", "load_initial_docs"],
            ["delete", "replace", "load_initial_docs"]
        ]
        # Select nodes to affect and open required shell_connections
        target_nodes = self.getTargetNodes()
        for b_d_level in self.possible_d_levels[self.bucket_type]:
            # Skip of Bucket durability level 'None'
            if b_d_level == Bucket.DurabilityLevel.NONE:
                continue
            verification_dict = self.get_cb_stat_verification_dict()
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, b_d_level)
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, b_d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            for doc_op in crud_variations:
                test_scenario(bucket_obj, doc_op)
                self.summary.add_step("SyncWriteInProgress for [%s, %s]"
                                      % (doc_op[0], doc_op[1]))
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Bucket deletion
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_observe_scenario(self):
        """
        Creates bucket with bucket level durability.
        Perform CRUD operations and make sure all the operations are
        done as sync_write in server.
        Note: Passing persistTo/replicateTo will test the observe scenarios
        """
        def perform_crud_ops():
            # Run a fixed CRUD sequence on one key and track the CAS:
            # every mutation must bump it, a read must leave it unchanged
            old_cas = 0
            client = SDKClient([self.cluster.master], bucket_obj)
            for op_type in ["create", "update", "read", "replace", "delete"]:
                crud_desc = "Key %s, doc_op: %s" % (key, op_type)
                self.log.info(crud_desc)
                result = client.crud(op_type, key, value,
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to)
                if op_type != "read":
                    # 'replace' is accounted under the update counter
                    if op_type != "replace":
                        dict_key = "ops_%s" % op_type
                    else:
                        dict_key = "ops_update"
                    verification_dict[dict_key] += 1
                    verification_dict["sync_write_committed_count"] += 1
                    if result["cas"] == old_cas:
                        self.log_failure("CAS didn't get updated: %s"
                                         % result["cas"])
                elif op_type == "read":
                    if result["cas"] != old_cas:
                        self.log_failure("CAS updated for read operation: %s"
                                         % result["cas"])
                self.summary.add_step(crud_desc)
                old_cas = result["cas"]
            client.close()
        doc_gen = doc_generator("test_key", 0, 1, mutate=0)
        key, value = doc_gen.next()
        for d_level in self.possible_d_levels[self.bucket_type]:
            # Level 'None' does not force sync_writes; nothing to observe
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            create_desc = "Create bucket with durability %s" % d_level
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
            self.summary.add_step(create_desc)
            verification_dict = self.get_cb_stat_verification_dict()
            # Test CRUD operations
            perform_crud_ops()
            # Validate doc_count (sequence ends with delete, so 0 docs)
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(0)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
def test_durability_impossible(self):
"""
Create bucket with replica > num_kv_nodes.
Perform doc insert to make sure we get TimeoutException due to
durability_impossible from the server.
"""
verification_dict = self.get_cb_stat_verification_dict()
key, value = doc_generator("test_key", 0, 1).next()
for d_level in self.possible_d_levels[self.bucket_type]:
if d_level == Bucket.DurabilityLevel.NONE:
continue
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
# Object to support performing CRUDs
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
self.summary.add_step("Create bucket with durability %s"
% d_level)
client = SDKClient([self.cluster.master], bucket_obj)
result = client.crud("create", key, value, timeout=3)
if result["status"] is True \
or SDKException.DurabilityImpossibleException \
not in result["error"]:
self.log_failure("Indirect sync_write succeeded "
"without enough nodes")
client.close()
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
# Delete the created bucket
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete bucket with d_level %s" % d_level)
from copy import deepcopy
from random import sample, choice
from BucketLib.bucket import Bucket
from cb_tools.cb_cli import CbCli
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import BucketDurability
from epengine.durability_base import BucketDurabilityBase
from error_simulation.cb_error import CouchbaseError
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
class CreateBucketTests(BucketDurabilityBase):
    """Bucket-creation tests for min-durability via couchbase-cli and REST."""
    def setUp(self):
        # Shared bucket-durability test setup from the base class
        super(CreateBucketTests, self).setUp()
    def tearDown(self):
        super(CreateBucketTests, self).tearDown()
    def test_create_bucket_using_cli(self):
        """
        Attempt couchbase-cli bucket-create for every durability level and
        validate the CLI output against the levels supported by this
        bucket type; valid buckets also get a CRUD validation pass.
        """
        shell = self.vbs_in_node[self.cluster.master]["shell"]
        cb_cli = CbCli(shell)
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Memcached buckets take no replica argument on the CLI
            if self.bucket_type == Bucket.Type.MEMCACHED:
                del bucket_dict[Bucket.replicaNumber]
            bucket_obj = Bucket(bucket_dict)
            output = cb_cli.create_bucket(bucket_dict, wait=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            if "SUCCESS: Bucket created" not in str(output):
                create_failed = True
                # Failure is a test failure only for supported levels
                if d_level in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create failed for %s bucket "
                                     "with min_durability_level %s"
                                     % (self.bucket_type, d_level))
            self.bucket_util.buckets = [bucket_obj]
            self.bucket_util.print_bucket_stats()
            self.summary.add_step(test_step)
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate_CRUD_operation")
                self.cb_stat_verify(verification_dict)
            # Delete via CLI; output depends on whether create succeeded
            output = cb_cli.delete_bucket(bucket_obj.name)
            if create_failed:
                if "ERROR: Bucket not found" not in str(output):
                    self.log_failure("Mismatch in bucket-delete output")
            elif "SUCCESS: Bucket deleted" not in str(output):
                self.log_failure("Mismatch in bucket-delete output")
            self.summary.add_step("Delete bucket")
    def test_create_bucket_using_rest(self):
        """
        Create buckets through REST for every durability level; creation
        must succeed only for levels this bucket type supports, and
        successful buckets get a CRUD validation pass before deletion.
        """
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            try:
                self.bucket_util.create_bucket(bucket_obj,
                                               wait_for_warmup=True)
                self.get_vbucket_type_mapping(bucket_obj.name)
                # Creation succeeding for an unsupported level is a failure
                if d_level not in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create succeeded for %s bucket for "
                                     "unsupported durability %s"
                                     % (self.bucket_type, d_level))
            except Exception as rest_exception:
                # Expected path for unsupported durability levels
                create_failed = True
                self.log.info(rest_exception)
            self.bucket_util.print_bucket_stats()
            self.summary.add_step(test_step)
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate CRUD operation")
                self.cb_stat_verify(verification_dict)
                self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
                self.summary.add_step("Bucket deletion")
class BucketDurabilityTests(BucketDurabilityBase):
    def setUp(self):
        # Shared bucket-durability test setup from the base class
        super(BucketDurabilityTests, self).setUp()
    def tearDown(self):
        # Shared bucket-durability test teardown from the base class
        super(BucketDurabilityTests, self).tearDown()
    def test_durability_with_bucket_level_none(self):
        """
        Create a bucket with bucket-durability 'None' and run CRUD
        validation with every supported per-doc durability level.
        """
        create_desc = "Creating %s bucket with level 'None'" % self.bucket_type
        b_durability = Bucket.DurabilityLevel.NONE
        verification_dict = self.get_cb_stat_verification_dict()
        bucket_dict = self.get_bucket_dict(self.bucket_type, b_durability)
        self.log.info(create_desc)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        # Each iteration uses a fresh doc-key range (index += 10)
        index = 0
        for d_level in self.get_supported_durability_for_bucket():
            self.validate_durability_with_crud(bucket_obj, b_durability,
                                               verification_dict,
                                               doc_durability=d_level,
                                               doc_start_index=index)
            self.summary.add_step("CRUD with doc_durability %s" % d_level)
            # Cbstats vbucket-details validation after each level
            self.cb_stat_verify(verification_dict)
            index += 10
    def test_ops_only_with_bucket_level_durability(self):
        """
        For each supported non-None bucket-durability level, create the
        bucket, run async-write CRUD validation (no per-doc durability)
        and verify the sync_write stats, then delete the bucket.
        """
        for d_level in self.get_supported_durability_for_bucket():
            # Level 'None' is covered by a separate test
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(step_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(step_desc)
            self.validate_durability_with_crud(bucket_obj, d_level,
                                               verification_dict)
            self.summary.add_step("Async write with bucket durability %s"
                                  % d_level)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_sub_doc_op_with_bucket_level_durability(self):
        """
        For each supported non-None bucket-durability level, create the
        bucket and run sub-doc insert/upsert/replace/delete on one doc,
        verifying each op is committed as a sync_write.
        """
        key, value = doc_generator("test_key", 0, 1).next()
        sub_doc_key = "sub_doc_key"
        sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
        for d_level in self.get_supported_durability_for_bucket():
            # Level 'None' performs no sync_writes; skip it
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(step_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(step_desc)
            # SDK client to perform sub_doc ops
            client = SDKClient([self.cluster.master], bucket_obj)
            result = client.crud("create", key, value)
            verification_dict["ops_create"] += 1
            verification_dict["sync_write_committed_count"] += 1
            if result["status"] is False:
                self.log_failure("Doc insert failed for key: %s" % key)
            # Perform sub_doc CRUD
            for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
                               "subdoc_replace"]:
                sub_doc_val = choice(sub_doc_vals)
                _, fail = client.crud(sub_doc_op, key,
                                      [sub_doc_key, sub_doc_val])
                if fail:
                    self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                     % (sub_doc_op, key,
                                        sub_doc_key, sub_doc_val, result))
                else:
                    verification_dict["ops_update"] += 1
                    verification_dict["sync_write_committed_count"] += 1
                    # Read back and verify the stored sub_doc value
                    success, fail = client.crud("subdoc_read", key, sub_doc_key)
                    if fail or str(success[key]["value"].get(0)) != sub_doc_val:
                        self.log_failure("%s failed. Expected: %s, Actual: %s"
                                         % (sub_doc_op, sub_doc_val,
                                            success[key]["value"].get(0)))
                self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Subdoc_delete and verify
            sub_doc_op = "subdoc_delete"
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if fail:
                self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                 % (sub_doc_op, key,
                                    sub_doc_key, sub_doc_val, result))
            verification_dict["ops_update"] += 1
            verification_dict["sync_write_committed_count"] += 1
            # Deleting again must report PathNotFound
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if SDKException.PathNotFoundException \
                    not in str(fail[key]["error"]):
                self.log_failure("Invalid error after sub_doc_delete")
            self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(1)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Close SDK client
            client.close()
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_higher_durability_level_from_client(self):
        """
        Create bucket with durability_levels set and perform CRUDs using
        durability_level > the bucket's durability_level and validate
        """
        d_level_order_len = len(self.d_level_order)
        supported_d_levels = self.get_supported_durability_for_bucket()
        for d_level in supported_d_levels:
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            # Perform doc_ops using all possible higher durability levels
            index = 0
            op_type = "create"
            durability_index = self.d_level_order.index(d_level) + 1
            while durability_index < d_level_order_len:
                # Skip levels unsupported by this bucket type (Ephemeral)
                if self.d_level_order[durability_index] not in supported_d_levels:
                    durability_index += 1
                    continue
                self.validate_durability_with_crud(
                    bucket_obj,
                    d_level,
                    verification_dict,
                    op_type=op_type,
                    doc_durability=self.d_level_order[durability_index],
                    doc_start_index=index)
                self.summary.add_step("%s with doc_level_durability %s"
                                      % (op_type,
                                         self.d_level_order[durability_index]))
                durability_index += 1
                index += 10
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_lower_durability_level_from_client(self):
        """
        Create bucket with durability_levels set and perform CRUDs using
        durability_level lower than the bucket's d_level and validate
        """
        for d_level in self.get_supported_durability_for_bucket():
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            # Perform doc_ops using all lower durability levels, walking
            # down from just below the bucket's level to the lowest
            index = 0
            op_type = "create"
            durability_index = self.d_level_order.index(d_level) - 1
            while durability_index >= 0:
                self.validate_durability_with_crud(
                    bucket_obj,
                    d_level,
                    verification_dict,
                    op_type=op_type,
                    doc_durability=self.d_level_order[durability_index],
                    doc_start_index=index)
                self.summary.add_step("%s with doc_level_durability %s"
                                      % (op_type,
                                         self.d_level_order[durability_index]))
                durability_index -= 1
                index += 10
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_update_durability_level(self):
    """Change bucket-level durability around (or during) running doc loads.

    For each supported bucket durability level (visited in random order):
    create the bucket, perform an initial load, then run create / update /
    read / delete tasks.  Before each task (or 5s after it starts, when
    update_during_ops is set) the bucket durability is switched to a
    different level and the bucket property is re-read to confirm the
    update took effect.
    """
    update_during_ops = self.input.param("update_during_ops", False)
    supported_d_levels = self.get_supported_durability_for_bucket()
    supported_bucket_d_levels = self.possible_d_levels[self.bucket_type]
    # Doc generators shared by every bucket-durability iteration.
    # NOTE(review): self.num_items/2 is float division on Python 3 --
    # confirm doc_generator tolerates a float start/end.
    create_gen_1 = doc_generator(self.key, 0, self.num_items)
    create_gen_2 = doc_generator("random_keys", self.num_items,
                                 self.num_items*2)
    update_gen = doc_generator(self.key, 0, self.num_items/2)
    delete_gen = doc_generator(self.key, self.num_items/2, self.num_items)
    self.sdk_timeout = 60
    # sample(seq, len(seq)) == the whole list in random order.
    for bucket_durability in sample(supported_bucket_d_levels,
                                    len(supported_bucket_d_levels)):
        # Levels to switch to during this iteration, one per task below.
        # NOTE(review): set(bucket_durability) is the set of the string's
        # CHARACTERS, so the current level is never actually removed from
        # the candidates -- confirm whether {bucket_durability} was meant.
        b_durability_to_update = list(set(supported_bucket_d_levels)
                                      - set(bucket_durability))
        create_desc = "Create %s bucket with durability level '%s'" \
                      % (self.bucket_type, bucket_durability)
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type,
                                           bucket_durability)
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        self.bucket_util.print_bucket_stats()

        self.log.info("Performing initial doc_load")
        create_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, create_gen_1, "create",
            exp=self.maxttl,
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=8,
            batch_size=200,
            sdk_client_pool=self.sdk_client_pool)
        self.task_manager.get_task_result(create_task)
        if create_task.fail:
            self.log_failure("Failures seen during initial creates")
        self.summary.add_step("Initial doc_loading")

        # The four deferred tasks (start_task=False); each picks a random
        # doc-level durability from the bucket's supported set.
        create_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, create_gen_2, "create",
            exp=self.maxttl,
            durability=choice(supported_d_levels),
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)
        update_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, update_gen, "update",
            exp=self.maxttl,
            durability=choice(supported_d_levels),
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)
        read_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, update_gen, "read",
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)
        delete_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, delete_gen, "delete",
            exp=self.maxttl,
            durability=choice(supported_d_levels),
            compression=self.sdk_compression,
            timeout_secs=self.sdk_timeout,
            process_concurrency=2,
            batch_size=100,
            start_task=False,
            print_ops_rate=False,
            sdk_client_pool=self.sdk_client_pool)

        tasks_to_run = [create_task, update_task,
                        read_task, delete_task]
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            # Ephemeral buckets only run create + one of update/delete;
            # close the SDK clients of the tasks that will never start.
            tasks_to_run = [create_task,
                            choice([update_task, delete_task])]
            clients = read_task.clients
            if tasks_to_run[1].op_type == "delete":
                clients += update_task.clients
            else:
                clients += delete_task.clients
            for client in clients:
                client.close()

        for task in tasks_to_run:
            # Next bucket-durability to switch to while this task runs.
            new_d_level = BucketDurability[b_durability_to_update.pop()]
            self.log.info("Starting %s task" % task.op_type)
            self.task_manager.add_new_task(task)
            if update_during_ops:
                self.sleep(5, "Wait for load_task to start before "
                              "setting durability=%s" % new_d_level)
            else:
                # Serial mode: let the doc-op finish before the switch.
                self.task_manager.get_task_result(task)

            self.bucket_util.update_bucket_property(
                bucket_obj,
                bucket_durability=new_d_level)
            buckets = self.bucket_util.get_all_buckets()
            if buckets[0].durability_level != new_d_level:
                self.log_failure("Failed to update bucket_d_level to %s"
                                 % new_d_level)
            self.summary.add_step("Set bucket-durability=%s"
                                  % new_d_level)
            self.bucket_util.print_bucket_stats()

            if update_during_ops:
                # Now wait for the concurrently-running load to finish.
                self.task_manager.get_task_result(task)
            if task.fail:
                self.log_failure("Failures seen during %s"
                                 % task.op_type)
            self.summary.add_step("Doc op %s during bucket durability"
                                  % task.op_type)

        self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_update_durability_between_doc_op(self):
    """Flip bucket durability while a doc-op is stuck behind an error sim.

    A bucket is created at the strongest supported level; then, for each
    subsequent level in the reversed order (plus the first level repeated
    at the end, so every transition is covered), a single NONE-durability
    update is started against a vbucket on a node where an error
    condition is simulated, the bucket durability is changed mid-flight,
    the error is reverted and the op is expected to complete.
    """
    supported_d_levels = deepcopy(self.d_level_order)
    if self.bucket_type == Bucket.Type.EPHEMERAL:
        # Ephemeral buckets only support the first two levels.
        supported_d_levels = supported_d_levels[0:2]
    supported_d_levels.reverse()
    # Append the (now last-visited) starting level so the walk wraps.
    supported_d_levels += [supported_d_levels[0]]

    create_desc = "Creating %s bucket with level '%s'" \
                  % (self.bucket_type, supported_d_levels[0])
    self.log.info(create_desc)
    bucket_dict = self.get_bucket_dict(self.bucket_type,
                                       supported_d_levels[0])
    bucket_obj = Bucket(bucket_dict)
    self.bucket_util.create_bucket(bucket_obj,
                                   wait_for_warmup=True)
    self.get_vbucket_type_mapping(bucket_obj.name)
    self.summary.add_step(create_desc)
    self.bucket_util.print_bucket_stats()

    prev_d_level = supported_d_levels[0]
    for bucket_durability in supported_d_levels[1:]:
        # Which vbucket type to target and which error to simulate for
        # this durability level is decided by the helper.
        target_vb_type, simulate_error = \
            self.durability_helper.get_vb_and_error_type(bucket_durability)
        # NOTE(review): choice(dict.keys()) requires a list-returning
        # keys() (Python 2 / Jython); on Python 3 this raises -- confirm
        # the target runtime.
        random_node = choice(self.vbs_in_node.keys())
        error_sim = CouchbaseError(
            self.log,
            self.vbs_in_node[random_node]["shell"])
        target_vbs = self.vbs_in_node[random_node][target_vb_type]
        # Single doc targeted at a vbucket owned by the chosen node.
        doc_gen = doc_generator(self.key, 0, 1,
                                target_vbucket=target_vbs)
        doc_load_task = self.task.async_load_gen_docs(
            self.cluster, bucket_obj, doc_gen, "update",
            durability=Bucket.DurabilityLevel.NONE,
            timeout_secs=60,
            start_task=False,
            sdk_client_pool=self.sdk_client_pool)
        error_sim.create(simulate_error)
        self.sleep(5, "Wait before starting doc_op")
        self.task_manager.add_new_task(doc_load_task)

        new_d_level = BucketDurability[bucket_durability]
        self.sleep(5, "Wait before updating bucket level "
                      "durability=%s" % new_d_level)
        self.bucket_util.update_bucket_property(
            bucket_obj,
            bucket_durability=new_d_level)
        self.bucket_util.print_bucket_stats()
        buckets = self.bucket_util.get_all_buckets()
        if buckets[0].durability_level != new_d_level:
            self.log_failure("Failed to update bucket_d_level to %s"
                             % new_d_level)
        self.summary.add_step("Set bucket-durability=%s" % new_d_level)

        # With bucket durability NONE the op should not block on the
        # simulated error; with any other level it must still be pending.
        if prev_d_level == Bucket.DurabilityLevel.NONE:
            if not doc_load_task.completed:
                self.log_failure("Doc-op still pending for d_level 'NONE'")
        elif doc_load_task.completed:
            self.log_failure("Doc-op completed before reverting the "
                             "error condition: %s" % simulate_error)
        error_sim.revert(simulate_error)
        self.task_manager.get_task_result(doc_load_task)
        if doc_load_task.fail:
            self.log_failure("Doc_op failed")
        self.summary.add_step("Doc_op with previous d_level %s"
                              % prev_d_level)
        prev_d_level = bucket_durability

    self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
    self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_sync_write_in_progress(self):
    """Verify a second writer is rejected while a SyncWrite is in flight.

    For every non-NONE bucket durability level, a bucket is created and a
    matrix of (first_op, second_op) CRUD combinations is exercised: the
    first op is left hanging by stopping memcached on the replica-owning
    nodes, then the second op on the same keys must fail with
    SYNC_WRITE_IN_PROGRESS (ambiguous timeout when retried, request
    cancelled when fail_fast).
    """
    def test_scenario(bucket, doc_ops,
                      with_sync_write_val=None):
        # NOTE(review): this closure mixes its `doc_ops` parameter with
        # the outer loop variable `doc_op` (same value at call time) --
        # confirm before refactoring either name.
        crud_batch_size = 4
        simulate_error = CouchbaseError.STOP_MEMCACHED

        # Pick vbuckets whose replicas live on ALL target nodes, so the
        # error simulation is guaranteed to stall the SyncWrite.
        node_vb_info = self.vbs_in_node
        target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
        if len(target_nodes) > 1:
            index = 1
            while index < len(target_nodes):
                target_vbuckets = list(
                    set(target_vbuckets).intersection(
                        set(node_vb_info[target_nodes[index]]["replica"]))
                )
                index += 1

        gen_loader_1 = None
        gen_loader_2 = None
        self.log.info("Creating doc_generators")
        gen_create = doc_generator(
            self.key, self.num_items, crud_batch_size,
            vbuckets=self.cluster_util.vbuckets,
            target_vbucket=target_vbuckets)
        gen_update = doc_generator(
            self.key, 0, crud_batch_size,
            vbuckets=self.cluster_util.vbuckets,
            target_vbucket=target_vbuckets, mutate=1)
        gen_delete = doc_generator(
            self.key, 0, crud_batch_size,
            vbuckets=self.cluster_util.vbuckets,
            target_vbucket=target_vbuckets)
        self.log.info("Done creating doc_generators")

        # Select the generator (and expected item-count delta) for op 1.
        if doc_ops[0] == "create":
            self.num_items += crud_batch_size
            gen_loader_1 = gen_create
        elif doc_ops[0] in ["update", "replace", "touch"]:
            gen_loader_1 = gen_update
        elif doc_ops[0] == "delete":
            gen_loader_1 = gen_delete
            self.num_items -= crud_batch_size
        # Generator for the conflicting second op.
        if doc_ops[1] == "create":
            gen_loader_2 = gen_create
        elif doc_ops[1] in ["update", "replace", "touch"]:
            gen_loader_2 = gen_update
        elif doc_ops[1] == "delete":
            gen_loader_2 = gen_delete

        # Some combinations need the docs to exist up-front.
        if doc_op[2] == "load_initial_docs":
            doc_loading_task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_loader_1, "create", 0,
                batch_size=crud_batch_size, process_concurrency=1,
                timeout_secs=10,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            self.task_manager.get_task_result(doc_loading_task)
            if doc_loading_task.fail:
                self.log_failure("Failure while loading initial docs")
            self.summary.add_step("Create docs for %s" % doc_op[0])
            verification_dict["ops_create"] += crud_batch_size
            verification_dict["sync_write_committed_count"] \
                += crud_batch_size

        # First op: deferred so the error sim is active before it runs.
        doc_loader_task = self.task.async_load_gen_docs(
            self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
            batch_size=crud_batch_size, process_concurrency=8,
            timeout_secs=60,
            print_ops_rate=False,
            start_task=False,
            sdk_client_pool=self.sdk_client_pool)
        client = SDKClient([self.cluster.master], bucket)
        # Stall replica acks by stopping memcached on the target nodes.
        for node in target_nodes:
            error_sim = CouchbaseError(self.log,
                                       self.vbs_in_node[node]["shell"])
            error_sim.create(simulate_error,
                             bucket_name=bucket.name)
        self.sleep(5, "Wait for error simulation to take effect")
        self.task_manager.add_new_task(doc_loader_task)
        self.sleep(5, "Wait for task_1 CRUDs to reach server")

        # Second op on the same keys must be rejected while the
        # SyncWrite from task 1 is still pending.
        tem_gen = deepcopy(gen_loader_2)
        while tem_gen.has_next():
            key, value = tem_gen.next()
            for fail_fast in [True, False]:
                if with_sync_write_val:
                    fail = client.crud(doc_ops[1], key, value=value,
                                       exp=0,
                                       durability=with_sync_write_val,
                                       timeout=3, time_unit="seconds",
                                       fail_fast=fail_fast)
                else:
                    fail = client.crud(doc_ops[1], key, value=value,
                                       exp=0,
                                       timeout=3, time_unit="seconds",
                                       fail_fast=fail_fast)
                # fail_fast cancels the request instead of retrying it.
                expected_exception = SDKException.AmbiguousTimeoutException
                retry_reason = \
                    SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
                if fail_fast:
                    expected_exception = \
                        SDKException.RequestCanceledException
                    retry_reason = \
                        SDKException.RetryReason \
                        .KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES
                if expected_exception not in str(fail["error"]):
                    self.log_failure("Invalid exception for {0}: {1}"
                                     .format(key, fail["error"]))
                if retry_reason not in str(fail["error"]):
                    self.log_failure("Invalid retry reason for {0}: {1}"
                                     .format(key, fail["error"]))
            # Reads must still see the pre-SyncWrite state of the key.
            fail = client.crud("read", key)
            if doc_ops[0] == "create":
                if fail["status"] is True:
                    self.log_failure(
                        "%s returned value during SyncWrite state: %s"
                        % (key, fail))
            else:
                if fail["status"] is False:
                    self.log_failure(
                        "Key %s read failed for previous value: %s"
                        % (key, fail))

        # Revert the error so the pending SyncWrite can commit.
        for node in target_nodes:
            error_sim = CouchbaseError(self.log,
                                       self.vbs_in_node[node]["shell"])
            error_sim.revert(simulate_error,
                             bucket_name=bucket.name)
        self.task.jython_task_manager.get_task_result(doc_loader_task)
        verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
        verification_dict["sync_write_committed_count"] \
            += crud_batch_size
        client.close()

    # [first_op, second_op, whether docs must be pre-loaded].
    crud_variations = [
        ["create", "create", ""],
        ["update", "update", "load_initial_docs"],
        ["update", "delete", ""],
        ["update", "touch", ""],
        ["update", "replace", ""],
        ["delete", "delete", ""],
        ["delete", "update", "load_initial_docs"],
        ["delete", "touch", "load_initial_docs"],
        ["delete", "replace", "load_initial_docs"]
    ]
    target_nodes = self.getTargetNodes()
    for b_d_level in self.possible_d_levels[self.bucket_type]:
        if b_d_level == Bucket.DurabilityLevel.NONE:
            # SyncWrite-in-progress cannot occur without durability.
            continue
        verification_dict = self.get_cb_stat_verification_dict()
        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, b_d_level)
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type, b_d_level)
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)

        for doc_op in crud_variations:
            test_scenario(bucket_obj, doc_op)
            self.summary.add_step("SyncWriteInProgress for [%s, %s]"
                                  % (doc_op[0], doc_op[1]))
        self.cb_stat_verify(verification_dict)
        self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_observe_scenario(self):
    """Run observe-based CRUDs (replicate_to / persist_to) per durability.

    For each non-NONE bucket durability level, a bucket is created and a
    create→update→read→replace→delete sequence is executed on a single
    key, checking that the CAS changes on every mutation and stays the
    same on reads; cbstats counters are verified at the end.
    """
    def perform_crud_ops():
        # Uses the enclosing scope's bucket_obj / key / value /
        # verification_dict (closure over the loop below).
        old_cas = 0
        client = SDKClient([self.cluster.master], bucket_obj)
        for op_type in ["create", "update", "read", "replace", "delete"]:
            crud_desc = "Key %s, doc_op: %s" % (key, op_type)
            self.log.info(crud_desc)
            result = client.crud(op_type, key, value,
                                 replicate_to=self.replicate_to,
                                 persist_to=self.persist_to)
            if op_type != "read":
                # "replace" is accounted under the update counter.
                if op_type != "replace":
                    dict_key = "ops_%s" % op_type
                else:
                    dict_key = "ops_update"
                verification_dict[dict_key] += 1
                verification_dict["sync_write_committed_count"] += 1
                # Every mutation must bump the CAS.
                if result["cas"] == old_cas:
                    self.log_failure("CAS didn't get updated: %s"
                                     % result["cas"])
            elif op_type == "read":
                # Reads must not change the CAS.
                if result["cas"] != old_cas:
                    self.log_failure("CAS updated for read operation: %s"
                                     % result["cas"])
            self.summary.add_step(crud_desc)
            old_cas = result["cas"]
        client.close()

    doc_gen = doc_generator("test_key", 0, 1, mutate=0)
    key, value = doc_gen.next()
    for d_level in self.possible_d_levels[self.bucket_type]:
        if d_level == Bucket.DurabilityLevel.NONE:
            continue
        create_desc = "Create bucket with durability %s" % d_level
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        # Object to support performing CRUDs
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
        self.summary.add_step(create_desc)
        verification_dict = self.get_cb_stat_verification_dict()
        # Test CRUD operations
        perform_crud_ops()
        # Validate doc_count (sequence ends with a delete, so 0 docs).
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(0)
        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)
        # Delete the created bucket
        self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
        self.summary.add_step("Delete bucket with d_level %s" % d_level)
def test_durability_impossible(self):
    """SyncWrites on an under-provisioned cluster must fail as impossible.

    For each non-NONE bucket durability level: create the bucket, attempt
    a single create (which inherits the bucket durability), and expect it
    to fail with DurabilityImpossibleException because there are not
    enough nodes to satisfy the level.  cbstats must show no successful
    ops (the verification dict is never incremented).
    """
    verification_dict = self.get_cb_stat_verification_dict()
    key, value = doc_generator("test_key", 0, 1).next()
    for d_level in self.possible_d_levels[self.bucket_type]:
        if d_level == Bucket.DurabilityLevel.NONE:
            continue
        bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
        # Object to support performing CRUDs
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
        self.summary.add_step("Create bucket with durability %s"
                              % d_level)

        client = SDKClient([self.cluster.master], bucket_obj)
        result = client.crud("create", key, value, timeout=3)
        # Either a success or a wrong exception type is a test failure.
        if result["status"] is True \
                or SDKException.DurabilityImpossibleException \
                not in result["error"]:
            self.log_failure("Indirect sync_write succeeded "
                             "without enough nodes")
        client.close()

        # Cbstats vbucket-details validation
        self.cb_stat_verify(verification_dict)
        # Delete the created bucket
        self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
        self.summary.add_step("Delete bucket with d_level %s" % d_level)
| true | true |
f7215eaffbd11774be2c8286cbada91c47ac6b09 | 1,136 | py | Python | tools/getTask.py | Sh4der/adventofcode | 1823d7bac33778ef850e384d914843a0c0ded869 | [
"MIT"
] | null | null | null | tools/getTask.py | Sh4der/adventofcode | 1823d7bac33778ef850e384d914843a0c0ded869 | [
"MIT"
] | null | null | null | tools/getTask.py | Sh4der/adventofcode | 1823d7bac33778ef850e384d914843a0c0ded869 | [
"MIT"
] | null | null | null | import html2markdown as h2m
import urllib.request
import re
from sys import argv, exit
# Markers delimiting the puzzle description on an Advent of Code page.
TASK_BEGIN = '<article class="day-desc">'
TASK_END = '</article>'


def extract_article(content):
    """Return the raw HTML inside the first day-desc <article> block.

    Raises ValueError (from str.index) when the markers are absent.
    """
    start = content.index(TASK_BEGIN) + len(TASK_BEGIN)
    return content[start:content.index(TASK_END)]


def polish_markdown(markdown_task):
    """Clean up html2markdown output.

    Fixes escaped newlines/quotes, renders <em> as bold, rewrites
    site-relative links to absolute adventofcode.com URLs and strips any
    remaining HTML tags.
    """
    markdown_task = markdown_task.replace('\\n', '\n\t')
    markdown_task = markdown_task.replace("\\\\'", "'")
    markdown_task = re.sub('<em.*?>', '**', markdown_task)
    markdown_task = re.sub('</em>', '**', markdown_task)
    # [text](/path) -> [text](https://adventofcode.com/path)
    markdown_task = re.sub(r'(\[.*?\]\()/(.*?\))',
                           r'\1https://adventofcode.com/\2', markdown_task)
    markdown_task = re.sub('<.*?>', '', markdown_task)
    return markdown_task


if __name__ == '__main__':
    # Usage: getTask.py <day> <destination-file>
    # The previous check accepted len(argv) == 2 and then crashed with
    # IndexError on argv[2]; both arguments are now required up front.
    if len(argv) != 3:
        exit(1)
    day = argv[1]
    # Advent of Code publishes puzzles for days 1..25 (was 1..23 before).
    if int(day) not in range(1, 26):
        exit(1)
    destinationFilePath = argv[2]
    url = "https://adventofcode.com/2020/day/" + day
    response = urllib.request.urlopen(url)
    content = str(response.read())
    htmlTask = extract_article(content)
    print(htmlTask)
    markdownTask = polish_markdown(h2m.convert(htmlTask))
    # Context manager guarantees the file is closed even on write errors.
    with open(destinationFilePath, "w") as destinationFile:
        destinationFile.write(markdownTask)
| 28.4 | 96 | 0.626761 | import html2markdown as h2m
import urllib.request
import re
from sys import argv, exit
if __name__ == '__main__':
    # Usage: getTask.py <day> <destination-file>
    # NOTE(review): len(argv) == 2 passes this check, but argv[2] below
    # then raises IndexError -- confirm the intended arity is exactly 3.
    if len(argv) not in (2, 3): exit(1)
    day = argv[1]
    # NOTE(review): range(1, 24) rejects days 24-25, although Advent of
    # Code publishes puzzles for days 1..25 -- confirm.
    if int(day) not in range(1, 24):
        exit(1)
    destinationFilePath = argv[2]
    # Fetch the puzzle page for the requested 2020 day.
    url = "https://adventofcode.com/2020/day/" + day
    response = urllib.request.urlopen(url)
    content = str(response.read())
    # Slice out the first <article class="day-desc"> block (the task text).
    taskBegin = '<article class="day-desc">'
    taskEnd = '</article>'
    htmlTask = content[content.index(taskBegin) + len(taskBegin):content.index(taskEnd)]
    print(htmlTask)
    # Convert to markdown, then fix escapes, bold the <em> tags, make
    # relative links absolute and strip the remaining HTML tags.
    markdownTask = h2m.convert(htmlTask)
    markdownTask = markdownTask.replace('\\n', '\n\t')
    markdownTask = markdownTask.replace("\\\\'", "'")
    markdownTask = re.sub('<em.*?>', '**', markdownTask)
    markdownTask = re.sub('</em>', '**', markdownTask)
    markdownTask = re.sub('(\[.*?\]\()/(.*?\))', r'\1https://adventofcode.com/\2', markdownTask)
    markdownTask = re.sub('<.*?>', '', markdownTask)
    # Write the cleaned markdown to the destination file.
    destinationFile = open(destinationFilePath, "w")
    destinationFile.write(format(markdownTask))
    destinationFile.close()
| true | true |
f7215eeae590fcac6dd15874392f9bd3361b29e0 | 1,294 | py | Python | wikum-env3/lib/python3.7/site-packages/sumy/models/dom/_sentence.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | null | null | null | wikum-env3/lib/python3.7/site-packages/sumy/models/dom/_sentence.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | null | null | null | wikum-env3/lib/python3.7/site-packages/sumy/models/dom/_sentence.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ...utils import cached_property
from ..._compat import to_unicode, to_string, unicode_compatible
@unicode_compatible
class Sentence(object):
__slots__ = ("_text", "_cached_property_words", "_tokenizer", "_is_heading",)
def __init__(self, text, tokenizer, is_heading=False):
self._text = to_unicode(text).strip()
self._tokenizer = tokenizer
self._is_heading = bool(is_heading)
@cached_property
def words(self):
return self._tokenizer.to_words(self._text)
@property
def is_heading(self):
return self._is_heading
def __eq__(self, sentence):
assert isinstance(sentence, Sentence)
return self._is_heading is sentence._is_heading and self._text == sentence._text
def __ne__(self, sentence):
return not self.__eq__(sentence)
def __hash__(self):
return hash((self._is_heading, self._text))
def __unicode__(self):
return self._text
def __repr__(self):
return to_string("<%s: %s>") % (
"Heading" if self._is_heading else "Sentence",
self.__str__()
)
| 28.755556 | 89 | 0.651468 |
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ...utils import cached_property
from ..._compat import to_unicode, to_string, unicode_compatible
@unicode_compatible
class Sentence(object):
__slots__ = ("_text", "_cached_property_words", "_tokenizer", "_is_heading",)
def __init__(self, text, tokenizer, is_heading=False):
self._text = to_unicode(text).strip()
self._tokenizer = tokenizer
self._is_heading = bool(is_heading)
@cached_property
def words(self):
return self._tokenizer.to_words(self._text)
@property
def is_heading(self):
return self._is_heading
def __eq__(self, sentence):
assert isinstance(sentence, Sentence)
return self._is_heading is sentence._is_heading and self._text == sentence._text
def __ne__(self, sentence):
return not self.__eq__(sentence)
def __hash__(self):
return hash((self._is_heading, self._text))
def __unicode__(self):
return self._text
def __repr__(self):
return to_string("<%s: %s>") % (
"Heading" if self._is_heading else "Sentence",
self.__str__()
)
| true | true |
f7216012bdabcc6a4f76ac1521c5236c58f42c7a | 393 | py | Python | bookitoBackend/User/urls.py | mazdakdev/Bookito | 38e18fee22aafea95429da01e9769acf2748f676 | [
"MIT"
] | 10 | 2021-12-09T04:39:03.000Z | 2022-02-07T05:42:29.000Z | bookitoBackend/User/urls.py | mazdakdev/Bookito | 38e18fee22aafea95429da01e9769acf2748f676 | [
"MIT"
] | 2 | 2022-02-07T18:12:54.000Z | 2022-02-10T10:27:37.000Z | bookitoBackend/User/urls.py | mazdakdev/Bookito | 38e18fee22aafea95429da01e9769acf2748f676 | [
"MIT"
] | null | null | null | from django.urls import path
from .api import *
from knox import views as knox_views
urlpatterns = [
#domain.dn/api/v1/register/ | POST
path('register/' , SignUpAPI.as_view() , name='register'),
#domain.dn/api/v1/register/ | POST
path('login/' , SignInAPI.as_view() , name='login'),
#domain.dn/api/v1/user | GET
path('user/', MainUser.as_view() , name='user'),
] | 21.833333 | 62 | 0.64631 | from django.urls import path
from .api import *
from knox import views as knox_views
urlpatterns = [
path('register/' , SignUpAPI.as_view() , name='register'),
path('login/' , SignInAPI.as_view() , name='login'),
path('user/', MainUser.as_view() , name='user'),
] | true | true |
f7216046bb8fc44df661da3c65b4c665932b2bf6 | 1,845 | py | Python | gdsfactory/simulation/simphony/components/ring_double_siepic.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/simulation/simphony/components/ring_double_siepic.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/simulation/simphony/components/ring_double_siepic.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | from simphony.library import siepic
from simphony.netlist import Subcircuit
def ring_double_siepic(
wg_width=0.5,
gap=0.2,
length_x=4,
bend_radius=5,
length_y=2,
coupler=siepic.ebeam_dc_halfring_straight,
straight=siepic.ebeam_wg_integral_1550,
terminator=siepic.ebeam_terminator_te1550,
):
r"""Return double bus ring made of two couplers (ct: top, cb: bottom).
connected with two vertical straights (wyl: left, wyr: right)
.. code::
--==ct==--
| |
wl wr length_y
| |
--==cb==-- gap
length_x
drop n1 _ _ n3 cdrop
\______/
______
in n2 _/ \_n4
| |
n1 | | n3
\______/
______
in n2 _/ \_n4 output
"""
straight = straight() if callable(straight) else straight
coupler = coupler() if callable(coupler) else coupler
# Create the circuit, add all individual instances
circuit = Subcircuit("mzi")
circuit.add([(coupler, "ct"), (coupler, "cb"), (straight, "wl"), (straight, "wr")])
# Circuits can be connected using the elements' string names:
circuit.connect_many(
[
("cb", "n1", "wl", "n1"),
("wl", "n2", "ct", "n2"),
("ct", "n4", "wr", "n1"),
("wr", "n2", "cb", "n3"),
]
)
circuit.elements["cb"].pins["n2"] = "input"
circuit.elements["cb"].pins["n4"] = "output"
circuit.elements["ct"].pins["n1"] = "drop"
circuit.elements["ct"].pins["n3"] = "cdrop"
return circuit
if __name__ == "__main__":
import matplotlib.pyplot as plt
from gdsfactory.simulationsimphony import plot_circuit
c = ring_double_siepic()
plot_circuit(c)
plt.show()
| 25.273973 | 87 | 0.539837 | from simphony.library import siepic
from simphony.netlist import Subcircuit
def ring_double_siepic(
wg_width=0.5,
gap=0.2,
length_x=4,
bend_radius=5,
length_y=2,
coupler=siepic.ebeam_dc_halfring_straight,
straight=siepic.ebeam_wg_integral_1550,
terminator=siepic.ebeam_terminator_te1550,
):
straight = straight() if callable(straight) else straight
coupler = coupler() if callable(coupler) else coupler
circuit = Subcircuit("mzi")
circuit.add([(coupler, "ct"), (coupler, "cb"), (straight, "wl"), (straight, "wr")])
circuit.connect_many(
[
("cb", "n1", "wl", "n1"),
("wl", "n2", "ct", "n2"),
("ct", "n4", "wr", "n1"),
("wr", "n2", "cb", "n3"),
]
)
circuit.elements["cb"].pins["n2"] = "input"
circuit.elements["cb"].pins["n4"] = "output"
circuit.elements["ct"].pins["n1"] = "drop"
circuit.elements["ct"].pins["n3"] = "cdrop"
return circuit
if __name__ == "__main__":
import matplotlib.pyplot as plt
from gdsfactory.simulationsimphony import plot_circuit
c = ring_double_siepic()
plot_circuit(c)
plt.show()
| true | true |
f72160586c3494b35606c754418a7f75fc368e1d | 10,517 | py | Python | pytorch/data.py | layumi/dgcnn | a7b58796ffe549f2d8bdb06a84f62aba03e1d3a1 | [
"MIT"
] | null | null | null | pytorch/data.py | layumi/dgcnn | a7b58796ffe549f2d8bdb06a84f62aba03e1d3a1 | [
"MIT"
] | null | null | null | pytorch/data.py | layumi/dgcnn | a7b58796ffe549f2d8bdb06a84f62aba03e1d3a1 | [
"MIT"
] | 1 | 2021-01-15T10:04:33.000Z | 2021-01-15T10:04:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: data.py
@Time: 2018/10/13 6:21 PM
Modified by
@Author: An Tao
@Contact: ta19@mails.tsinghua.edu.cn
@Time: 2020/2/27 9:32 PM
"""
import os
import sys
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
def download_modelnet40():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def download_shapenetpart():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')):
www = 'https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip'
zipfile = os.path.basename(www)
os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')))
os.system('rm %s' % (zipfile))
def download_S3DIS():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')):
www = 'https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip'
zipfile = os.path.basename(www)
os.system('wget --no-check-certificate %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version')):
if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')):
print('Please download Stanford3dDataset_v1.2_Aligned_Version.zip \
from https://goo.gl/forms/4SoGp4KtH1jfRqEj2 and place it under data/')
sys.exit(0)
else:
zippath = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')
os.system('unzip %s' % (zippath))
os.system('rm %s' % (zippath))
def load_data_cls(partition):
download_modelnet40()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40*hdf5_2048', '*%s*.h5'%partition)):
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def load_data_partseg(partition):
download_shapenetpart()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
all_seg = []
if partition == 'trainval':
file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*train*.h5')) \
+ glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*val*.h5'))
else:
file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*%s*.h5'%partition))
for h5_name in file:
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
seg = f['pid'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_seg.append(seg)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
all_seg = np.concatenate(all_seg, axis=0)
return all_data, all_label, all_seg
def prepare_test_data_semseg():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(os.path.join(DATA_DIR, 'stanford_indoor3d')):
os.system('python prepare_data/collect_indoor3d_data.py')
if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')):
os.system('python prepare_data/gen_indoor3d_h5.py')
def load_data_semseg(partition, test_area):
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
download_S3DIS()
prepare_test_data_semseg()
if partition == 'train':
data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')
else:
data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')
with open(os.path.join(data_dir, "all_files.txt")) as f:
all_files = [line.rstrip() for line in f]
with open(os.path.join(data_dir, "room_filelist.txt")) as f:
room_filelist = [line.rstrip() for line in f]
data_batchlist, label_batchlist = [], []
for f in all_files:
file = h5py.File(os.path.join(DATA_DIR, f), 'r+')
data = file["data"][:]
label = file["label"][:]
data_batchlist.append(data)
label_batchlist.append(label)
data_batches = np.concatenate(data_batchlist, 0)
seg_batches = np.concatenate(label_batchlist, 0)
test_area_name = "Area_" + test_area
train_idxs, test_idxs = [], []
for i, room_name in enumerate(room_filelist):
if test_area_name in room_name:
test_idxs.append(i)
else:
train_idxs.append(i)
if partition == 'train':
all_data = data_batches[train_idxs, ...]
all_seg = seg_batches[train_idxs, ...]
else:
all_data = data_batches[test_idxs, ...]
all_seg = seg_batches[test_idxs, ...]
return all_data, all_seg
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
def rotate_pointcloud(pointcloud):
theta = np.pi*2 * np.random.uniform()
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
pointcloud[:,[0,2]] = pointcloud[:,[0,2]].dot(rotation_matrix) # random rotation (x,z)
return pointcloud
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data_cls(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
class ShapeNetPart(Dataset):
def __init__(self, num_points, partition='train', class_choice=None):
self.data, self.label, self.seg = load_data_partseg(partition)
self.cat2id = {'airplane': 0, 'bag': 1, 'cap': 2, 'car': 3, 'chair': 4,
'earphone': 5, 'guitar': 6, 'knife': 7, 'lamp': 8, 'laptop': 9,
'motor': 10, 'mug': 11, 'pistol': 12, 'rocket': 13, 'skateboard': 14, 'table': 15}
self.seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
self.index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
self.num_points = num_points
self.partition = partition
self.class_choice = class_choice
if self.class_choice != None:
id_choice = self.cat2id[self.class_choice]
indices = (self.label == id_choice).squeeze()
self.data = self.data[indices]
self.label = self.label[indices]
self.seg = self.seg[indices]
self.seg_num_all = self.seg_num[id_choice]
self.seg_start_index = self.index_start[id_choice]
else:
self.seg_num_all = 50
self.seg_start_index = 0
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
seg = self.seg[item][:self.num_points]
if self.partition == 'trainval':
# pointcloud = translate_pointcloud(pointcloud)
indices = list(range(pointcloud.shape[0]))
np.random.shuffle(indices)
pointcloud = pointcloud[indices]
seg = seg[indices]
return pointcloud, label, seg
def __len__(self):
return self.data.shape[0]
class S3DIS(Dataset):
def __init__(self, num_points=4096, partition='train', test_area='1'):
self.data, self.seg = load_data_semseg(partition, test_area)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
seg = self.seg[item][:self.num_points]
if self.partition == 'train':
indices = list(range(pointcloud.shape[0]))
np.random.shuffle(indices)
pointcloud = pointcloud[indices]
seg = seg[indices]
seg = torch.LongTensor(seg)
return pointcloud, seg
def __len__(self):
return self.data.shape[0]
if __name__ == '__main__':
train = ModelNet40(1024)
test = ModelNet40(1024, 'test')
data, label = train[0]
print(data.shape)
print(label.shape)
trainval = ShapeNetPart(2048, 'trainval')
test = ShapeNetPart(2048, 'test')
data, label, seg = trainval[0]
print(data.shape)
print(label.shape)
print(seg.shape)
train = S3DIS(4096)
test = S3DIS(4096, 'test')
data, seg = train[0]
print(data.shape)
print(seg.shape)
| 37.03169 | 105 | 0.629837 |
import os
import sys
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
def download_modelnet40():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def download_shapenetpart():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')):
www = 'https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip'
zipfile = os.path.basename(www)
os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')))
os.system('rm %s' % (zipfile))
def download_S3DIS():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')):
www = 'https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip'
zipfile = os.path.basename(www)
os.system('wget --no-check-certificate %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version')):
if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')):
print('Please download Stanford3dDataset_v1.2_Aligned_Version.zip \
from https://goo.gl/forms/4SoGp4KtH1jfRqEj2 and place it under data/')
sys.exit(0)
else:
zippath = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')
os.system('unzip %s' % (zippath))
os.system('rm %s' % (zippath))
def load_data_cls(partition):
download_modelnet40()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40*hdf5_2048', '*%s*.h5'%partition)):
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def load_data_partseg(partition):
download_shapenetpart()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
all_seg = []
if partition == 'trainval':
file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*train*.h5')) \
+ glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*val*.h5'))
else:
file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*%s*.h5'%partition))
for h5_name in file:
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
seg = f['pid'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_seg.append(seg)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
all_seg = np.concatenate(all_seg, axis=0)
return all_data, all_label, all_seg
def prepare_test_data_semseg():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(os.path.join(DATA_DIR, 'stanford_indoor3d')):
os.system('python prepare_data/collect_indoor3d_data.py')
if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')):
os.system('python prepare_data/gen_indoor3d_h5.py')
def load_data_semseg(partition, test_area):
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
download_S3DIS()
prepare_test_data_semseg()
if partition == 'train':
data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')
else:
data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')
with open(os.path.join(data_dir, "all_files.txt")) as f:
all_files = [line.rstrip() for line in f]
with open(os.path.join(data_dir, "room_filelist.txt")) as f:
room_filelist = [line.rstrip() for line in f]
data_batchlist, label_batchlist = [], []
for f in all_files:
file = h5py.File(os.path.join(DATA_DIR, f), 'r+')
data = file["data"][:]
label = file["label"][:]
data_batchlist.append(data)
label_batchlist.append(label)
data_batches = np.concatenate(data_batchlist, 0)
seg_batches = np.concatenate(label_batchlist, 0)
test_area_name = "Area_" + test_area
train_idxs, test_idxs = [], []
for i, room_name in enumerate(room_filelist):
if test_area_name in room_name:
test_idxs.append(i)
else:
train_idxs.append(i)
if partition == 'train':
all_data = data_batches[train_idxs, ...]
all_seg = seg_batches[train_idxs, ...]
else:
all_data = data_batches[test_idxs, ...]
all_seg = seg_batches[test_idxs, ...]
return all_data, all_seg
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
def rotate_pointcloud(pointcloud):
theta = np.pi*2 * np.random.uniform()
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
pointcloud[:,[0,2]] = pointcloud[:,[0,2]].dot(rotation_matrix)
return pointcloud
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data_cls(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
class ShapeNetPart(Dataset):
def __init__(self, num_points, partition='train', class_choice=None):
self.data, self.label, self.seg = load_data_partseg(partition)
self.cat2id = {'airplane': 0, 'bag': 1, 'cap': 2, 'car': 3, 'chair': 4,
'earphone': 5, 'guitar': 6, 'knife': 7, 'lamp': 8, 'laptop': 9,
'motor': 10, 'mug': 11, 'pistol': 12, 'rocket': 13, 'skateboard': 14, 'table': 15}
self.seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
self.index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
self.num_points = num_points
self.partition = partition
self.class_choice = class_choice
if self.class_choice != None:
id_choice = self.cat2id[self.class_choice]
indices = (self.label == id_choice).squeeze()
self.data = self.data[indices]
self.label = self.label[indices]
self.seg = self.seg[indices]
self.seg_num_all = self.seg_num[id_choice]
self.seg_start_index = self.index_start[id_choice]
else:
self.seg_num_all = 50
self.seg_start_index = 0
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
seg = self.seg[item][:self.num_points]
if self.partition == 'trainval':
indices = list(range(pointcloud.shape[0]))
np.random.shuffle(indices)
pointcloud = pointcloud[indices]
seg = seg[indices]
return pointcloud, label, seg
def __len__(self):
return self.data.shape[0]
class S3DIS(Dataset):
def __init__(self, num_points=4096, partition='train', test_area='1'):
self.data, self.seg = load_data_semseg(partition, test_area)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
seg = self.seg[item][:self.num_points]
if self.partition == 'train':
indices = list(range(pointcloud.shape[0]))
np.random.shuffle(indices)
pointcloud = pointcloud[indices]
seg = seg[indices]
seg = torch.LongTensor(seg)
return pointcloud, seg
def __len__(self):
return self.data.shape[0]
if __name__ == '__main__':
train = ModelNet40(1024)
test = ModelNet40(1024, 'test')
data, label = train[0]
print(data.shape)
print(label.shape)
trainval = ShapeNetPart(2048, 'trainval')
test = ShapeNetPart(2048, 'test')
data, label, seg = trainval[0]
print(data.shape)
print(label.shape)
print(seg.shape)
train = S3DIS(4096)
test = S3DIS(4096, 'test')
data, seg = train[0]
print(data.shape)
print(seg.shape)
| true | true |
f721616e2e38048326fa72960040fcebc5347540 | 1,016 | py | Python | packages/syft/tests/syft/lib/sklearn/model_serialize_test.py | wip-abramson/PySyft | c321b26ce1aa3c969793874e663a8a46b1228dd1 | [
"Apache-1.1"
] | 1 | 2021-08-31T11:37:19.000Z | 2021-08-31T11:37:19.000Z | packages/syft/tests/syft/lib/sklearn/model_serialize_test.py | karapto/PySyft | 2940bfebb3e0f37a1b7451cf9581c41917534ed6 | [
"Apache-1.1"
] | null | null | null | packages/syft/tests/syft/lib/sklearn/model_serialize_test.py | karapto/PySyft | 2940bfebb3e0f37a1b7451cf9581c41917534ed6 | [
"Apache-1.1"
] | null | null | null | # third party
import numpy as np
import pytest
from sklearn.linear_model import LogisticRegression
# syft absolute
import syft as sy
from syft.experimental_flags import flags
sy.load("sklearn")
sy.load("numpy")
@pytest.mark.vendor(lib="sklearn")
@pytest.mark.parametrize("arrow_backend", [True, False])
def test_logistic_model_serde(
arrow_backend: bool, root_client: sy.VirtualMachineClient
) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression(random_state=0).fit(X, y)
clf_remote = clf.send(root_client)
clf_2 = clf_remote.get()
dict_1 = vars(clf)
dict_2 = vars(clf_2)
for key in dict_1.keys():
if type(dict_1[key]) == float:
assert abs(dict_1[key] - dict_2[key]) < 0.0001
elif type(dict_1[key]) == np.ndarray:
assert dict_1[key].all() == dict_2[key].all()
else:
assert dict_1[key] == dict_2[key]
| 26.736842 | 61 | 0.649606 |
import numpy as np
import pytest
from sklearn.linear_model import LogisticRegression
import syft as sy
from syft.experimental_flags import flags
sy.load("sklearn")
sy.load("numpy")
@pytest.mark.vendor(lib="sklearn")
@pytest.mark.parametrize("arrow_backend", [True, False])
def test_logistic_model_serde(
arrow_backend: bool, root_client: sy.VirtualMachineClient
) -> None:
flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression(random_state=0).fit(X, y)
clf_remote = clf.send(root_client)
clf_2 = clf_remote.get()
dict_1 = vars(clf)
dict_2 = vars(clf_2)
for key in dict_1.keys():
if type(dict_1[key]) == float:
assert abs(dict_1[key] - dict_2[key]) < 0.0001
elif type(dict_1[key]) == np.ndarray:
assert dict_1[key].all() == dict_2[key].all()
else:
assert dict_1[key] == dict_2[key]
| true | true |
f72161c03ab784a2ce4d00015d797e74b8a25925 | 2,042 | py | Python | SecuriTree/views.py | davymaish/django-SecuriTree | 01cf925e591877ae2669ca8430845abe278832bf | [
"BSD-2-Clause"
] | null | null | null | SecuriTree/views.py | davymaish/django-SecuriTree | 01cf925e591877ae2669ca8430845abe278832bf | [
"BSD-2-Clause"
] | null | null | null | SecuriTree/views.py | davymaish/django-SecuriTree | 01cf925e591877ae2669ca8430845abe278832bf | [
"BSD-2-Clause"
] | null | null | null | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.template import loader
from django.contrib import messages
from django.views import generic
from django.views.generic.base import TemplateView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Door, Area, AccessRule, User
class IndexView(TemplateView):
template_name = 'SecuriTree/index.html'
class HomeView(LoginRequiredMixin,TemplateView):
template_name = 'SecuriTree/home.html'
class HierarchyView(LoginRequiredMixin,generic.ListView):
model = Area
template_name = 'SecuriTree/hierarchy.html'
context_object_name = 'area_list'
def get_queryset(self):
return Area.objects.filter(parent_area__isnull=True).order_by('id')
class DoorManageView(LoginRequiredMixin,TemplateView):
template_name = 'SecuriTree/manage_doors.html'
class DoorsView(LoginRequiredMixin,generic.ListView):
model = Door
template_name = 'SecuriTree/all_doors.html'
context_object_name = 'door_list'
def get_queryset(self):
return Door.objects.all()
@login_required
def door_form(request):
r_action = request.GET['action']
if r_action == 'unlock':
action = 'unlock'
else:
action = 'lock'
return render(request, 'SecuriTree/door_form.html', {'action':action})
@login_required
def door_status(request):
door_id = request.POST['doorid']
status = request.POST['status']
door = get_object_or_404(Door, pk=door_id)
# door = Door.objects.filter(pk = door_id).first()
door.status = status;
door.save()
if status == 'closed':
msg = 'Door ' + door.id + ' successfully locked.'
else:
msg = 'Door ' + door.id + ' successfully unlocked.'
messages.success(request, msg)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| 27.972603 | 75 | 0.731636 | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.template import loader
from django.contrib import messages
from django.views import generic
from django.views.generic.base import TemplateView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Door, Area, AccessRule, User
class IndexView(TemplateView):
template_name = 'SecuriTree/index.html'
class HomeView(LoginRequiredMixin,TemplateView):
template_name = 'SecuriTree/home.html'
class HierarchyView(LoginRequiredMixin,generic.ListView):
model = Area
template_name = 'SecuriTree/hierarchy.html'
context_object_name = 'area_list'
def get_queryset(self):
return Area.objects.filter(parent_area__isnull=True).order_by('id')
class DoorManageView(LoginRequiredMixin,TemplateView):
template_name = 'SecuriTree/manage_doors.html'
class DoorsView(LoginRequiredMixin,generic.ListView):
model = Door
template_name = 'SecuriTree/all_doors.html'
context_object_name = 'door_list'
def get_queryset(self):
return Door.objects.all()
@login_required
def door_form(request):
r_action = request.GET['action']
if r_action == 'unlock':
action = 'unlock'
else:
action = 'lock'
return render(request, 'SecuriTree/door_form.html', {'action':action})
@login_required
def door_status(request):
door_id = request.POST['doorid']
status = request.POST['status']
door = get_object_or_404(Door, pk=door_id)
door.status = status;
door.save()
if status == 'closed':
msg = 'Door ' + door.id + ' successfully locked.'
else:
msg = 'Door ' + door.id + ' successfully unlocked.'
messages.success(request, msg)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| true | true |
f721628ea6b0b18873ff1f9593f52a8c6a6f14af | 6,273 | py | Python | thenewboston_node/business_logic/tests/test_file_blockchain/test_primary_validator.py | andbortnik/thenewboston-node | bd63c7def5f224286dba70f9560252a7da8ea712 | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/tests/test_file_blockchain/test_primary_validator.py | andbortnik/thenewboston-node | bd63c7def5f224286dba70f9560252a7da8ea712 | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/tests/test_file_blockchain/test_primary_validator.py | andbortnik/thenewboston-node | bd63c7def5f224286dba70f9560252a7da8ea712 | [
"MIT"
] | null | null | null | from thenewboston_node.business_logic.blockchain.file_blockchain import FileBlockchain
from thenewboston_node.business_logic.models import (
AccountState, Block, Node, NodeDeclarationSignedChangeRequest, PrimaryValidatorSchedule,
PrimaryValidatorScheduleSignedChangeRequest
)
from thenewboston_node.business_logic.node import get_node_signing_key
from thenewboston_node.business_logic.tests.baker_factories import baker
from thenewboston_node.core.utils.cryptography import generate_key_pair
def test_no_pv_schedule(blockchain_directory, blockchain_genesis_state):
blockchain = FileBlockchain(base_directory=blockchain_directory)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
assert blockchain.get_primary_validator() is None
assert blockchain.get_primary_validator(0) is None
assert blockchain.get_primary_validator(10) is None
def test_can_get_pv_from_blockchain_genesis_state(
blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
blockchain = FileBlockchain(base_directory=blockchain_directory)
account_number = user_account_key_pair.public
node = baker.make(Node, identifier=account_number)
pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
blockchain_genesis_state.account_states[account_number] = AccountState(
node=node, primary_validator_schedule=pv_schedule
)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
def test_can_get_pv_from_from_blocks(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
blockchain = FileBlockchain(base_directory=blockchain_directory)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
signing_key = user_account_key_pair.private
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=['https://127.0.0.1:8555/'], fee_amount=3, signing_key=signing_key
)
node = request.message.node
assert node.identifier
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, signing_key)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
def test_can_get_node_from_genesis_state_and_pv_from_blocks(
blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
blockchain = FileBlockchain(base_directory=blockchain_directory)
account_number = user_account_key_pair.public
node = baker.make(Node, identifier=account_number)
pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
blockchain_genesis_state.account_states[account_number] = AccountState(
node=node, primary_validator_schedule=pv_schedule
)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
def test_can_get_overridden_pv(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
blockchain = FileBlockchain(base_directory=blockchain_directory)
account_number = user_account_key_pair.public
node = baker.make(Node, identifier=account_number)
pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
blockchain_genesis_state.account_states[account_number] = AccountState(
node=node, primary_validator_schedule=pv_schedule
)
another_key_pair = generate_key_pair()
another_node = baker.make(Node, identifier=another_key_pair.public)
blockchain_genesis_state.account_states[another_key_pair.public] = AccountState(node=another_node)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, another_key_pair.private)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == another_node
assert blockchain.get_primary_validator(0) == another_node
assert blockchain.get_primary_validator(10) == another_node
assert blockchain.get_primary_validator(99) == another_node
assert blockchain.get_primary_validator(100) is None
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
| 46.466667 | 108 | 0.809501 | from thenewboston_node.business_logic.blockchain.file_blockchain import FileBlockchain
from thenewboston_node.business_logic.models import (
AccountState, Block, Node, NodeDeclarationSignedChangeRequest, PrimaryValidatorSchedule,
PrimaryValidatorScheduleSignedChangeRequest
)
from thenewboston_node.business_logic.node import get_node_signing_key
from thenewboston_node.business_logic.tests.baker_factories import baker
from thenewboston_node.core.utils.cryptography import generate_key_pair
def test_no_pv_schedule(blockchain_directory, blockchain_genesis_state):
blockchain = FileBlockchain(base_directory=blockchain_directory)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
assert blockchain.get_primary_validator() is None
assert blockchain.get_primary_validator(0) is None
assert blockchain.get_primary_validator(10) is None
def test_can_get_pv_from_blockchain_genesis_state(
blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
blockchain = FileBlockchain(base_directory=blockchain_directory)
account_number = user_account_key_pair.public
node = baker.make(Node, identifier=account_number)
pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
blockchain_genesis_state.account_states[account_number] = AccountState(
node=node, primary_validator_schedule=pv_schedule
)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
def test_can_get_pv_from_from_blocks(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
blockchain = FileBlockchain(base_directory=blockchain_directory)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
signing_key = user_account_key_pair.private
request = NodeDeclarationSignedChangeRequest.create(
network_addresses=['https://127.0.0.1:8555/'], fee_amount=3, signing_key=signing_key
)
node = request.message.node
assert node.identifier
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, signing_key)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
def test_can_get_node_from_genesis_state_and_pv_from_blocks(
blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
blockchain = FileBlockchain(base_directory=blockchain_directory)
account_number = user_account_key_pair.public
node = baker.make(Node, identifier=account_number)
pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
blockchain_genesis_state.account_states[account_number] = AccountState(
node=node, primary_validator_schedule=pv_schedule
)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
def test_can_get_overridden_pv(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
blockchain = FileBlockchain(base_directory=blockchain_directory)
account_number = user_account_key_pair.public
node = baker.make(Node, identifier=account_number)
pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
blockchain_genesis_state.account_states[account_number] = AccountState(
node=node, primary_validator_schedule=pv_schedule
)
another_key_pair = generate_key_pair()
another_node = baker.make(Node, identifier=another_key_pair.public)
blockchain_genesis_state.account_states[another_key_pair.public] = AccountState(node=another_node)
blockchain.add_blockchain_state(blockchain_genesis_state)
blockchain.validate()
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, another_key_pair.private)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == another_node
assert blockchain.get_primary_validator(0) == another_node
assert blockchain.get_primary_validator(10) == another_node
assert blockchain.get_primary_validator(99) == another_node
assert blockchain.get_primary_validator(100) is None
request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
blockchain.add_block(block)
assert blockchain.get_primary_validator() == node
assert blockchain.get_primary_validator(0) == node
assert blockchain.get_primary_validator(10) == node
assert blockchain.get_primary_validator(99) == node
assert blockchain.get_primary_validator(100) is None
| true | true |
f7216293508f30856c09ec8f6cc0f0a4c59f840b | 400 | py | Python | test/distributed/test_ddp_under_dist_autograd.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | test/distributed/test_ddp_under_dist_autograd.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | test/distributed/test_ddp_under_dist_autograd.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | null | null | null | #!/usr/bin/env python3
from torch.testing._internal.distributed import ddp_under_dist_autograd_test
from torch.testing._internal.common_utils import (
run_tests,
)
class TestDdpUnderDistAutogradWrapper(ddp_under_dist_autograd_test.TestDdpUnderDistAutograd):
pass
class TestDdpComparison(ddp_under_dist_autograd_test.TestDdpComparison):
pass
if __name__ == "__main__":
run_tests()
| 25 | 93 | 0.82 |
from torch.testing._internal.distributed import ddp_under_dist_autograd_test
from torch.testing._internal.common_utils import (
run_tests,
)
class TestDdpUnderDistAutogradWrapper(ddp_under_dist_autograd_test.TestDdpUnderDistAutograd):
pass
class TestDdpComparison(ddp_under_dist_autograd_test.TestDdpComparison):
pass
if __name__ == "__main__":
run_tests()
| true | true |
f7216360a3f39f268083811c68d247e2aa9fdaad | 5,037 | py | Python | models/pointnet_seg.py | 3D-semantic-Sgmentation/pointnet | 029c0217143e6b69e685ab57cf243e322d47860f | [
"MIT"
] | null | null | null | models/pointnet_seg.py | 3D-semantic-Sgmentation/pointnet | 029c0217143e6b69e685ab57cf243e322d47860f | [
"MIT"
] | null | null | null | models/pointnet_seg.py | 3D-semantic-Sgmentation/pointnet | 029c0217143e6b69e685ab57cf243e322d47860f | [
"MIT"
] | null | null | null | # import tensorflow as tf
import numpy as np
import math
import sys
import os
import tensorflow.compat.v1 as tf
import tensorflow as tf2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
    """Build graph-mode placeholders for a batch of point clouds.

    Returns a (points, labels) pair: points is a float32 placeholder of
    shape (batch_size, num_point, 3); labels is an int32 placeholder of
    shape (batch_size, num_point).
    """
    # Placeholders only exist in graph mode, so switch eager execution off.
    tf.compat.v1.disable_eager_execution()
    points = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    labels = tf.placeholder(tf.int32, shape=(batch_size, num_point))
    return points, labels
def get_model(point_cloud, is_training, bn_decay=None):
    """ Segmentation PointNet.

    Input is BxNx3 point clouds; output is BxNx9 per-point logits
    (conv10 below emits 9 channels — the network is wired for 9 classes)
    plus end_points holding the 64x64 feature transform consumed by the
    orthogonality regularizer in get_loss.
    """
    # batch_size is computed but not used below.
    batch_size = point_cloud.get_shape()[0]
    num_point = point_cloud.get_shape()[1]
    end_points = {}
    # Learn a 3x3 input transform and align the raw point cloud with it.
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)
    # Per-point MLP (64, 64) implemented as 1xK convolutions.
    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    # Learn a 64x64 feature-space transform; stored in end_points so
    # get_loss can apply the orthogonality penalty to it.
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    # NOTE(review): debug print left in — emits the tensor description at
    # graph-construction time.
    print(point_feat)
    # Per-point MLP (64, 128, 1024), then max-pool over all points to get
    # the order-invariant global feature.
    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)
    # Tile the global feature to every point and concatenate it with the
    # per-point features (segmentation needs both local and global context).
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    print(concat_feat)
    # Per-point segmentation head: 512 -> 256 -> 128 -> 128 -> 9 logits
    # (conv10 has no activation; it produces raw class scores).
    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 9, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC
    return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ Segmentation loss: per-point cross-entropy plus an orthogonality
    regularizer on the learned feature transform.

    pred: BxNxC logits from get_model.
    label: BxN integer class ids.
    end_points: dict containing 'transform', the BxKxK feature transform.
    reg_weight: weight of the orthogonality penalty.
    """
    # Per-point softmax cross-entropy, averaged over batch and points.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf2.summary.scalar('classify loss', classify_loss)
    # Enforce the transformation as orthogonal matrix:
    # penalize || T * T^t - I ||^2 on the learned KxK transform.
    transform = end_points['transform'] # BxKxK
    K = transform.get_shape()[1]
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf2.summary.scalar('mat_loss', mat_diff_loss)
    return classify_loss + mat_diff_loss * reg_weight
if __name__=='__main__':
    # Graph-mode smoke test: build the network and its loss on dummy
    # tensors and print the resulting ops.
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,3))
        labels = tf.zeros((32,1024))
        print(labels.shape.rank)
        pred, end_points = get_model(inputs, tf.constant(True))
        loss = get_loss(pred, labels, end_points)
        # Bug fix: the original printed an undefined name `outputs`,
        # which raised NameError. Print the tensors actually built.
        print(pred)
        print(loss)
| 40.95122 | 84 | 0.592416 |
import numpy as np
import math
import sys
import os
import tensorflow.compat.v1 as tf
import tensorflow as tf2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
tf.compat.v1.disable_eager_execution()
pointclouds_pl = tf.placeholder(tf.float32,
shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32,
shape=(batch_size, num_point))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, bn_decay=None):
batch_size = point_cloud.get_shape()[0]
num_point = point_cloud.get_shape()[1]
end_points = {}
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
point_feat = tf.expand_dims(net_transformed, [2])
print(point_feat)
net = tf_util.conv2d(point_feat, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
global_feat = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
print(global_feat)
global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
print(concat_feat)
net = tf_util.conv2d(concat_feat, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9', bn_decay=bn_decay)
net = tf_util.conv2d(net, 9, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10')
net = tf.squeeze(net, [2])
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf2.summary.scalar('classify loss', classify_loss)
transform = end_points['transform']
K = transform.get_shape()[1]
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf2.summary.scalar('mat_loss', mat_diff_loss)
return classify_loss + mat_diff_loss * reg_weight
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
labels = tf.zeros((32,1024))
print(labels.shape.rank)
pred, end_points = get_model(inputs, tf.constant(True))
loss = get_loss(pred, labels, end_points)
print(outputs)
| true | true |
f721636de9ed88c4501fc4920a1f38058472b148 | 8,344 | py | Python | tests/adapters/test_dataframe_input.py | vedashree29296/BentoML | 79f94d543a0684e04551207d102a2d254b770ad3 | [
"Apache-2.0"
] | null | null | null | tests/adapters/test_dataframe_input.py | vedashree29296/BentoML | 79f94d543a0684e04551207d102a2d254b770ad3 | [
"Apache-2.0"
] | null | null | null | tests/adapters/test_dataframe_input.py | vedashree29296/BentoML | 79f94d543a0684e04551207d102a2d254b770ad3 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=redefined-outer-name
import itertools
import json
import math
import time
import flask
import numpy as np
import pandas as pd
import psutil # noqa # pylint: disable=unused-import
import pytest
from bentoml.adapters import DataframeInput
from bentoml.adapters.dataframe_input import read_dataframes_from_json_n_csv
from bentoml.utils.csv import csv_splitlines
from bentoml.utils.dataframe_util import guess_orient
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def test_dataframe_request_schema():
    """The generated JSON request schema reflects the declared column dtypes."""
    adapter = DataframeInput(
        dtype={"col1": "int", "col2": "float", "col3": "string"}
    )
    schema = adapter.request_schema["application/json"]["schema"]
    assert schema["type"] == "object"
    props = schema["properties"]
    assert len(props) == 3
    assert props["col1"]["type"] == "array"
    # Each pandas dtype maps to the corresponding JSON-schema item type.
    expected_item_types = {"col1": "integer", "col2": "number", "col3": "string"}
    for col, expected in expected_item_types.items():
        assert props[col]["items"]["type"] == expected
def test_dataframe_handle_cli(capsys, make_api, tmpdir):
    """handle_cli reads a JSON file via --input-file and prints the result."""
    def test_func(df):
        return df["name"]
    input_adapter = DataframeInput()
    api = make_api(input_adapter, test_func)
    # Write a one-row JSON record for the CLI to consume.
    json_file = tmpdir.join("test.json")
    with open(str(json_file), "w") as f:
        f.write('[{"name": "john","game": "mario","city": "sf"}]')
    test_args = ["--input-file", str(json_file)]
    api.handle_cli(test_args)
    # The selected column's value must appear on stdout.
    out, _ = capsys.readouterr()
    assert "john" in out
def test_dataframe_handle_aws_lambda_event(make_api):
    """handle_aws_lambda_event parses JSON bodies and rejects bad input."""
    test_content = '[{"name": "john","game": "mario","city": "sf"}]'
    def test_func(df):
        return df["name"]
    input_adapter = DataframeInput()
    api = make_api(input_adapter, test_func)
    # Case 1: explicit JSON content type.
    event = {
        "headers": {"Content-Type": "application/json"},
        "body": test_content,
    }
    response = api.handle_aws_lambda_event(event)
    assert response["statusCode"] == 200
    assert response["body"] == '[{"name":"john"}]'
    # Case 2: no Content-Type header — JSON is still the assumed default.
    event_without_content_type_header = {
        "headers": {},
        "body": test_content,
    }
    response = api.handle_aws_lambda_event(event_without_content_type_header)
    assert response["statusCode"] == 200
    assert response["body"] == '[{"name":"john"}]'
    # Case 3: unparseable body yields a 400 instead of raising.
    event_with_bad_input = {
        "headers": {},
        "body": "bad_input_content",
    }
    response = api.handle_aws_lambda_event(event_with_bad_input)
    assert response["statusCode"] == 400
def test_dataframe_handle_request_csv(make_api):
    """A text/csv request body is parsed and the API answers with JSON."""
    def select_name(df):
        return df["name"]
    api = make_api(DataframeInput(), select_name)
    # Fake a flask request carrying a one-row CSV payload.
    request = MagicMock(spec=flask.Request)
    request.headers = {'Content-Type': 'text/csv'}
    request.get_data.return_value = b'name,game,city\njohn,mario,sf'
    response = api.handle_request(request)
    assert response.get_data().decode('utf-8') == '[{"name":"john"}]'
def assert_df_equal(left: pd.DataFrame, right: pd.DataFrame) -> None:
    '''
    Compare two instances of pandas.DataFrame ignoring index and columns.

    Float frames are compared approximately; everything else exactly.
    Raises AssertionError (with both frames rendered) on mismatch.
    '''
    try:
        left_array = left.values
        right_array = right.values
        # Fix: `np.float` was a deprecated alias removed in NumPy 1.24;
        # np.issubdtype(..., np.floating) covers every float dtype width.
        if np.issubdtype(right_array.dtype, np.floating):
            np.testing.assert_array_almost_equal(left_array, right_array)
        else:
            np.testing.assert_array_equal(left_array, right_array)
    except AssertionError:
        raise AssertionError(
            f"\n{left.to_string()}\n is not equal to \n{right.to_string()}\n"
        )
# Parametrized DataFrame fixtures: numeric frames, string frames, and
# frames holding values that stress CSV/JSON round-tripping (NaN, quotes,
# commas, embedded newlines).
DF_CASES = (
    pd.DataFrame(np.random.rand(1, 3)),
    pd.DataFrame(np.random.rand(2, 3)),
    pd.DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C']),
    pd.DataFrame(["str1", "str2", "str3"]),  # single dim string array
    pd.DataFrame([np.nan]),  # special values
    pd.DataFrame([math.nan]),  # special values
    pd.DataFrame([" ", 'a"b', "a,b", "a\nb"]),  # special values
    pd.DataFrame({"test": [" ", 'a"b', "a,b", "a\nb"]}),  # special values
    # pd.Series(np.random.rand(2)),  # TODO: Series support
    # pd.DataFrame([""]),  # TODO: -> NaN
)
@pytest.fixture(params=DF_CASES)
def df(request):
    """One DataFrame per DF_CASES entry, parametrizing each test."""
    return request.param
@pytest.fixture(params=pytest.DF_ORIENTS)
def orient(request):
    """One pandas JSON orient per supported value, parametrizing each test."""
    return request.param
def test_batch_read_dataframes_from_mixed_json_n_csv(df):
    """A mixed batch of JSON (all orients) and CSV payloads merges back
    into per-request frames equal to the original."""
    test_datas = []
    test_types = []
    # test content_type=application/json with various orients
    for orient in pytest.DF_ORIENTS:
        try:
            assert_df_equal(df, pd.read_json(df.to_json(orient=orient)))
        except (AssertionError, ValueError):
            # skip cases not supported by official pandas
            continue
        test_datas.extend([df.to_json(orient=orient).encode()] * 3)
        test_types.extend(['json'] * 3)
    test_datas.extend([df.to_csv(index=False).encode()] * 3)
    test_types.extend(['csv'] * 3)
    df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types)
    # counts[k] is the row count contributed by request k; slice the merged
    # frame back apart and compare each slice with the source frame.
    i = 0
    for count in counts:
        assert_df_equal(df_merged[i : i + count], df)
        i += count
def test_batch_read_dataframes_from_csv_other_CRLF(df):
    """CSV parsing accepts the line ending the serializer did not produce."""
    rendered = df.to_csv(index=False)
    # Re-join the lines with the opposite newline convention so both
    # CRLF and LF inputs are exercised across the fixture matrix.
    flipped_newline = '\n' if '\r\n' in rendered else '\r\n'
    payload = flipped_newline.join(csv_splitlines(rendered)).encode()
    parsed, _ = read_dataframes_from_json_n_csv([payload], ['csv'])
    assert_df_equal(parsed, df)
def test_batch_read_dataframes_from_json_of_orients(df, orient):
    """With the orient passed explicitly, a batch of identical JSON
    payloads merges and splits back into the source frame."""
    test_datas = [df.to_json(orient=orient).encode()] * 3
    test_types = ['json'] * 3
    df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
    i = 0
    for count in counts:
        assert_df_equal(df_merged[i : i + count], df)
        i += count
def test_batch_read_dataframes_from_json_with_wrong_orients(df, orient):
    """Payloads serialized as orient='table' but declared as another orient
    must fail cleanly: no merged frame and zero counts per request."""
    test_datas = [df.to_json(orient='table').encode()] * 3
    test_types = ['json'] * 3
    df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
    assert not df_merged
    for count in counts:
        assert not count
def test_batch_read_dataframes_from_json_in_mixed_order():
    """Batched parsing must tolerate payloads whose keys appear in
    different orders (both records- and columns-oriented JSON)."""
    # different column order when orient=records
    df_json = b'[{"A": 1, "B": 2, "C": 3}, {"C": 6, "A": 2, "B": 4}]'
    df_merged, counts = read_dataframes_from_json_n_csv([df_json], ['json'])
    i = 0
    for count in counts:
        assert_df_equal(df_merged[i : i + count], pd.read_json(df_json))
        i += count
    # different row/column order when orient=columns
    df_json1 = b'{"A": {"1": 1, "2": 2}, "B": {"1": 2, "2": 4}, "C": {"1": 3, "2": 6}}'
    df_json2 = b'{"B": {"1": 2, "2": 4}, "A": {"1": 1, "2": 2}, "C": {"1": 3, "2": 6}}'
    df_json3 = b'{"A": {"1": 1, "2": 2}, "B": {"2": 4, "1": 2}, "C": {"1": 3, "2": 6}}'
    df_merged, counts = read_dataframes_from_json_n_csv(
        [df_json1, df_json2, df_json3], ['json'] * 3
    )
    # Select columns explicitly so the comparison ignores column ordering.
    i = 0
    for count in counts:
        assert_df_equal(
            df_merged[i : i + count][["A", "B", "C"]],
            pd.read_json(df_json1)[["A", "B", "C"]],
        )
        i += count
def test_guess_orient(df, orient):
    """guess_orient recovers the orient a frame was serialized with."""
    parsed = json.loads(df.to_json(orient=orient))
    guess = guess_orient(parsed, strict=True)
    # guess may be a single orient or a collection of candidates.
    assert guess == orient or orient in guess
@pytest.mark.skipif('not psutil.POSIX')
def test_benchmark_load_dataframes():
    '''
    read_dataframes_from_json_n_csv should be substantially faster than
    pd.read_json + pd.concat; the assertion below requires at least 5x
    (see the comment on the threshold).
    '''
    test_count = 50
    dfs = [pd.DataFrame(np.random.rand(10, 100)) for _ in range(test_count)]
    inputs = [df.to_json().encode() for df in dfs]
    # Baseline: parse each payload with pandas, then concat.
    time_st = time.time()
    dfs = [pd.read_json(i) for i in inputs]
    result1 = pd.concat(dfs)
    time1 = time.time() - time_st
    # Candidate: the batched reader under test.
    time_st = time.time()
    result2, _ = read_dataframes_from_json_n_csv(
        inputs, itertools.repeat('json'), 'columns'
    )
    time2 = time.time() - time_st
    assert_df_equal(result1, result2)
    # 5 is just an estimate on the smaller end, which should be true for most
    # development machines and Github actions CI environment, the actual ratio depends
    # on the hardware and available computing resource
    assert time1 / time2 > 5
| 32.341085 | 87 | 0.647651 |
import itertools
import json
import math
import time
import flask
import numpy as np
import pandas as pd
import psutil apters import DataframeInput
from bentoml.adapters.dataframe_input import read_dataframes_from_json_n_csv
from bentoml.utils.csv import csv_splitlines
from bentoml.utils.dataframe_util import guess_orient
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def test_dataframe_request_schema():
input_adapter = DataframeInput(
dtype={"col1": "int", "col2": "float", "col3": "string"}
)
schema = input_adapter.request_schema["application/json"]["schema"]
assert "object" == schema["type"]
assert 3 == len(schema["properties"])
assert "array" == schema["properties"]["col1"]["type"]
assert "integer" == schema["properties"]["col1"]["items"]["type"]
assert "number" == schema["properties"]["col2"]["items"]["type"]
assert "string" == schema["properties"]["col3"]["items"]["type"]
def test_dataframe_handle_cli(capsys, make_api, tmpdir):
def test_func(df):
return df["name"]
input_adapter = DataframeInput()
api = make_api(input_adapter, test_func)
json_file = tmpdir.join("test.json")
with open(str(json_file), "w") as f:
f.write('[{"name": "john","game": "mario","city": "sf"}]')
test_args = ["--input-file", str(json_file)]
api.handle_cli(test_args)
out, _ = capsys.readouterr()
assert "john" in out
def test_dataframe_handle_aws_lambda_event(make_api):
test_content = '[{"name": "john","game": "mario","city": "sf"}]'
def test_func(df):
return df["name"]
input_adapter = DataframeInput()
api = make_api(input_adapter, test_func)
event = {
"headers": {"Content-Type": "application/json"},
"body": test_content,
}
response = api.handle_aws_lambda_event(event)
assert response["statusCode"] == 200
assert response["body"] == '[{"name":"john"}]'
event_without_content_type_header = {
"headers": {},
"body": test_content,
}
response = api.handle_aws_lambda_event(event_without_content_type_header)
assert response["statusCode"] == 200
assert response["body"] == '[{"name":"john"}]'
event_with_bad_input = {
"headers": {},
"body": "bad_input_content",
}
response = api.handle_aws_lambda_event(event_with_bad_input)
assert response["statusCode"] == 400
def test_dataframe_handle_request_csv(make_api):
def test_func(df):
return df["name"]
input_adapter = DataframeInput()
api = make_api(input_adapter, test_func)
csv_data = b'name,game,city\njohn,mario,sf'
request = MagicMock(spec=flask.Request)
request.headers = {'Content-Type': 'text/csv'}
request.get_data.return_value = csv_data
result = api.handle_request(request)
assert result.get_data().decode('utf-8') == '[{"name":"john"}]'
def assert_df_equal(left: pd.DataFrame, right: pd.DataFrame):
try:
left_array = left.values
right_array = right.values
if right_array.dtype == np.float:
np.testing.assert_array_almost_equal(left_array, right_array)
else:
np.testing.assert_array_equal(left_array, right_array)
except AssertionError:
raise AssertionError(
f"\n{left.to_string()}\n is not equal to \n{right.to_string()}\n"
)
DF_CASES = (
pd.DataFrame(np.random.rand(1, 3)),
pd.DataFrame(np.random.rand(2, 3)),
pd.DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C']),
pd.DataFrame(["str1", "str2", "str3"]),
pd.DataFrame([np.nan]),
pd.DataFrame([math.nan]),
pd.DataFrame([" ", 'a"b', "a,b", "a\nb"]), # special values
pd.DataFrame({"test": [" ", 'a"b', "a,b", "a\nb"]}),
CASES)
def df(request):
return request.param
@pytest.fixture(params=pytest.DF_ORIENTS)
def orient(request):
return request.param
def test_batch_read_dataframes_from_mixed_json_n_csv(df):
test_datas = []
test_types = []
for orient in pytest.DF_ORIENTS:
try:
assert_df_equal(df, pd.read_json(df.to_json(orient=orient)))
except (AssertionError, ValueError):
continue
test_datas.extend([df.to_json(orient=orient).encode()] * 3)
test_types.extend(['json'] * 3)
test_datas.extend([df.to_csv(index=False).encode()] * 3)
test_types.extend(['csv'] * 3)
df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types)
i = 0
for count in counts:
assert_df_equal(df_merged[i : i + count], df)
i += count
def test_batch_read_dataframes_from_csv_other_CRLF(df):
csv_str = df.to_csv(index=False)
if '\r\n' in csv_str:
csv_str = '\n'.join(csv_splitlines(csv_str)).encode()
else:
csv_str = '\r\n'.join(csv_splitlines(csv_str)).encode()
df_merged, _ = read_dataframes_from_json_n_csv([csv_str], ['csv'])
assert_df_equal(df_merged, df)
def test_batch_read_dataframes_from_json_of_orients(df, orient):
test_datas = [df.to_json(orient=orient).encode()] * 3
test_types = ['json'] * 3
df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
i = 0
for count in counts:
assert_df_equal(df_merged[i : i + count], df)
i += count
def test_batch_read_dataframes_from_json_with_wrong_orients(df, orient):
test_datas = [df.to_json(orient='table').encode()] * 3
test_types = ['json'] * 3
df_merged, counts = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
assert not df_merged
for count in counts:
assert not count
def test_batch_read_dataframes_from_json_in_mixed_order():
df_json = b'[{"A": 1, "B": 2, "C": 3}, {"C": 6, "A": 2, "B": 4}]'
df_merged, counts = read_dataframes_from_json_n_csv([df_json], ['json'])
i = 0
for count in counts:
assert_df_equal(df_merged[i : i + count], pd.read_json(df_json))
i += count
df_json1 = b'{"A": {"1": 1, "2": 2}, "B": {"1": 2, "2": 4}, "C": {"1": 3, "2": 6}}'
df_json2 = b'{"B": {"1": 2, "2": 4}, "A": {"1": 1, "2": 2}, "C": {"1": 3, "2": 6}}'
df_json3 = b'{"A": {"1": 1, "2": 2}, "B": {"2": 4, "1": 2}, "C": {"1": 3, "2": 6}}'
df_merged, counts = read_dataframes_from_json_n_csv(
[df_json1, df_json2, df_json3], ['json'] * 3
)
i = 0
for count in counts:
assert_df_equal(
df_merged[i : i + count][["A", "B", "C"]],
pd.read_json(df_json1)[["A", "B", "C"]],
)
i += count
def test_guess_orient(df, orient):
json_str = df.to_json(orient=orient)
guessed_orient = guess_orient(json.loads(json_str), strict=True)
assert orient == guessed_orient or orient in guessed_orient
@pytest.mark.skipif('not psutil.POSIX')
def test_benchmark_load_dataframes():
test_count = 50
dfs = [pd.DataFrame(np.random.rand(10, 100)) for _ in range(test_count)]
inputs = [df.to_json().encode() for df in dfs]
time_st = time.time()
dfs = [pd.read_json(i) for i in inputs]
result1 = pd.concat(dfs)
time1 = time.time() - time_st
time_st = time.time()
result2, _ = read_dataframes_from_json_n_csv(
inputs, itertools.repeat('json'), 'columns'
)
time2 = time.time() - time_st
assert_df_equal(result1, result2)
assert time1 / time2 > 5
| true | true |
f721649ced49c4e8a9613dfffcb798078e8b305e | 383 | py | Python | vespa-cloud/cord-19-search/scripts/convert-to-feed.py | kuipertan/sample-apps | d52b942ea228336435d29a7ed007e72113aec827 | [
"Apache-2.0"
] | null | null | null | vespa-cloud/cord-19-search/scripts/convert-to-feed.py | kuipertan/sample-apps | d52b942ea228336435d29a7ed007e72113aec827 | [
"Apache-2.0"
] | null | null | null | vespa-cloud/cord-19-search/scripts/convert-to-feed.py | kuipertan/sample-apps | d52b942ea228336435d29a7ed007e72113aec827 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import sys
import json
# Convert a JSON array of CORD-19 documents into Vespa feed operations,
# emitting one JSON "put" object per document on stdout.
input_path = sys.argv[1]
with open(input_path, 'r') as source:
    documents = json.load(source)
for document in documents:
    operation = {
        'put': 'id:covid-19:doc::%s' % document['id'],
        'fields': document,
    }
    print(json.dumps(operation))
| 23.9375 | 111 | 0.64752 |
import sys
import json
json_file = sys.argv[1]
with open(json_file, 'r') as f:
data = json.load(f)
for doc in data:
vespa_doc = {
'put': 'id:covid-19:doc::%s' % doc['id'],
'fields': doc
}
print(json.dumps(vespa_doc))
| true | true |
f72164ba62f9af6d6912ac1fc695a0949c138d93 | 1,051 | py | Python | webservice/search/zeroconf_factory.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | [
"Apache-2.0"
] | null | null | null | webservice/search/zeroconf_factory.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | [
"Apache-2.0"
] | 42 | 2016-07-04T11:17:54.000Z | 2018-03-18T18:36:09.000Z | webservice/search/zeroconf_factory.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock
from webservice.search.pybonjour_service import PybonjourService
from webservice.search.zeroconf_service import ZeroconfService
class ZeroconfFactory(object):
    """Builds the best available zeroconf service advertiser."""

    @staticmethod
    def generate(name, port):
        """Return a service advertiser for `name` on `port`.

        Prefers pybonjour, falls back to python-zeroconf, and hands back
        an inert MagicMock when neither backend is supported so callers
        never have to special-case missing zeroconf support.
        """
        if PybonjourService.has_support():
            return PybonjourService(name, port)
        if ZeroconfService.has_support():
            return ZeroconfService(name, port)
        return MagicMock()
| 33.903226 | 74 | 0.744053 |
from unittest.mock import MagicMock
from webservice.search.pybonjour_service import PybonjourService
from webservice.search.zeroconf_service import ZeroconfService
class ZeroconfFactory(object):
@staticmethod
def generate(name, port):
if PybonjourService.has_support():
return PybonjourService(name, port)
elif ZeroconfService.has_support():
return ZeroconfService(name, port)
else:
return MagicMock()
| true | true |
f72164bc7374018f80baa8ffb8176085266dae60 | 397 | py | Python | CodingTest_Study1/week11/ex9095.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | null | null | null | CodingTest_Study1/week11/ex9095.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | null | null | null | CodingTest_Study1/week11/ex9095.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | 2 | 2020-12-27T15:03:46.000Z | 2021-03-06T14:13:34.000Z | # BOJ 1,2,3 더하기 9095
T = int(input())  # number of test cases T is given on the first line
sum_list = []
# Read one target value n per test case.
for i in range(T):
    n = int(input())
    sum_list.append(n)
def oneTwoThreeSum(n):
    """Count the ordered ways to write n as a sum of 1s, 2s and 3s (BOJ 9095).

    Bottom-up DP replaces the original triple recursion, which was
    exponential in n; the results are identical for every n >= 1.
    """
    # ways[i] = number of ordered compositions of i into parts 1, 2, 3;
    # ways[0] = 1 (the empty composition) keeps the recurrence uniform.
    ways = [1, 1, 2, 4]
    if n < len(ways):
        return ways[n]
    for i in range(4, n + 1):
        ways.append(ways[i - 1] + ways[i - 2] + ways[i - 3])
    return ways[n]
# Answer each test case in input order, one count per line.
for k in sum_list:
    print(oneTwoThreeSum(k))
| 18.045455 | 78 | 0.561713 |
T = int(input())
sum_list = []
for i in range(T):
n = int(input())
sum_list.append(n)
def oneTwoThreeSum(n):
if n == 1:
return 1
if n == 2:
return 2
if n == 3:
return 4
else:
return oneTwoThreeSum(n-3) + oneTwoThreeSum(n-2) + oneTwoThreeSum(n-1)
for k in sum_list:
print(oneTwoThreeSum(k))
| true | true |
f7216512710c309d4a2ab0b0e09080660ee5e81b | 1,794 | py | Python | src/features/utils.py | iamhuy/rumour-veracity-verification | e7e7f0c100545c2758584719e9f20f20cb6d0a85 | [
"MIT"
] | null | null | null | src/features/utils.py | iamhuy/rumour-veracity-verification | e7e7f0c100545c2758584719e9f20f20cb6d0a85 | [
"MIT"
] | 7 | 2020-03-24T15:24:51.000Z | 2021-06-01T21:43:16.000Z | src/features/utils.py | iamhuy/rumour-veracity-verification | e7e7f0c100545c2758584719e9f20f20cb6d0a85 | [
"MIT"
] | null | null | null | from dateutil import parser
import preprocessor as p
def timestamp_to_date(timestamp):
    """
    Convert a Twitter timestamp string to a datetime object.
    :param timestamp: a string representing the timestamp
    :return: a datetime object (parsed by dateutil, so the exact input
             format is flexible)
    """
    return parser.parse(timestamp)
def day_diff(timestamp1, timestamp2):
    """
    Number of whole days between two timestamps (timestamp1 - timestamp2).
    :param timestamp1: first timestamp (minuend)
    :param timestamp2: second timestamp (subtrahend)
    :return: an integer; negative when timestamp1 is earlier than timestamp2
    """
    return (timestamp_to_date(timestamp1) - timestamp_to_date(timestamp2)).days
def read_brown_cluster_file(brown_cluster_text_file):
    """
    Read a Brown-cluster text file (tab-separated: cluster-bitstring, token,
    [count]) and map each token to a dense integer cluster id.
    :param brown_cluster_text_file: an open file-like object
    :return: a dict mapping token -> integer cluster id; ids are assigned in
             order of first appearance of each cluster bitstring
    """
    brown_cluster_dict = dict()
    cluster_id_dict = dict()
    cluster_count = 0
    for line in brown_cluster_text_file.read().splitlines():
        arr = line.split('\t')
        # Skip blank or malformed lines instead of crashing on arr[1].
        if len(arr) < 2:
            continue
        cluster_str = arr[0]
        token = arr[1]
        # Fix: dict.has_key() was removed in Python 3; use `in` instead.
        if cluster_str not in cluster_id_dict:
            cluster_id_dict[cluster_str] = cluster_count
            cluster_count += 1
        brown_cluster_dict[token] = cluster_id_dict[cluster_str]
    return brown_cluster_dict
def preprocess_tweet(tweet):
    """
    Clean the tweet before feeding it to other functions.
    :param tweet: a raw tweet (unicode string)
    :return: lower-cased tweet with URLs, mentions, emoji and hashtags removed
    """
    cleaned_tweet = tweet.lower()  # lowercase the tweet
    p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION, p.OPT.HASHTAG)  # configure the preprocessor
    # Fix: strip non-ASCII characters and decode back to str. On Python 3,
    # .encode() alone yields bytes, which p.clean() cannot process.
    ascii_tweet = cleaned_tweet.encode("ascii", "ignore").decode("ascii")
    return p.clean(ascii_tweet)
| 29.9 | 107 | 0.696767 | from dateutil import parser
import preprocessor as p
def timestamp_to_date(timestamp):
return parser.parse(timestamp)
def day_diff(timestamp1, timestamp2):
return (timestamp_to_date(timestamp1) - timestamp_to_date(timestamp2)).days
def read_brown_cluster_file(brown_cluster_text_file):
brown_cluster_dict = dict()
cluster_id_dict = dict()
cluster_count = 0
for line in brown_cluster_text_file.read().splitlines():
arr = line.split('\t')
cluster_str = arr[0]
token = arr[1]
if not cluster_id_dict.has_key(cluster_str):
cluster_id_dict[cluster_str] = cluster_count
cluster_count+=1
brown_cluster_dict[token] = cluster_id_dict[cluster_str]
return brown_cluster_dict
def preprocess_tweet(tweet):
cleaned_tweet = tweet.lower()
p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION, p.OPT.HASHTAG)
cleaned_tweet = p.clean(cleaned_tweet.encode("ascii", "ignore"))
return cleaned_tweet;
| true | true |
f72165731dd934a6ef471e84e61e6bbeae4d50c9 | 2,651 | py | Python | vtpl_api/models/destination_type.py | vtpl1/videonetics_api | bef179df12f449db0c50c3910daca50b7d40ac49 | [
"RSA-MD"
] | null | null | null | vtpl_api/models/destination_type.py | vtpl1/videonetics_api | bef179df12f449db0c50c3910daca50b7d40ac49 | [
"RSA-MD"
] | 1 | 2021-02-26T07:31:37.000Z | 2021-02-26T07:31:37.000Z | vtpl_api/models/destination_type.py | vtpl1/videonetics_api | bef179df12f449db0c50c3910daca50b7d40ac49 | [
"RSA-MD"
] | 2 | 2020-11-04T02:52:55.000Z | 2020-11-05T08:09:50.000Z | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
OpenAPI spec version: 1.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DestinationType(object):
    """Swagger-generated enum-like model enumerating destination types.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    allowed enum values
    """
    NONE = "none"
    RTSP = "rtsp"
    HTTP = "http"
    FILE = "file"
    FTP = "ftp"
    VMS = "vms"
    MQTT = "mqtt"
    AMQP = "amqp"
    S3 = "S3"
    VS3 = "VS3"
    BASEURL = "BaseUrl"
    RELATIVEURL = "RelativeUrl"
    ZEROMQ = "ZeroMQ"
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Both maps are empty: this model carries no serializable attributes,
    # only the class-level enum constants above.
    swagger_types = {
    }
    attribute_map = {
    }
    def __init__(self):  # noqa: E501
        """DestinationType - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
    def to_dict(self):
        """Returns the model properties as a dict.

        Recursively converts nested swagger models (anything exposing
        to_dict) inside lists and dicts. With empty swagger_types this
        returns an empty dict.
        """
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: merge own items when the model subclasses
        # dict (it does not here, so this branch is inert).
        if issubclass(DestinationType, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DestinationType):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.990196 | 80 | 0.536024 |
import pprint
import re
import six
class DestinationType(object):
NONE = "none"
RTSP = "rtsp"
HTTP = "http"
FILE = "file"
FTP = "ftp"
VMS = "vms"
MQTT = "mqtt"
AMQP = "amqp"
S3 = "S3"
VS3 = "VS3"
BASEURL = "BaseUrl"
RELATIVEURL = "RelativeUrl"
ZEROMQ = "ZeroMQ"
swagger_types = {
}
attribute_map = {
}
def __init__(self):
self.discriminator = None
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DestinationType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, DestinationType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f721659832fd95400b106db9d00e562f8df54211 | 183 | py | Python | shopee_crawler/toolkit/__init__.py | ptrkhh/shopee-crawler | 6d85748daa802ad9bb2f42ba56695b31d692f4b4 | [
"MIT"
] | 5 | 2021-09-09T18:32:49.000Z | 2022-01-10T10:31:17.000Z | shopee_crawler/toolkit/__init__.py | ptrkhh/shopee-crawler | 6d85748daa802ad9bb2f42ba56695b31d692f4b4 | [
"MIT"
] | 2 | 2021-09-10T14:28:52.000Z | 2021-09-12T14:57:41.000Z | shopee_crawler/toolkit/__init__.py | ptrkhh/shopee-crawler | 6d85748daa802ad9bb2f42ba56695b31d692f4b4 | [
"MIT"
] | 6 | 2021-09-25T14:03:57.000Z | 2022-03-19T14:44:04.000Z | from .crawl_by_cat_url import crawl_by_cat_url
from .crawl_by_search import crawl_by_search
from .crawl_by_shop_url import crawl_by_shop_url
from .crawl_cat_list import crawl_cat_list | 45.75 | 48 | 0.896175 | from .crawl_by_cat_url import crawl_by_cat_url
from .crawl_by_search import crawl_by_search
from .crawl_by_shop_url import crawl_by_shop_url
from .crawl_cat_list import crawl_cat_list | true | true |
f72166b67f4730956f03af23668fb17b0bfb75ba | 170 | py | Python | old/dronekit-python/dronekit/util.py | sirmammingtonham/droneee | 1c0e1921a902b26958d298f3a0204465bf3e960d | [
"Unlicense"
] | null | null | null | old/dronekit-python/dronekit/util.py | sirmammingtonham/droneee | 1c0e1921a902b26958d298f3a0204465bf3e960d | [
"Unlicense"
] | null | null | null | old/dronekit-python/dronekit/util.py | sirmammingtonham/droneee | 1c0e1921a902b26958d298f3a0204465bf3e960d | [
"Unlicense"
] | null | null | null | from __future__ import print_function
import sys
def errprinter(*args):
logger(*args)
def logger(*args):
print(*args, file=sys.stderr)
sys.stderr.flush()
| 14.166667 | 37 | 0.7 | from __future__ import print_function
import sys
def errprinter(*args):
logger(*args)
def logger(*args):
print(*args, file=sys.stderr)
sys.stderr.flush()
| true | true |
f72168144f40c3dc94f255559a486ee91e85c71f | 10,646 | py | Python | userbot/plugins/chatinfo.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | 3 | 2020-09-04T09:34:51.000Z | 2020-09-04T09:39:26.000Z | userbot/plugins/chatinfo.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | null | null | null | userbot/plugins/chatinfo.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Credits to Hitalo-Sama and FTG Modules
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetFullChatRequest, GetHistoryRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (
ChannelInvalidError,
ChannelPrivateError,
ChannelPublicGroupNaError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except BaseException:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.edit("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(
chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[
0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[
0].first_name is not None else "Deleted Account"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and isinstance(
msg_info.messages[0].action,
MessageActionChannelMigrateFrom) and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
str(e)
# this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(
chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(
chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(
chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(
chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(
chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(
chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(
chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(
chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(
chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info,
"megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(
chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info,
"restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info,
"verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(
creator_username) if creator_username else None
# end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None,
# works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
if exp_count is not None:
chat_level = int((1 + sqrt(1 + 7 * exp_count / 14)) / 2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"Restricted users: <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(
chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
}) | 46.286957 | 138 | 0.656021 |
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetFullChatRequest, GetHistoryRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (
ChannelInvalidError,
ChannelPrivateError,
ChannelPublicGroupNaError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except BaseException:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.edit("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(
chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[
0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[
0].first_name is not None else "Deleted Account"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and isinstance(
msg_info.messages[0].action,
MessageActionChannelMigrateFrom) and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
str(e)
# this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(
chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(
chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(
chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(
chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(
chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(
chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(
chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(
chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(
chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info,
"megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(
chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info,
"restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info,
"verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(
creator_username) if creator_username else None
# end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None,
# works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
if exp_count is not None:
chat_level = int((1 + sqrt(1 + 7 * exp_count / 14)) / 2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"Restricted users: <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(
chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
}) | true | true |
f72168324e6096dddf572876cab151217254f430 | 3,592 | py | Python | examples/resume_train_segm.py | dani-lbnl/msdnet | 20f503322524ceb340379448f1778a58bb1f9a18 | [
"MIT"
] | 24 | 2019-08-24T06:42:51.000Z | 2021-10-09T14:27:51.000Z | examples/resume_train_segm.py | dani-lbnl/msdnet | 20f503322524ceb340379448f1778a58bb1f9a18 | [
"MIT"
] | 12 | 2019-07-31T06:56:19.000Z | 2020-12-05T18:08:54.000Z | examples/resume_train_segm.py | dani-lbnl/msdnet | 20f503322524ceb340379448f1778a58bb1f9a18 | [
"MIT"
] | 11 | 2019-09-17T02:39:24.000Z | 2022-03-30T21:28:35.000Z | #-----------------------------------------------------------------------
#Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
#
#Author: Daniel M. Pelt
#Contact: D.M.Pelt@cwi.nl
#Website: http://dmpelt.github.io/msdnet/
#License: MIT
#
#This file is part of MSDNet, a Python implementation of the
#Mixed-Scale Dense Convolutional Neural Network.
#-----------------------------------------------------------------------
"""
Example 09: Resume training a network for segmentation
======================================================
This script resumes an earlier training of a MS-D network for
segmentation (i.e. labeling)
Run generatedata.py first to generate required training data, and
train_segm.py to generate a partially trained network.
"""
# Import code
import msdnet
from pathlib import Path
# Define training data
# First, create lists of input files (noisy) and target files (labels)
flsin = sorted((Path('train') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('train') / 'label').glob('*.tiff'))
# Create list of datapoints (i.e. input/target pairs)
dats = []
for i in range(len(flsin)):
# Create datapoint with file names
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
# Convert datapoint to one-hot, using labels 0, 1, 2, 3, and 4,
# which are the labels given in each label TIFF file.
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
# Augment data by rotating and flipping
d_augm = msdnet.data.RotateAndFlipDataPoint(d_oh)
# Add augmented datapoint to list
dats.append(d_augm)
# Note: The above can also be achieved using a utility function for such 'simple' cases:
# dats = msdnet.utils.load_simple_data('train/noisy/*.tiff', 'train/label/*.tiff', augment=True, labels=[0,1,2,3,4])
# Use image batches of a single image
bprov = msdnet.data.BatchProvider(dats,1)
# Define validation data (not using augmentation)
flsin = sorted((Path('val') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('val') / 'label').glob('*.tiff'))
datsv = []
for i in range(len(flsin)):
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
datsv.append(d_oh)
# Note: The above can also be achieved using a utility function for such 'simple' cases:
# datsv = msdnet.utils.load_simple_data('train/noisy/*.tiff', 'train/label/*.tiff', augment=False, labels=[0,1,2,3,4])
# Load network, training algorithm, and validation object from checkpoint of previous training
n, t, val = msdnet.train.restore_training('segm_params.checkpoint', msdnet.network.SegmentationMSDNet, msdnet.train.AdamAlgorithm, msdnet.validate.MSEValidation, datsv, gpu=True)
# Select loss function
celoss = msdnet.loss.CrossEntropyLoss()
val.loss = celoss
t.loss = celoss
# Log error metrics to console
consolelog = msdnet.loggers.ConsoleLogger()
# Log error metrics to file
filelog = msdnet.loggers.FileLogger('log_segm.txt')
# Log typical, worst, and best images to image files
imagelog = msdnet.loggers.ImageLabelLogger('log_segm', onlyifbetter=True)
# Log typical, worst, and best images to image files
# Output probability map for a single channel (in this case, channel 3)
singlechannellog = msdnet.loggers.ImageLogger('log_segm_singlechannel', chan_out=3, onlyifbetter=True)
# Train network until program is stopped manually
# Network parameters are saved in segm_params.h5
# Validation is run after every len(datsv) (=25)
# training steps.
msdnet.train.train(n, t, val, bprov, 'segm_params_resumed.h5',loggers=[consolelog,filelog,imagelog,singlechannellog], val_every=len(datsv))
| 43.277108 | 178 | 0.700724 |
import msdnet
from pathlib import Path
flsin = sorted((Path('train') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('train') / 'label').glob('*.tiff'))
dats = []
for i in range(len(flsin)):
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
d_augm = msdnet.data.RotateAndFlipDataPoint(d_oh)
dats.append(d_augm)
bprov = msdnet.data.BatchProvider(dats,1)
flsin = sorted((Path('val') / 'noisy').glob('*.tiff'))
flstg = sorted((Path('val') / 'label').glob('*.tiff'))
datsv = []
for i in range(len(flsin)):
d = msdnet.data.ImageFileDataPoint(str(flsin[i]),str(flstg[i]))
d_oh = msdnet.data.OneHotDataPoint(d, [0,1,2,3,4])
datsv.append(d_oh)
n, t, val = msdnet.train.restore_training('segm_params.checkpoint', msdnet.network.SegmentationMSDNet, msdnet.train.AdamAlgorithm, msdnet.validate.MSEValidation, datsv, gpu=True)
celoss = msdnet.loss.CrossEntropyLoss()
val.loss = celoss
t.loss = celoss
consolelog = msdnet.loggers.ConsoleLogger()
filelog = msdnet.loggers.FileLogger('log_segm.txt')
imagelog = msdnet.loggers.ImageLabelLogger('log_segm', onlyifbetter=True)
singlechannellog = msdnet.loggers.ImageLogger('log_segm_singlechannel', chan_out=3, onlyifbetter=True)
msdnet.train.train(n, t, val, bprov, 'segm_params_resumed.h5',loggers=[consolelog,filelog,imagelog,singlechannellog], val_every=len(datsv))
| true | true |
f721694c28a049e466ab20f52517ffcffb2f736f | 1,578 | py | Python | github.py | anoadragon453/msc-chatbot | ae8bc4b900df500e4f31b85041de2ebfbedd8dd9 | [
"Apache-2.0"
] | 2 | 2019-10-06T18:13:46.000Z | 2019-12-07T22:02:40.000Z | github.py | anoadragon453/msc-chatbot | ae8bc4b900df500e4f31b85041de2ebfbedd8dd9 | [
"Apache-2.0"
] | null | null | null | github.py | anoadragon453/msc-chatbot | ae8bc4b900df500e4f31b85041de2ebfbedd8dd9 | [
"Apache-2.0"
] | null | null | null | import requests
import json
from errors import BotException
import logging
logger = logging.getLogger(__name__)
class Github(object):
def __init__(self, repo_slug: str):
"""
Args:
repo_slug: The slug (user/repo_name) of the github repository
"""
# TODO: Add support for custom token
self.repo_slug = repo_slug
self.api_base = "https://api.github.com"
def get_info_for_issue_pr(self, num: int) -> dict:
"""Get the metadata of a github issue/PR
Args:
num: The issue/PR number
Returns:
dict[str, str]: Metadata about the issue/PR
Raises:
FileNotFoundError: The issue/PR was not found
"""
# Assume it's a PR. Query github's API
resp = requests.get(self.api_base + f"/repos/{self.repo_slug}/pulls/{num}")
if resp.status_code == 404 or not resp.content:
raise FileNotFoundError
# Load JSON
body = json.loads(resp.content)
if resp.status_code == 403:
# Check if this is a rate limit hit or an invalid token
if "message" in body:
logger.error(f"Rate-limit hit on {resp.url}. Consider using your own Github token.")
raise PermissionError("rate-limit hit")
logger.error(f"Forbidden on contacting {resp.url}. Check your access token.")
raise PermissionError("forbidden")
if resp.status_code != 200:
raise BotException(f"HTTP error ({resp.status_code})")
return body
| 30.941176 | 100 | 0.603295 | import requests
import json
from errors import BotException
import logging
logger = logging.getLogger(__name__)
class Github(object):
def __init__(self, repo_slug: str):
self.repo_slug = repo_slug
self.api_base = "https://api.github.com"
def get_info_for_issue_pr(self, num: int) -> dict:
resp = requests.get(self.api_base + f"/repos/{self.repo_slug}/pulls/{num}")
if resp.status_code == 404 or not resp.content:
raise FileNotFoundError
body = json.loads(resp.content)
if resp.status_code == 403:
if "message" in body:
logger.error(f"Rate-limit hit on {resp.url}. Consider using your own Github token.")
raise PermissionError("rate-limit hit")
logger.error(f"Forbidden on contacting {resp.url}. Check your access token.")
raise PermissionError("forbidden")
if resp.status_code != 200:
raise BotException(f"HTTP error ({resp.status_code})")
return body
| true | true |
f721696ba4b25105e5eb43dca6f3445e9352f0a4 | 265 | py | Python | ctfweb/admin.py | pdogg/ctfmanager | d8f0ac7d7e12d7973b7eb39cd30a0bc81e4cb770 | [
"BSD-3-Clause"
] | 10 | 2015-01-27T23:01:03.000Z | 2016-12-14T01:00:49.000Z | ctfweb/admin.py | pdogg/ctfmanager | d8f0ac7d7e12d7973b7eb39cd30a0bc81e4cb770 | [
"BSD-3-Clause"
] | null | null | null | ctfweb/admin.py | pdogg/ctfmanager | d8f0ac7d7e12d7973b7eb39cd30a0bc81e4cb770 | [
"BSD-3-Clause"
] | 8 | 2015-03-01T16:57:05.000Z | 2022-02-20T03:48:04.000Z | from django.contrib import admin
from ctfweb.models import *
admin.site.register(Game)
admin.site.register(Category)
admin.site.register(Challenge)
admin.site.register(Hint)
admin.site.register(Competitor)
admin.site.register(Solved)
admin.site.register(RegCodes)
| 24.090909 | 32 | 0.822642 | from django.contrib import admin
from ctfweb.models import *
admin.site.register(Game)
admin.site.register(Category)
admin.site.register(Challenge)
admin.site.register(Hint)
admin.site.register(Competitor)
admin.site.register(Solved)
admin.site.register(RegCodes)
| true | true |
f7216b0dc1766301347181cd7059ad601ead0155 | 11,484 | py | Python | components/app_update/otatool.py | thomasonw/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 6 | 2018-12-28T04:00:22.000Z | 2021-05-17T08:01:41.000Z | components/app_update/otatool.py | Wangrenai/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 1 | 2019-02-15T06:43:13.000Z | 2019-02-15T06:43:13.000Z | components/app_update/otatool.py | Wangrenai/esp-idf | abea9e4c02bb17e86298aec4e299780399e4789f | [
"Apache-2.0"
] | 1 | 2019-05-01T14:00:23.000Z | 2019-05-01T14:00:23.000Z | #!/usr/bin/env python
#
# otatool is used to perform ota-level operations - flashing ota partition
# erasing ota partition and switching ota partition
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import argparse
import os
import sys
import binascii
import subprocess
import tempfile
import collections
import struct
# Tool version string.
__version__ = '1.0'

# Paths resolved relative to the IDF_PATH environment variable; parttool.py is
# invoked as a subprocess for all partition-level operations.
IDF_COMPONENTS_PATH = os.path.expandvars(os.path.join("$IDF_PATH", "components"))
PARTTOOL_PY = os.path.join(IDF_COMPONENTS_PATH, "partition_table", "parttool.py")

# Region size used to locate the two redundant otadata records: each record
# occupies one half of this region (see _get_otadata_status/switch_otadata).
# NOTE(review): 0x2000 is larger than a single 4 KB SPI flash sector despite
# the name -- confirm against the ota_data partition layout.
SPI_FLASH_SEC_SIZE = 0x2000

# Global verbosity flag; set from the parsed CLI arguments in main().
quiet = False
def status(msg):
    """Print a progress message unless global quiet mode is enabled."""
    if quiet:
        return
    print(msg)
def _invoke_parttool(parttool_args, args, output=False, partition=None):
    """Run parttool.py as a subprocess with the given subcommand arguments.

    :param parttool_args: subcommand and its options, appended last.
    :param args: parsed CLI namespace providing port / partition table info.
    :param output: when True, capture and return stdout; otherwise just
        check the exit status.
    :param partition: explicit partition selector flags; defaults to the
        ota data partition when omitted.
    """
    cmd = [sys.executable, PARTTOOL_PY]
    # Default to selecting the ota data partition unless told otherwise.
    if partition:
        cmd += partition
    else:
        cmd += ["--partition-type", "data", "--partition-subtype", "ota"]
    if quiet:
        cmd.append("-q")
    if args.port != "":
        cmd.extend(["--port", args.port])
    if args.partition_table_file:
        cmd.extend(["--partition-table-file", args.partition_table_file])
    if args.partition_table_offset:
        cmd.extend(["--partition-table-offset", args.partition_table_offset])
    cmd.extend(parttool_args)
    # check_output captures stdout for callers that parse it; check_call
    # only verifies the exit code.
    runner = subprocess.check_output if output else subprocess.check_call
    return runner(cmd)
def _get_otadata_contents(args, check=True):
    """Read and return the raw bytes of the ota_data partition.

    When *check* is true, first query parttool (quietly) for the
    partition's offset/size and raise early if it does not exist.
    """
    global quiet
    if check:
        # Temporarily silence parttool while probing for the partition,
        # then restore the user's verbosity preference.
        quiet = True
        info = _invoke_parttool(
            ["get_partition_info", "--info", "offset", "size"], args, True
        ).split(b" ")
        quiet = args.quiet
        if not info:
            raise RuntimeError("No ota_data partition found")
    # Dump the partition into a scratch file and hand its bytes back.
    with tempfile.NamedTemporaryFile() as otadata_file:
        _invoke_parttool(["read_partition", "--output", otadata_file.name], args)
        return otadata_file.read()
def _get_otadata_status(otadata_contents):
    """Decode the two redundant otadata records into (seq, crc) tuples.

    The ota_data partition stores one record per half region: the OTA
    sequence number in the first four bytes and its CRC at byte offset 28.
    Returns a list of two namedtuples, one per copy.
    """
    otadata_status = collections.namedtuple("otadata_status", "seq crc")
    half_region = SPI_FLASH_SEC_SIZE >> 1
    records = []
    for copy in range(2):
        base = copy * half_region
        # NOTE(review): '>I' decodes big-endian here while switch_otadata
        # packs with native byte order -- confirm the on-flash layout.
        (seq,) = struct.unpack('>I', bytearray(otadata_contents[base:base + 4]))
        (crc,) = struct.unpack('>I', bytearray(otadata_contents[base + 28:base + 32]))
        records.append(otadata_status(seq, crc))
    return records
def read_otadata(args):
    """Print the sequence numbers and CRCs of both otadata record copies."""
    status("Reading ota_data partition contents...")
    otadata_info = _get_otadata_status(_get_otadata_contents(args))
    # Raw dump followed by a two-column table (one column per record copy).
    print(otadata_info)
    print("\t\t{:11}\t{:8s}|\t{:8s}\t{:8s}".format("OTA_SEQ", "CRC", "OTA_SEQ", "CRC"))
    print("Firmware: 0x{:8x} \t 0x{:8x} |\t0x{:8x} \t 0x{:8x}".format(otadata_info[0].seq, otadata_info[0].crc,
                                                                      otadata_info[1].seq, otadata_info[1].crc))
def erase_otadata(args):
    """Erase the ota_data partition via parttool."""
    status("Erasing ota_data partition contents...")
    _invoke_parttool(["erase_partition"], args)
    status("Erased ota_data partition contents")
def switch_otadata(args):
    """Select another ota app partition as the boot target.

    Finds the requested app partition (by ``args.name`` or ``args.slot``),
    derives the next OTA sequence number from the currently stored otadata
    records, and writes the updated record back to the ota_data partition.

    Raises:
        RuntimeError: if no ota app partitions exist or the requested one
            cannot be found.
    """
    sys.path.append(os.path.join(IDF_COMPONENTS_PATH, "partition_table"))
    import gen_esp32part as gen

    def is_otadata_status_valid(status):
        # A record is valid when its sequence number is not the erased-flash
        # value (0xFFFFFFFF) and its stored CRC matches the CRC of that
        # sequence number.
        seq = status.seq % (1 << 32)
        # Bug fix: the computed CRC used to be wrapped in hex(), producing a
        # *string* that could never compare equal to the integer status.crc,
        # so every record was rejected as invalid. Compare integers instead.
        # NOTE(review): the CRC is computed over a native-endian packing
        # while _get_otadata_status unpacks big-endian ('>I') -- confirm the
        # intended on-flash byte order.
        crc = binascii.crc32(struct.pack("I", seq), 0xFFFFFFFF) % (1 << 32)
        return seq < (int('0xFFFFFFFF', 16) % (1 << 32)) and status.crc == crc

    status("Looking for ota app partitions...")

    # In order to get the number of ota app partitions, we need the partition table
    partition_table = None
    with tempfile.NamedTemporaryFile() as partition_table_file:
        invoke_args = ["get_partition_info", "--table", partition_table_file.name]
        _invoke_parttool(invoke_args, args)
        partition_table = partition_table_file.read()
        partition_table = gen.PartitionTable.from_binary(partition_table)

    # Collect the contiguous run of ota_0..ota_N app partitions; stop at the
    # first missing subtype.
    ota_partitions = list()
    for i in range(gen.NUM_PARTITION_SUBTYPE_APP_OTA):
        ota_partition = filter(lambda p: p.subtype == (gen.MIN_PARTITION_SUBTYPE_APP_OTA + i), partition_table)
        try:
            ota_partitions.append(list(ota_partition)[0])
        except IndexError:
            break

    ota_partitions = sorted(ota_partitions, key=lambda p: p.subtype)

    if not ota_partitions:
        raise RuntimeError("No ota app partitions found")

    status("Verifying partition to switch to exists...")

    # Look for the app partition to switch to
    ota_partition_next = None
    try:
        if args.name:
            ota_partition_next = filter(lambda p: p.name == args.name, ota_partitions)
        else:
            ota_partition_next = filter(lambda p: p.subtype - gen.MIN_PARTITION_SUBTYPE_APP_OTA == args.slot, ota_partitions)
        ota_partition_next = list(ota_partition_next)[0]
    except IndexError:
        raise RuntimeError("Partition to switch to not found")

    otadata_contents = _get_otadata_contents(args)
    otadata_status = _get_otadata_status(otadata_contents)

    # Find the copy to base the computation for ota sequence number on
    otadata_compute_base = -1

    # Both are valid, take the max as computation base
    if is_otadata_status_valid(otadata_status[0]) and is_otadata_status_valid(otadata_status[1]):
        if otadata_status[0].seq >= otadata_status[1].seq:
            otadata_compute_base = 0
        else:
            otadata_compute_base = 1
    # Only one copy is valid, use that
    elif is_otadata_status_valid(otadata_status[0]):
        otadata_compute_base = 0
    elif is_otadata_status_valid(otadata_status[1]):
        otadata_compute_base = 1
    # Both are invalid (could be initial state - all 0xFF's); leave the base
    # at -1 so the target sequence is used directly below.
    else:
        pass

    ota_seq_next = 0
    ota_partitions_num = len(ota_partitions)
    target_seq = (ota_partition_next.subtype & 0x0F) + 1

    # Find the next ota sequence number: the smallest value >= the current
    # base whose modulo over the partition count selects the target slot.
    if otadata_compute_base == 0 or otadata_compute_base == 1:
        base_seq = otadata_status[otadata_compute_base].seq % (1 << 32)
        i = 0
        while base_seq > target_seq % ota_partitions_num + i * ota_partitions_num:
            i += 1
        ota_seq_next = target_seq % ota_partitions_num + i * ota_partitions_num
    else:
        ota_seq_next = target_seq

    # Create binary data from computed values
    ota_seq_next = struct.pack("I", ota_seq_next)
    ota_seq_crc_next = binascii.crc32(ota_seq_next, 0xFFFFFFFF) % (1 << 32)
    ota_seq_crc_next = struct.pack("I", ota_seq_crc_next)

    # Patch the record copy that was NOT the computation base inside a
    # scratch copy of the partition, then flash the whole thing back.
    with tempfile.NamedTemporaryFile() as otadata_next_file:
        start = (1 if otadata_compute_base == 0 else 0) * (SPI_FLASH_SEC_SIZE >> 1)
        otadata_next_file.write(otadata_contents)
        otadata_next_file.seek(start)
        otadata_next_file.write(ota_seq_next)
        otadata_next_file.seek(start + 28)
        otadata_next_file.write(ota_seq_crc_next)
        otadata_next_file.flush()
        _invoke_parttool(["write_partition", "--input", otadata_next_file.name], args)

    status("Updated ota_data partition")
def _get_partition_specifier(args):
if args.name:
return ["--partition-name", args.name]
else:
return ["--partition-type", "app", "--partition-subtype", "ota_" + str(args.slot)]
def read_ota_partition(args):
    """Copy the selected ota app partition into the file named by args.output."""
    _invoke_parttool(["read_partition", "--output", args.output], args,
                     partition=_get_partition_specifier(args))
    status("Read ota partition contents to file {}".format(args.output))


def write_ota_partition(args):
    """Flash the contents of the file named by args.input into the selected ota app partition."""
    _invoke_parttool(["write_partition", "--input", args.input], args,
                     partition=_get_partition_specifier(args))
    status("Written contents of file {} to ota partition".format(args.input))


def erase_ota_partition(args):
    """Erase the selected ota app partition."""
    _invoke_parttool(["erase_partition"], args,
                     partition=_get_partition_specifier(args))
    status("Erased contents of ota partition")
def main():
    """Parse CLI arguments and dispatch to the requested otadata operation."""
    global quiet
    parser = argparse.ArgumentParser("ESP-IDF OTA Partitions Tool")
    parser.add_argument("--quiet", "-q", help="suppress stderr messages", action="store_true")
    # There are two possible sources for the partition table: a device attached to the host
    # or a partition table CSV/binary file. These sources are mutually exclusive.
    partition_table_info_source_args = parser.add_mutually_exclusive_group()
    partition_table_info_source_args.add_argument("--port", "-p", help="port where the device to read the partition table from is attached", default="")
    partition_table_info_source_args.add_argument("--partition-table-file", "-f", help="file (CSV/binary) to read the partition table from", default="")
    parser.add_argument("--partition-table-offset", "-o", help="offset to read the partition table from", default="0x8000")
    subparsers = parser.add_subparsers(dest="operation", help="run otatool -h for additional help")
    # Specify the supported operations
    subparsers.add_parser("read_otadata", help="read otadata partition")
    subparsers.add_parser("erase_otadata", help="erase otadata partition")
    # Shared --slot/--name selector, reused by every per-partition subcommand.
    slot_or_name_parser = argparse.ArgumentParser(add_help=False)
    slot_or_name_parser_args = slot_or_name_parser.add_mutually_exclusive_group()
    slot_or_name_parser_args.add_argument("--slot", help="slot number of the ota partition", type=int)
    slot_or_name_parser_args.add_argument("--name", help="name of the ota partition")
    subparsers.add_parser("switch_otadata", help="switch otadata partition", parents=[slot_or_name_parser])
    read_ota_partition_subparser = subparsers.add_parser("read_ota_partition", help="read contents of an ota partition", parents=[slot_or_name_parser])
    read_ota_partition_subparser.add_argument("--output", help="file to write the contents of the ota partition to")
    write_ota_partition_subparser = subparsers.add_parser("write_ota_partition", help="write contents to an ota partition", parents=[slot_or_name_parser])
    write_ota_partition_subparser.add_argument("--input", help="file whose contents to write to the ota partition")
    subparsers.add_parser("erase_ota_partition", help="erase contents of an ota partition", parents=[slot_or_name_parser])
    args = parser.parse_args()
    quiet = args.quiet
    # No operation specified, display help and exit
    if args.operation is None:
        if not quiet:
            parser.print_help()
        sys.exit(1)
    # Else execute the operation
    # Subcommand names deliberately match the module-level function names,
    # so the handler can be looked up directly in globals().
    operation_func = globals()[args.operation]
    if quiet:
        # If exceptions occur, suppress and exit quietly
        try:
            operation_func(args)
        except Exception:
            sys.exit(2)
    else:
        operation_func(args)
if __name__ == '__main__':
    main()
| 35.012195 | 154 | 0.698015 |
function, division
import argparse
import os
import sys
import binascii
import subprocess
import tempfile
import collections
import struct
__version__ = '1.0'
IDF_COMPONENTS_PATH = os.path.expandvars(os.path.join("$IDF_PATH", "components"))
PARTTOOL_PY = os.path.join(IDF_COMPONENTS_PATH, "partition_table", "parttool.py")
SPI_FLASH_SEC_SIZE = 0x2000
quiet = False
def status(msg):
if not quiet:
print(msg)
def _invoke_parttool(parttool_args, args, output=False, partition=None):
invoke_args = []
if partition:
invoke_args += [sys.executable, PARTTOOL_PY] + partition
else:
invoke_args += [sys.executable, PARTTOOL_PY, "--partition-type", "data", "--partition-subtype", "ota"]
if quiet:
invoke_args += ["-q"]
if args.port != "":
invoke_args += ["--port", args.port]
if args.partition_table_file:
invoke_args += ["--partition-table-file", args.partition_table_file]
if args.partition_table_offset:
invoke_args += ["--partition-table-offset", args.partition_table_offset]
invoke_args += parttool_args
if output:
return subprocess.check_output(invoke_args)
else:
return subprocess.check_call(invoke_args)
def _get_otadata_contents(args, check=True):
global quiet
if check:
check_args = ["get_partition_info", "--info", "offset", "size"]
quiet = True
output = _invoke_parttool(check_args, args, True).split(b" ")
quiet = args.quiet
if not output:
raise RuntimeError("No ota_data partition found")
with tempfile.NamedTemporaryFile() as otadata_file:
invoke_args = ["read_partition", "--output", otadata_file.name]
_invoke_parttool(invoke_args, args)
return otadata_file.read()
def _get_otadata_status(otadata_contents):
status = []
otadata_status = collections.namedtuple("otadata_status", "seq crc")
for i in range(2):
start = i * (SPI_FLASH_SEC_SIZE >> 1)
seq = bytearray(otadata_contents[start:start + 4])
crc = bytearray(otadata_contents[start + 28:start + 32])
seq = struct.unpack('>I', seq)
crc = struct.unpack('>I', crc)
status.append(otadata_status(seq[0], crc[0]))
return status
def read_otadata(args):
status("Reading ota_data partition contents...")
otadata_info = _get_otadata_contents(args)
otadata_info = _get_otadata_status(otadata_info)
print(otadata_info)
print("\t\t{:11}\t{:8s}|\t{:8s}\t{:8s}".format("OTA_SEQ", "CRC", "OTA_SEQ", "CRC"))
print("Firmware: 0x{:8x} \t 0x{:8x} |\t0x{:8x} \t 0x{:8x}".format(otadata_info[0].seq, otadata_info[0].crc,
otadata_info[1].seq, otadata_info[1].crc))
def erase_otadata(args):
status("Erasing ota_data partition contents...")
_invoke_parttool(["erase_partition"], args)
status("Erased ota_data partition contents")
def switch_otadata(args):
sys.path.append(os.path.join(IDF_COMPONENTS_PATH, "partition_table"))
import gen_esp32part as gen
def is_otadata_status_valid(status):
seq = status.seq % (1 << 32)
crc = hex(binascii.crc32(struct.pack("I", seq), 0xFFFFFFFF) % (1 << 32))
return seq < (int('0xFFFFFFFF', 16) % (1 << 32)) and status.crc == crc
status("Looking for ota app partitions...")
partition_table = None
with tempfile.NamedTemporaryFile() as partition_table_file:
invoke_args = ["get_partition_info", "--table", partition_table_file.name]
_invoke_parttool(invoke_args, args)
partition_table = partition_table_file.read()
partition_table = gen.PartitionTable.from_binary(partition_table)
ota_partitions = list()
for i in range(gen.NUM_PARTITION_SUBTYPE_APP_OTA):
ota_partition = filter(lambda p: p.subtype == (gen.MIN_PARTITION_SUBTYPE_APP_OTA + i), partition_table)
try:
ota_partitions.append(list(ota_partition)[0])
except IndexError:
break
ota_partitions = sorted(ota_partitions, key=lambda p: p.subtype)
if not ota_partitions:
raise RuntimeError("No ota app partitions found")
status("Verifying partition to switch to exists...")
ota_partition_next = None
try:
if args.name:
ota_partition_next = filter(lambda p: p.name == args.name, ota_partitions)
else:
ota_partition_next = filter(lambda p: p.subtype - gen.MIN_PARTITION_SUBTYPE_APP_OTA == args.slot, ota_partitions)
ota_partition_next = list(ota_partition_next)[0]
except IndexError:
raise RuntimeError("Partition to switch to not found")
otadata_contents = _get_otadata_contents(args)
otadata_status = _get_otadata_status(otadata_contents)
otadata_compute_base = -1
if is_otadata_status_valid(otadata_status[0]) and is_otadata_status_valid(otadata_status[1]):
if otadata_status[0].seq >= otadata_status[1].seq:
otadata_compute_base = 0
else:
otadata_compute_base = 1
elif is_otadata_status_valid(otadata_status[0]):
otadata_compute_base = 0
elif is_otadata_status_valid(otadata_status[1]):
otadata_compute_base = 1
else:
pass
ota_seq_next = 0
ota_partitions_num = len(ota_partitions)
target_seq = (ota_partition_next.subtype & 0x0F) + 1
# Find the next ota sequence number
if otadata_compute_base == 0 or otadata_compute_base == 1:
base_seq = otadata_status[otadata_compute_base].seq % (1 << 32)
i = 0
while base_seq > target_seq % ota_partitions_num + i * ota_partitions_num:
i += 1
ota_seq_next = target_seq % ota_partitions_num + i * ota_partitions_num
else:
ota_seq_next = target_seq
# Create binary data from computed values
ota_seq_next = struct.pack("I", ota_seq_next)
ota_seq_crc_next = binascii.crc32(ota_seq_next, 0xFFFFFFFF) % (1 << 32)
ota_seq_crc_next = struct.pack("I", ota_seq_crc_next)
with tempfile.NamedTemporaryFile() as otadata_next_file:
start = (1 if otadata_compute_base == 0 else 0) * (SPI_FLASH_SEC_SIZE >> 1)
otadata_next_file.write(otadata_contents)
otadata_next_file.seek(start)
otadata_next_file.write(ota_seq_next)
otadata_next_file.seek(start + 28)
otadata_next_file.write(ota_seq_crc_next)
otadata_next_file.flush()
_invoke_parttool(["write_partition", "--input", otadata_next_file.name], args)
status("Updated ota_data partition")
def _get_partition_specifier(args):
if args.name:
return ["--partition-name", args.name]
else:
return ["--partition-type", "app", "--partition-subtype", "ota_" + str(args.slot)]
def read_ota_partition(args):
invoke_args = ["read_partition", "--output", args.output]
_invoke_parttool(invoke_args, args, partition=_get_partition_specifier(args))
status("Read ota partition contents to file {}".format(args.output))
def write_ota_partition(args):
invoke_args = ["write_partition", "--input", args.input]
_invoke_parttool(invoke_args, args, partition=_get_partition_specifier(args))
status("Written contents of file {} to ota partition".format(args.input))
def erase_ota_partition(args):
invoke_args = ["erase_partition"]
_invoke_parttool(invoke_args, args, partition=_get_partition_specifier(args))
status("Erased contents of ota partition")
def main():
global quiet
parser = argparse.ArgumentParser("ESP-IDF OTA Partitions Tool")
parser.add_argument("--quiet", "-q", help="suppress stderr messages", action="store_true")
# There are two possible sources for the partition table: a device attached to the host
# or a partition table CSV/binary file. These sources are mutually exclusive.
partition_table_info_source_args = parser.add_mutually_exclusive_group()
partition_table_info_source_args.add_argument("--port", "-p", help="port where the device to read the partition table from is attached", default="")
partition_table_info_source_args.add_argument("--partition-table-file", "-f", help="file (CSV/binary) to read the partition table from", default="")
parser.add_argument("--partition-table-offset", "-o", help="offset to read the partition table from", default="0x8000")
subparsers = parser.add_subparsers(dest="operation", help="run otatool -h for additional help")
# Specify the supported operations
subparsers.add_parser("read_otadata", help="read otadata partition")
subparsers.add_parser("erase_otadata", help="erase otadata partition")
slot_or_name_parser = argparse.ArgumentParser(add_help=False)
slot_or_name_parser_args = slot_or_name_parser.add_mutually_exclusive_group()
slot_or_name_parser_args.add_argument("--slot", help="slot number of the ota partition", type=int)
slot_or_name_parser_args.add_argument("--name", help="name of the ota partition")
subparsers.add_parser("switch_otadata", help="switch otadata partition", parents=[slot_or_name_parser])
read_ota_partition_subparser = subparsers.add_parser("read_ota_partition", help="read contents of an ota partition", parents=[slot_or_name_parser])
read_ota_partition_subparser.add_argument("--output", help="file to write the contents of the ota partition to")
write_ota_partition_subparser = subparsers.add_parser("write_ota_partition", help="write contents to an ota partition", parents=[slot_or_name_parser])
write_ota_partition_subparser.add_argument("--input", help="file whose contents to write to the ota partition")
subparsers.add_parser("erase_ota_partition", help="erase contents of an ota partition", parents=[slot_or_name_parser])
args = parser.parse_args()
quiet = args.quiet
# No operation specified, display help and exit
if args.operation is None:
if not quiet:
parser.print_help()
sys.exit(1)
# Else execute the operation
operation_func = globals()[args.operation]
if quiet:
# If exceptions occur, suppress and exit quietly
try:
operation_func(args)
except Exception:
sys.exit(2)
else:
operation_func(args)
if __name__ == '__main__':
main()
| true | true |
f7216b59c00f5f5b82ec9c7b9bf5292699ace5fe | 82 | py | Python | akshare/fx/__init__.py | ghmole/akshare | eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6 | [
"MIT"
] | 12 | 2020-12-30T02:50:01.000Z | 2021-11-08T11:32:51.000Z | akshare/fx/__init__.py | ghmole/akshare | eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6 | [
"MIT"
] | 3 | 2021-01-26T09:31:43.000Z | 2021-12-08T08:31:54.000Z | akshare/fx/__init__.py | ghmole/akshare | eeeec96f90c6738bcd9ce92fcfa6b9c9176928a6 | [
"MIT"
] | 13 | 2020-07-08T08:48:33.000Z | 2022-03-23T08:37:11.000Z | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/10/20 10:57
Desc:
"""
| 11.714286 | 22 | 0.54878 | true | true | |
f7216c4cb45aea88f34bb4f84f11c15334366e5e | 6,588 | py | Python | tools/Polygraphy/tests/comparator/test_comparator.py | SsisyphusTao/TensorRT | 69f5a5093a39184e137a55c908d5c4d1340b009a | [
"Apache-2.0"
] | 5,249 | 2019-06-17T17:20:34.000Z | 2022-03-31T17:56:05.000Z | tools/Polygraphy/tests/comparator/test_comparator.py | SsisyphusTao/TensorRT | 69f5a5093a39184e137a55c908d5c4d1340b009a | [
"Apache-2.0"
] | 1,721 | 2019-06-17T18:13:29.000Z | 2022-03-31T16:09:53.000Z | tools/Polygraphy/tests/comparator/test_comparator.py | SsisyphusTao/TensorRT | 69f5a5093a39184e137a55c908d5c4d1340b009a | [
"Apache-2.0"
] | 1,414 | 2019-06-18T04:01:17.000Z | 2022-03-31T09:16:53.000Z | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess as sp
import numpy as np
import pytest
import tensorrt as trt
from polygraphy.backend.onnx import BytesFromOnnx, OnnxFromTfGraph, GsFromOnnx
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.pluginref import PluginRefRunner
from polygraphy.backend.tf import SessionFromGraph, TfRunner
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxBytes, TrtRunner
from polygraphy.exception import PolygraphyException
from polygraphy.comparator import Comparator, CompareFunc, DataLoader, IterationResult, PostprocessFunc, RunResults
from polygraphy import mod
from tests.models.meta import ONNX_MODELS, TF_MODELS
class TestComparator(object):
    """End-to-end tests for Comparator.run/compare_accuracy across backends."""

    def test_warmup_runs(self):
        # Warm-up iterations must not be recorded in the returned results.
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnx(onnx_loader))
        run_results = Comparator.run([runner], warm_up=2)
        assert len(run_results[runner.name]) == 1

    def test_list_as_data_loader(self):
        # A plain list of feed_dicts is accepted as a data loader; the
        # identity model must echo each input back unchanged.
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
        data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
        run_results = Comparator.run([runner], data_loader=data)
        iter_results = run_results["onnx_runner"]
        assert len(iter_results) == 2
        for actual, expected in zip(iter_results, data):
            assert np.all(actual["y"] == expected["x"])

    def test_generator_as_data_loader(self):
        # A generator is also accepted as a data loader.
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
        def data():
            for feed_dict in [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2:
                yield feed_dict
        run_results = Comparator.run([runner], data_loader=data())
        iter_results = run_results["onnx_runner"]
        assert len(iter_results) == 2
        for actual, expected in zip(iter_results, data()):
            assert np.all(actual["y"] == expected["x"])

    def test_multiple_runners(self):
        # Run the same model through TF, ONNX-Runtime, the plugin reference
        # runner and TRT, then check all backends agree.
        load_tf = TF_MODELS["identity"].loader
        build_tf_session = SessionFromGraph(load_tf)
        onnx_model = OnnxFromTfGraph(load_tf)
        load_serialized_onnx = BytesFromOnnx(onnx_model)
        build_onnxrt_session = SessionFromOnnx(load_serialized_onnx)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))
        gs_graph = GsFromOnnx(onnx_model)
        runners = [
            TfRunner(build_tf_session),
            OnnxrtRunner(build_onnxrt_session),
            PluginRefRunner(gs_graph),
            TrtRunner(load_engine),
        ]
        run_results = Comparator.run(runners)
        # Shape checks are only enabled on TRT >= 7.0.
        compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1  # Default number of iterations

    def test_postprocess(self):
        onnx_loader = ONNX_MODELS["identity"].loader
        run_results = Comparator.run([OnnxrtRunner(SessionFromOnnx(onnx_loader))], use_subprocess=True)
        # Output shape is (1, 1, 2, 2)
        postprocessed = Comparator.postprocess(run_results, postprocess_func=PostprocessFunc.topk_func(k=1, axis=-1))
        # top-k with k=1 along the last axis shrinks that axis to length 1.
        for _, results in postprocessed.items():
            for result in results:
                for _, output in result.items():
                    assert output.shape == (1, 1, 2, 1)

    def test_errors_do_not_hang(self):
        # Should error because interface is not implemented correctly.
        class FakeRunner(object):
            def __init__(self):
                self.name = "fake"
        runners = [FakeRunner()]
        with pytest.raises(PolygraphyException):
            Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)

    def test_segfault_does_not_hang(self):
        # Simulate a subprocess crash (segfault-style CalledProcessError) and
        # check the parent does not block forever waiting on it.
        def raise_called_process_error():
            class FakeSegfault(sp.CalledProcessError):
                pass
            raise FakeSegfault(-11, ["simulate", "segfault"])
        runners = [TrtRunner(EngineFromNetwork(raise_called_process_error))]
        with pytest.raises(PolygraphyException):
            Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)

    def test_multirun_outputs_are_different(self):
        # With the default random data loader, two iterations must receive
        # different inputs and therefore produce different outputs.
        onnx_loader = ONNX_MODELS["identity"].loader
        runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(onnx_loader)))
        run_results = Comparator.run([runner], data_loader=DataLoader(iterations=2))
        iteration0 = run_results[runner.name][0]
        iteration1 = run_results[runner.name][1]
        for name in iteration0.keys():
            assert np.any(iteration0[name] != iteration1[name])

    def test_validate_nan(self):
        # NaN outputs must fail validation by default.
        run_results = RunResults()
        run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.nan)})]
        assert not Comparator.validate(run_results)

    def test_validate_inf(self):
        # Inf outputs must fail validation when check_inf is enabled.
        run_results = RunResults()
        run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.inf)})]
        assert not Comparator.validate(run_results, check_inf=True)

    def test_dim_param_trt_onnxrt(self):
        # Models with symbolic (dim_param) dimensions must still compare
        # cleanly between ONNX-Runtime and TRT.
        load_onnx_bytes = ONNX_MODELS["dim_param"].loader
        build_onnxrt_session = SessionFromOnnx(load_onnx_bytes)
        load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_onnx_bytes))
        runners = [
            OnnxrtRunner(build_onnxrt_session),
            TrtRunner(load_engine),
        ]
        run_results = Comparator.run(runners)
        compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
        assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
        assert len(list(run_results.values())[0]) == 1  # Default number of iterations
| 43.92 | 117 | 0.696266 |
import subprocess as sp
import numpy as np
import pytest
import tensorrt as trt
from polygraphy.backend.onnx import BytesFromOnnx, OnnxFromTfGraph, GsFromOnnx
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.pluginref import PluginRefRunner
from polygraphy.backend.tf import SessionFromGraph, TfRunner
from polygraphy.backend.trt import EngineFromNetwork, NetworkFromOnnxBytes, TrtRunner
from polygraphy.exception import PolygraphyException
from polygraphy.comparator import Comparator, CompareFunc, DataLoader, IterationResult, PostprocessFunc, RunResults
from polygraphy import mod
from tests.models.meta import ONNX_MODELS, TF_MODELS
class TestComparator(object):
def test_warmup_runs(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader))
run_results = Comparator.run([runner], warm_up=2)
assert len(run_results[runner.name]) == 1
def test_list_as_data_loader(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
data = [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2
run_results = Comparator.run([runner], data_loader=data)
iter_results = run_results["onnx_runner"]
assert len(iter_results) == 2
for actual, expected in zip(iter_results, data):
assert np.all(actual["y"] == expected["x"])
def test_generator_as_data_loader(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = OnnxrtRunner(SessionFromOnnx(onnx_loader), name="onnx_runner")
def data():
for feed_dict in [{"x": np.ones((1, 1, 2, 2), dtype=np.float32)}] * 2:
yield feed_dict
run_results = Comparator.run([runner], data_loader=data())
iter_results = run_results["onnx_runner"]
assert len(iter_results) == 2
for actual, expected in zip(iter_results, data()):
assert np.all(actual["y"] == expected["x"])
def test_multiple_runners(self):
load_tf = TF_MODELS["identity"].loader
build_tf_session = SessionFromGraph(load_tf)
onnx_model = OnnxFromTfGraph(load_tf)
load_serialized_onnx = BytesFromOnnx(onnx_model)
build_onnxrt_session = SessionFromOnnx(load_serialized_onnx)
load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_serialized_onnx))
gs_graph = GsFromOnnx(onnx_model)
runners = [
TfRunner(build_tf_session),
OnnxrtRunner(build_onnxrt_session),
PluginRefRunner(gs_graph),
TrtRunner(load_engine),
]
run_results = Comparator.run(runners)
compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
assert len(list(run_results.values())[0]) == 1
def test_postprocess(self):
onnx_loader = ONNX_MODELS["identity"].loader
run_results = Comparator.run([OnnxrtRunner(SessionFromOnnx(onnx_loader))], use_subprocess=True)
postprocessed = Comparator.postprocess(run_results, postprocess_func=PostprocessFunc.topk_func(k=1, axis=-1))
for _, results in postprocessed.items():
for result in results:
for _, output in result.items():
assert output.shape == (1, 1, 2, 1)
def test_errors_do_not_hang(self):
class FakeRunner(object):
def __init__(self):
self.name = "fake"
runners = [FakeRunner()]
with pytest.raises(PolygraphyException):
Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)
def test_segfault_does_not_hang(self):
def raise_called_process_error():
class FakeSegfault(sp.CalledProcessError):
pass
raise FakeSegfault(-11, ["simulate", "segfault"])
runners = [TrtRunner(EngineFromNetwork(raise_called_process_error))]
with pytest.raises(PolygraphyException):
Comparator.run(runners, use_subprocess=True, subprocess_polling_interval=1)
def test_multirun_outputs_are_different(self):
onnx_loader = ONNX_MODELS["identity"].loader
runner = TrtRunner(EngineFromNetwork(NetworkFromOnnxBytes(onnx_loader)))
run_results = Comparator.run([runner], data_loader=DataLoader(iterations=2))
iteration0 = run_results[runner.name][0]
iteration1 = run_results[runner.name][1]
for name in iteration0.keys():
assert np.any(iteration0[name] != iteration1[name])
def test_validate_nan(self):
run_results = RunResults()
run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.nan)})]
assert not Comparator.validate(run_results)
def test_validate_inf(self):
run_results = RunResults()
run_results["fake-runner"] = [IterationResult(outputs={"x": np.array(np.inf)})]
assert not Comparator.validate(run_results, check_inf=True)
def test_dim_param_trt_onnxrt(self):
load_onnx_bytes = ONNX_MODELS["dim_param"].loader
build_onnxrt_session = SessionFromOnnx(load_onnx_bytes)
load_engine = EngineFromNetwork(NetworkFromOnnxBytes(load_onnx_bytes))
runners = [
OnnxrtRunner(build_onnxrt_session),
TrtRunner(load_engine),
]
run_results = Comparator.run(runners)
compare_func = CompareFunc.simple(check_shapes=mod.version(trt.__version__) >= mod.version("7.0"))
assert bool(Comparator.compare_accuracy(run_results, compare_func=compare_func))
assert len(list(run_results.values())[0]) == 1
| true | true |
f7216d1ac89a7301575efb5070db47b073f062f7 | 1,614 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/sub_protection_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/sub_protection_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/sub_protection_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubProtectionPolicy(Model):
    """Sub-protection policy pairing a backup schedule with its retention
    rules.

    :param policy_type: Type of backup policy type
    :type policy_type: str
    :param schedule_policy: Backup schedule specified as part of backup
     policy.
    :type schedule_policy:
     ~azure.mgmt.recoveryservicesbackup.models.SchedulePolicy
    :param retention_policy: Retention policy with the details on backup
     copy retention ranges.
    :type retention_policy:
     ~azure.mgmt.recoveryservicesbackup.models.RetentionPolicy
    """

    # Serialization map consumed by msrest; keys must match the wire format.
    _attribute_map = {
        'policy_type': {'key': 'policyType', 'type': 'str'},
        'schedule_policy': {'key': 'schedulePolicy', 'type': 'SchedulePolicy'},
        'retention_policy': {'key': 'retentionPolicy', 'type': 'RetentionPolicy'},
    }

    def __init__(self, **kwargs):
        super(SubProtectionPolicy, self).__init__(**kwargs)
        # Each known attribute is taken from kwargs, defaulting to None.
        for attr in ('policy_type', 'schedule_policy', 'retention_policy'):
            setattr(self, attr, kwargs.get(attr, None))
| 39.365854 | 82 | 0.64746 |
from msrest.serialization import Model
class SubProtectionPolicy(Model):
_attribute_map = {
'policy_type': {'key': 'policyType', 'type': 'str'},
'schedule_policy': {'key': 'schedulePolicy', 'type': 'SchedulePolicy'},
'retention_policy': {'key': 'retentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(self, **kwargs):
super(SubProtectionPolicy, self).__init__(**kwargs)
self.policy_type = kwargs.get('policy_type', None)
self.schedule_policy = kwargs.get('schedule_policy', None)
self.retention_policy = kwargs.get('retention_policy', None)
| true | true |
f7216ef57361718e2a601232dbdfdcdcad313aad | 640 | py | Python | backend/colaboradores/schema.py | leonunesbs/medico | 384796f346b001d028e1bec2676ae7242749a79a | [
"MIT"
] | 1 | 2021-12-26T03:27:26.000Z | 2021-12-26T03:27:26.000Z | backend/colaboradores/schema.py | leonunesbs/medico | 384796f346b001d028e1bec2676ae7242749a79a | [
"MIT"
] | 6 | 2021-09-01T19:52:46.000Z | 2022-02-15T20:48:27.000Z | backend/colaboradores/schema.py | leonunesbs/medico | 384796f346b001d028e1bec2676ae7242749a79a | [
"MIT"
] | null | null | null | from graphene import relay, ObjectType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Colaborador
class ColaboradorNode(DjangoObjectType):
    # Relay-compatible node exposing the Colaborador model, with filtering
    # enabled on every model field. (No docstring on purpose: graphene would
    # surface it as the GraphQL type description.)
    class Meta:
        model = Colaborador
        interfaces = (relay.Node, )
        filter_fields = '__all__'

    def resolve_id(self, info):
        # Defer to the default Relay global-ID resolution.
        return super(ColaboradorNode, self).resolve_id(info)
class Query(ObjectType):
    # Root query fields for colaboradores:
    # - colaborador: single-node lookup by Relay global ID.
    # - all_colaboradores: filterable Relay connection over all records.
    colaborador = relay.Node.Field(ColaboradorNode)
    all_colaboradores = DjangoFilterConnectionField(ColaboradorNode)
class Mutation(ObjectType):
    # Placeholder: no colaborador mutations are defined yet.
    pass
class Subscription(ObjectType):
    # Placeholder: no colaborador subscriptions are defined yet.
    pass
| 22.068966 | 68 | 0.753125 | from graphene import relay, ObjectType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Colaborador
class ColaboradorNode(DjangoObjectType):
class Meta:
model = Colaborador
filter_fields = '__all__'
interfaces = (relay.Node, )
def resolve_id(self, info):
return super().resolve_id(info)
class Query(ObjectType):
colaborador = relay.Node.Field(ColaboradorNode)
all_colaboradores = DjangoFilterConnectionField(ColaboradorNode)
class Mutation(ObjectType):
pass
class Subscription(ObjectType):
pass
| true | true |
f721704148332e77abcaafead1bc2fa7b96d4007 | 1,233 | py | Python | src/server/services/mp/settings/save.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | null | null | null | src/server/services/mp/settings/save.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | 5 | 2021-01-28T21:18:27.000Z | 2022-03-25T19:10:01.000Z | src/server/services/mp/settings/save.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs.exception import Error
from server.domain.models import WechatshopUser
import re
import time
import base64
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
    """Save a user's profile (name and mobile number).

    Validates the request payload, then updates the matching
    ``WechatshopUser`` row and marks ``name_mobile`` as completed.
    Raises ``Error`` with a user-facing message on invalid input.
    """
    def dispatch(self, session):
        req_body = self.context.request.body
        name = req_body.name
        mobile = req_body.mobile
        user_id = req_body.userId
        # Presence check must come first: len(None) below would crash.
        if not mobile or not name:
            raise Error(-1, '手机或名字不可为空')
        if len(mobile) < 11:
            raise Error(-1, '长度不对')
        # Mainland-China mobile prefixes. Note: inside a character class,
        # '|' is a literal pipe, so the original [5|7]/[3|6|7] also matched
        # '|'; use [57]/[367] instead.
        mobile_re = re.compile(r'^(13\d|14[57]|15\d|166|17[367]|18\d)\d{8}$')
        # Match against the string itself — the original int(mobile) cast
        # made re.search() raise TypeError on every request.
        res = mobile_re.search(mobile)
        if not res:
            raise Error(-1, '请输入正确手机号')
        data = {
            'name': name,
            'mobile': mobile,
            'name_mobile': 1
        }
        session.query(WechatshopUser).filter(WechatshopUser.id == user_id).update(data)
        session.flush()
| 25.163265 | 87 | 0.633414 |
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs.exception import Error
from server.domain.models import WechatshopUser
import re
import time
import base64
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
def dispatch(self, session):
req_body = self.context.request.body
resp_body = self.context.response.body
name = req_body.name
mobile = req_body.mobile
user_id = req_body.userId
if len(mobile) < 11:
raise Error(-1, '长度不对')
if not mobile or not name:
raise Error(-1, '手机或名字不可为空')
mobile_re = re.compile('^(13\d|14[5|7]|15\d|166|17[3|6|7]|18\d)\d{8}$')
res = re.search(mobile_re, int(mobile))
if not res:
raise Error(-1, '请输入正确手机号')
data = {
'name': name,
'mobile': mobile,
'name_mobile': 1
}
session.query(WechatshopUser).filter(WechatshopUser.id == user_id).update(data)
session.flush()
| true | true |
f72170a4485c1de01d00d965e672b8a4a3f275cd | 3,311 | py | Python | robotx/core/listener.py | ylbian/robot-d | 4046d8ff1774399d2983c35bb4d4b6b51efc37cf | [
"MIT"
] | 44 | 2015-01-10T12:09:53.000Z | 2021-09-30T22:56:07.000Z | robotx/core/listener.py | ylbian/robot-d | 4046d8ff1774399d2983c35bb4d4b6b51efc37cf | [
"MIT"
] | null | null | null | robotx/core/listener.py | ylbian/robot-d | 4046d8ff1774399d2983c35bb4d4b6b51efc37cf | [
"MIT"
] | 25 | 2015-05-25T05:26:19.000Z | 2021-11-10T15:48:17.000Z | """
RobotX Listener.
Integrate with Test Case Management System, such as, test-run creating.
result re-write.
Author: Xin Gao <fdumpling@gmail.com>
"""
import re
from robotx.core.nitrateclient import TCMS
class TCMSListener(object):
    """
    Robot Framework listener that integrates with a Test Case Management
    System (TCMS): creates/updates test-run status, updates per-case-run
    status and logs as tests execute.
    $ pybot --loglevel DEBUG
    --listener listener.TCMSListener:8243:52490 keyword_driven.txt
    >>> import os
    >>> test_source = '/home/xin/tmp/RobotDemo/keyword_driven.txt'
    >>> cmd = 'pybot --listener listener.TCMSListener %s' % test_source
    >>> os.system(cmd)
    """
    ROBOT_LISTENER_API_VERSION = 2
    def __init__(self, planid, runid):
        # planid/runid identify the TCMS test plan and test run this
        # listener reports into.
        self.planid = planid
        self.runid = runid
        self.tcms = TCMS()
        # Mark the run as started (status 0) in TCMS.
        self.tcms.update_run_status(self.runid, 0)
        # In-order record of case-runs seen so far; the last entry is the
        # currently running case.
        self.caseruns = []
    def start_suite(self, name, attrs):
        """
        Called when a test suite starts; no TCMS action needed.
        long name is: Suite1 & Suite2.Suite2.Invalid Login
        source: /home/xin/tmp/WebDemo/suite2/invalid_login.txt
        """
        pass
    def start_test(self, name, attrs):
        """
        Called when a single test (case-run) starts.
        """
        caserun = {}
        caserun['name'] = name
        tags = attrs['tags']
        # Case IDs are carried in tags of the form ID_<n>/id_<n>; strip the
        # three-character prefix. Raises IndexError if no such tag exists.
        case_id = re.findall('ID_\d+|id_\d+', str(tags))[0][3:]
        caserun['id'] = self.tcms.get_caserun_id(self.runid, case_id)
        caserun['tags'] = tags
        caserun['status'] = 'RUNNING'
        caserun['starttime'] = attrs['starttime']
        # change tcms case status to RUNNING
        self.tcms.update_caserun_status(caserun['id'], caserun['status'])
        print '\n', '*' * 79
        print 'Start Running Time: ', caserun['starttime']
        self.caseruns.append(caserun)
    def end_test(self, name, attrs):
        """
        Called when a single test (case-run) finishes; pushes the final
        status (and the captured log, on failure) to TCMS.
        """
        caserun = self.caseruns[-1]
        caserun['status'] = attrs['status']
        caserun['endtime'] = attrs['endtime']
        caserun['message'] = attrs['message']
        # 'logtime' is only present if log_message() captured output for
        # this case (Python 2 dict.has_key).
        if caserun.has_key('logtime'):
            caserun['log'] = '\n' + '*' * 30 + '\n' + caserun['logtime'] + \
                             '\n' + attrs['message'] + '\n' + \
                             caserun['loginfo'] + '\n' + '*' * 30
        else:
            caserun['log'] = ''
        # change tcms case status to attrs['status'], PASS/FAIL
        # (TCMS expects 'PASSED'/'FAILED', hence the 'ED' suffix).
        caserun_status = caserun['status'] + 'ED'
        self.tcms.update_caserun_status(caserun['id'], caserun_status)
        if caserun['status'] != 'PASS':
            self.tcms.update_caserun_log(caserun['id'], caserun['log'])
        print 'End Running Time: ', caserun['endtime']
        print '*' * 79, '\n'
    def log_message(self, message):
        """
        Called on keyword log output; remember the latest message and its
        timestamp so end_test() can attach them to a failing case-run.
        """
        if len(self.caseruns) > 0:
            caserun = self.caseruns[-1]
            caserun['loginfo'] = message['message']
            caserun['logtime'] = message['timestamp']
        else:
            # Log output before any test started is ignored.
            pass
    def close(self):
        """
        Called when all case-runs have finished; mark the run done
        (status 1) in TCMS.
        """
        self.tcms.update_run_status(self.runid, 1)
        print '\n', 'AUTOMATION DONE'.center(70, '*'), '\n'
| 32.460784 | 76 | 0.563878 | """
RobotX Listener.
Integrate with Test Case Management System, such as, test-run creating.
result re-write.
Author: Xin Gao <fdumpling@gmail.com>
"""
import re
from robotx.core.nitrateclient import TCMS
class TCMSListener(object):
"""
integrate with Test Case Management System,
such as, test-run creating, case-run status updating,
tests syntex checking ...
$ pybot --loglevel DEBUG
--listener listener.TCMSListener:8243:52490 keyword_driven.txt
>>> import os
>>> test_source = '/home/xin/tmp/RobotDemo/keyword_driven.txt'
>>> cmd = 'pybot --listener listener.TCMSListener %s' % test_source
>>> os.system(cmd)
"""
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, planid, runid):
self.planid = planid
self.runid = runid
self.tcms = TCMS()
self.tcms.update_run_status(self.runid, 0)
self.caseruns = []
def start_suite(self, name, attrs):
"""
do sth when one test-suite start to run.
long name is: Suite1 & Suite2.Suite2.Invalid Login
source: /home/xin/tmp/WebDemo/suite2/invalid_login.txt
"""
pass
def start_test(self, name, attrs):
"""
do sth when one case-run start to run.
"""
caserun = {}
caserun['name'] = name
tags = attrs['tags']
case_id = re.findall('ID_\d+|id_\d+', str(tags))[0][3:]
caserun['id'] = self.tcms.get_caserun_id(self.runid, case_id)
caserun['tags'] = tags
caserun['status'] = 'RUNNING'
caserun['starttime'] = attrs['starttime']
self.tcms.update_caserun_status(caserun['id'], caserun['status'])
print '\n', '*' * 79
print 'Start Running Time: ', caserun['starttime']
self.caseruns.append(caserun)
def end_test(self, name, attrs):
"""
do sth when one case-run finish.
"""
caserun = self.caseruns[-1]
caserun['status'] = attrs['status']
caserun['endtime'] = attrs['endtime']
caserun['message'] = attrs['message']
if caserun.has_key('logtime'):
caserun['log'] = '\n' + '*' * 30 + '\n' + caserun['logtime'] + \
'\n' + attrs['message'] + '\n' + \
caserun['loginfo'] + '\n' + '*' * 30
else:
caserun['log'] = ''
caserun_status = caserun['status'] + 'ED'
self.tcms.update_caserun_status(caserun['id'], caserun_status)
if caserun['status'] != 'PASS':
self.tcms.update_caserun_log(caserun['id'], caserun['log'])
print 'End Running Time: ', caserun['endtime']
print '*' * 79, '\n'
def log_message(self, message):
"""
do sth when one keyword error
"""
if len(self.caseruns) > 0:
caserun = self.caseruns[-1]
caserun['loginfo'] = message['message']
caserun['logtime'] = message['timestamp']
else:
pass
def close(self):
"""
do sth when all test-caseruns are end.
"""
self.tcms.update_run_status(self.runid, 1)
print '\n', 'AUTOMATION DONE'.center(70, '*'), '\n'
| false | true |
f7217194f4c19697a8e59fe9babfa90a23edf214 | 2,031 | py | Python | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | null | null | null | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | 4 | 2022-02-04T15:18:31.000Z | 2022-02-07T15:07:43.000Z | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | null | null | null | import pytest
import pathlib
import sys
import requests
import io
import zipfile
import tempfile
import pandas as pd
import os
HERE = pathlib.Path(__file__).resolve().parent
# insert at 1, 0 is the script path (or '' in REPL)
# temporary hack until package is published and we can inherit from there:
sys.path.insert(1, '%s/thin_wrappers' % HERE.parent)
import db_utils as db # NOQA: E402
def headers():
    """Browser-like HTTP request headers used for the sample download."""
    hdrs = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'DNT': '1',
        'Pragma': 'no-cache',
    }
    hdrs['User-Agent'] = (
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) '
        'AppleWebKit/537.75.14 (KHTML, like Gecko) '
        'Version/7.0.3 Safari/7046A194A'
    )
    return hdrs
def download_data():
    """Download the sample credit-card CSV archive and return a DataFrame.

    Fetches the zip, extracts the single CSV member to a temporary file,
    parses it with pandas (cp1252 encoding), and removes the temporary
    file afterwards — the original leaked one temp file per call.
    """
    url = 'https://eforexcel.com/wp/wp-content/uploads/2017/07/100-CC-Records.zip'
    res = requests.get(url, headers=headers())
    filebytes = io.BytesIO(res.content)
    archive = zipfile.ZipFile(filebytes)
    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.csv')
    try:
        with open(temp.name, 'wb') as fp:
            fp.write(archive.read('100 CC Records.csv'))
        datum = pd.read_csv(temp.name, encoding='cp1252')
    finally:
        # Close the handle before unlinking (required on Windows), then
        # delete the file delete=False left behind.
        temp.close()
        os.unlink(temp.name)
    return datum
def test_database():
    """Round-trip the sample data through an sqlite file via db_utils."""
    frame = download_data()
    total = len(frame)

    db.write_db_table('dummy', frame, 'replace', 'test_db.sqlite')
    assert os.path.exists('test_db.sqlite'), "Did not find database?!"
    stored = db.read_sql_table('dummy', 'test_db.sqlite')
    assert len(stored) == total, (
        "Number of records does not match between database and data!")

    # Appending the same frame again should exactly double the row count.
    db.write_db_table('dummy', frame, 'append', 'test_db.sqlite')
    stored = db.read_sql_table('dummy', 'test_db.sqlite')
    assert len(stored) == (
        2 * total), "Number of records does not match between database and data!"
if __name__ == '__main__':
pytest.main([__file__])
| 30.313433 | 148 | 0.65485 | import pytest
import pathlib
import sys
import requests
import io
import zipfile
import tempfile
import pandas as pd
import os
HERE = pathlib.Path(__file__).resolve().parent
sys.path.insert(1, '%s/thin_wrappers' % HERE.parent)
import db_utils as db
def headers():
return {'Accept': 'application/json, text/plain, */*',
'Accept-Language': 'en-US,en;q=0.5',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'DNT': '1',
'Pragma': 'no-cache',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
}
def download_data():
url = 'https://eforexcel.com/wp/wp-content/uploads/2017/07/100-CC-Records.zip'
res = requests.get(url, headers=headers())
filebytes = io.BytesIO(res.content)
tmp = zipfile.ZipFile(filebytes)
temp = tempfile.NamedTemporaryFile(delete=False, suffix='.csv')
with open(temp.name, 'wb') as fp:
fp.write(tmp.read('100 CC Records.csv'))
datum = pd.read_csv(temp.name, encoding='cp1252')
return datum
def test_database():
df = download_data()
db.write_db_table('dummy', df, 'replace', 'test_db.sqlite')
assert os.path.exists('test_db.sqlite'), "Did not find database?!"
n_records = len(df)
from_db = db.read_sql_table('dummy', 'test_db.sqlite')
assert len(
from_db) == n_records, "Number of records does not match between database and data!"
db.write_db_table('dummy', df, 'append', 'test_db.sqlite')
from_db = db.read_sql_table('dummy', 'test_db.sqlite')
assert len(from_db) == (
2 * n_records), "Number of records does not match between database and data!"
if __name__ == '__main__':
pytest.main([__file__])
| true | true |
f721726ac088dd61876dfef95afdd66374bad3ee | 9,061 | py | Python | cinder/tests/unit/image/fake.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/image/fake.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/image/fake.py | 2020human/cinder | 04528318848620e4ce2639ea2dd5323783dc7a1f | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import mock
import uuid
from cinder import exception
import cinder.image.glance
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'private',
'protected': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'size': 1024,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
self.temp_images = mock.MagicMock()
super(_FakeImageService, self).__init__()
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, data):
self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exist.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except Exception:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
def add_location(self, context, image_id, url, metadata):
self.update(context, image_id, {'locations': [{'url': url,
'metadata': metadata}]})
return True
_fakeImageService = _FakeImageService()
def FakeImageService():
    """Return the shared fake image service singleton."""
    return _fakeImageService
def FakeImageService_reset():
    """Replace the singleton with a fresh instance (restores seed images)."""
    global _fakeImageService
    _fakeImageService = _FakeImageService()
def mock_image_service(testcase):
    """Patch cinder's glance accessors on *testcase* so both the remote and
    the default image-service lookups return the shared fake service."""
    testcase.mock_object(cinder.image.glance, 'get_remote_image_service',
                         lambda x, y: (FakeImageService(), y))
    testcase.mock_object(cinder.image.glance, 'get_default_image_service',
                         mock.Mock(side_effect=FakeImageService))
| 36.833333 | 79 | 0.522238 |
import copy
import datetime
import mock
import uuid
from cinder import exception
import cinder.image.glance
class _FakeImageService(object):
def __init__(self):
self.images = {}
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'private',
'protected': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'size': 1024,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
self.temp_images = mock.MagicMock()
super(_FakeImageService, self).__init__()
def detail(self, context, **kwargs):
return copy.deepcopy(self.images.values())
def download(self, context, image_id, data):
self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except Exception:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
def add_location(self, context, image_id, url, metadata):
self.update(context, image_id, {'locations': [{'url': url,
'metadata': metadata}]})
return True
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def mock_image_service(testcase):
testcase.mock_object(cinder.image.glance, 'get_remote_image_service',
lambda x, y: (FakeImageService(), y))
testcase.mock_object(cinder.image.glance, 'get_default_image_service',
mock.Mock(side_effect=FakeImageService))
| true | true |
f72173450168ae1e2087830e5c05a6eac53ab7d3 | 3,780 | py | Python | uvAnalyser.py | bacheloruhh/uvAnalyser | b8962305831336fac10cc3b38b44777a28f517ff | [
"MIT"
] | null | null | null | uvAnalyser.py | bacheloruhh/uvAnalyser | b8962305831336fac10cc3b38b44777a28f517ff | [
"MIT"
] | null | null | null | uvAnalyser.py | bacheloruhh/uvAnalyser | b8962305831336fac10cc3b38b44777a28f517ff | [
"MIT"
] | null | null | null | import csv
import os
# ====================
# Default variables:
default_avg_cnt = 10
default_exc_thr = 0.02
default_low_wvl = 300
default_hig_wvl = 1014
default_delimit = '\t'
default_exc_fin = True
# ====================
def welcome():
    """Print a greeting, prompt for the analysis parameters and return
    them as an (avg_cnt, exc_thr) tuple.

    Empty or non-numeric input falls back to the module defaults.
    """
    print("Welcome.\nThis script will merge all files in this directory, " +
          "normalize them\nand make suggestions for the location of the " +
          "first exciton.\nPress 'Enter' for default values (i.e. %d, %.2f).\n"
          % (default_avg_cnt, default_exc_thr)
          )
    avg_cnt = raw_input("Number of baseline values for average: ")
    if avg_cnt and valid_input(avg_cnt):
        # int(float(...)) tolerates inputs like '10.0' that pass the
        # numeric check but would make a bare int() raise ValueError.
        avg_cnt = int(float(avg_cnt))
    else:
        avg_cnt = default_avg_cnt
    exc_thr = raw_input("Exciton absorbance threshold: ")
    if exc_thr and valid_input(exc_thr):
        exc_thr = float(exc_thr)
    else:
        exc_thr = default_exc_thr
    print
    return avg_cnt, exc_thr
def valid_input(x):
    """Return True if *x* can be parsed as a number (int or float).

    The original probe ``int(x) or float(x)`` raised ValueError on any
    float-looking string such as '0.05' (int('0.05') fails before the
    float branch is ever evaluated), so user-supplied thresholds were
    always rejected. A single float() cast accepts both forms.
    """
    try:
        float(x)
        return True
    except ValueError:
        return False
def read_data():
    """Return absorbance data from all '*C.txt' files in the current
    directory as a 2d list: a header row of sample labels followed by one
    row per wavelength from default_low_wvl to default_hig_wvl."""
    print "Reading data..."
    data = [["name"]]
    for i in range(default_low_wvl, default_hig_wvl+1):
        data.append([i])
    for filename in os.listdir(os.getcwd()):
        if filename.endswith("C.txt"):
            # Sample label built from filename positions 11-15 —
            # presumably an HH:MM timestamp in the name; TODO confirm.
            data[0].append(':'.join([filename[11:13], filename[13:15]]))
            with open(filename) as csvfile:
                # Skip the header line (Python 2 file-iterator .next()).
                csvfile.next()
                reader = csv.reader(csvfile, delimiter='\t')
                for index, row in enumerate(reader):
                    # Column 1 holds the absorbance value for this row.
                    data[index+1].append(row[1])
    return data
def normalize_data(data):
    """Subtract a per-column baseline (mean of the last ``avg_cnt``
    rows, i.e. the highest wavelengths) from every absorbance column,
    in place, and return ``data``."""
    print("Normalizing data...")
    n_rows = default_hig_wvl - default_low_wvl + 1
    for col in range(1, len(data[0])):
        baseline = 0
        for offset in range(avg_cnt):
            baseline += float(data[n_rows - offset][col])
        baseline /= avg_cnt
        for row in range(1, n_rows + 1):
            data[row][col] = str(float(data[row][col]) - baseline)
    return data
def write_data(data, delim):
    """Dump the 2d list *data* to 'merged_files.txt' using *delim* as the
    column separator."""
    print("Writing data...")
    with open("merged_files.txt", 'w') as output_file:
        csv.writer(output_file, delimiter=delim).writerows(data)
def exciton_finder(data):
    """Scan each sample column (from high to low wavelength) for the first
    local maximum above exc_thr and write the estimates to
    'first_exciton.txt'; the file is removed again if nothing was found."""
    if default_exc_fin:
        with open("first_exciton.txt", 'w') as writer:
            writer.write("sample\tfirst exciton [nm]")
            exc_found = 0
            for col in range(1, len(data[0])):
                prev = 0
                # Walk rows from the longest wavelength downwards.
                for row in range(len(data)-1, 0, -1):
                    if float(data[row][col]) > exc_thr:
                        # A drop below the previous above-threshold value
                        # marks the peak (first exciton) of this column.
                        if float(data[row][col]) < prev:
                            writer.write("\n%s\t%d" % (data[0][col],
                                                       row+default_low_wvl)
                                         )
                            exc_found += 1
                            break
                        prev = float(data[row][col])
        if exc_found == 0:
            os.remove("first_exciton.txt")
        print "%d of %d excitons found." % (exc_found, len(data[0])-1)
    else:
        print "Exciton finder disabled."
# Script entry point. Note: avg_cnt and exc_thr become module globals that
# normalize_data() and exciton_finder() read directly.
avg_cnt, exc_thr = welcome()
data = read_data()
data = normalize_data(data)
write_data(data, default_delimit)
exciton_finder(data)
raw_input("Press 'Enter' to close window...")
| 30.483871 | 79 | 0.571958 | import csv
import os
default_avg_cnt = 10
default_exc_thr = 0.02
default_low_wvl = 300
default_hig_wvl = 1014
default_delimit = '\t'
default_exc_fin = True
def welcome():
print("Welcome.\nThis script will merge all files in this directory, " +
"normalize them\nand make suggestions for the location of the " +
"first exciton.\nPress 'Enter' for default values (i.e. %d, %.2f).\n"
% (default_avg_cnt, default_exc_thr)
)
avg_cnt = raw_input("Number of baseline values for average: ")
if avg_cnt and valid_input(avg_cnt):
avg_cnt = int(avg_cnt)
else:
avg_cnt = default_avg_cnt
exc_thr = raw_input("Exciton absorbance threshold: ")
if exc_thr and valid_input(exc_thr):
exc_thr = float(exc_thr)
else:
exc_thr = default_exc_thr
print
return avg_cnt, exc_thr
def valid_input(x):
try:
int(x) or float(x)
return True
except ValueError:
return False
def read_data():
print "Reading data..."
data = [["name"]]
for i in range(default_low_wvl, default_hig_wvl+1):
data.append([i])
for filename in os.listdir(os.getcwd()):
if filename.endswith("C.txt"):
data[0].append(':'.join([filename[11:13], filename[13:15]]))
with open(filename) as csvfile:
csvfile.next()
reader = csv.reader(csvfile, delimiter='\t')
for index, row in enumerate(reader):
data[index+1].append(row[1])
return data
def normalize_data(data):
print "Normalizing data..."
dif = default_hig_wvl - default_low_wvl + 1
for col in range(1, len(data[0])):
avg = 0
for x in range(avg_cnt):
avg += float(data[dif-x][col])
avg /= avg_cnt
for row in range(1, dif+1):
data[row][col] = str(float(data[row][col])-avg)
return data
def write_data(data, delim):
print "Writing data..."
with open("merged_files.txt", 'w') as output_file:
writer = csv.writer(output_file, delimiter=delim)
writer.writerows(data)
def exciton_finder(data):
if default_exc_fin:
with open("first_exciton.txt", 'w') as writer:
writer.write("sample\tfirst exciton [nm]")
exc_found = 0
for col in range(1, len(data[0])):
prev = 0
for row in range(len(data)-1, 0, -1):
if float(data[row][col]) > exc_thr:
if float(data[row][col]) < prev:
writer.write("\n%s\t%d" % (data[0][col],
row+default_low_wvl)
)
exc_found += 1
break
prev = float(data[row][col])
if exc_found == 0:
os.remove("first_exciton.txt")
print "%d of %d excitons found." % (exc_found, len(data[0])-1)
else:
print "Exciton finder disabled."
avg_cnt, exc_thr = welcome()
data = read_data()
data = normalize_data(data)
write_data(data, default_delimit)
exciton_finder(data)
raw_input("Press 'Enter' to close window...")
| false | true |
f721754672bebac235baff6704cad30073fc6e3a | 2,231 | py | Python | recipes/python/template/template/trainingdataloader.py | tumulurik/acp-data-services-dsw-reference | 4ec0a161203a1097069bb5c0044eb6df137c5f6d | [
"Apache-2.0"
] | null | null | null | recipes/python/template/template/trainingdataloader.py | tumulurik/acp-data-services-dsw-reference | 4ec0a161203a1097069bb5c0044eb6df137c5f6d | [
"Apache-2.0"
] | null | null | null | recipes/python/template/template/trainingdataloader.py | tumulurik/acp-data-services-dsw-reference | 4ec0a161203a1097069bb5c0044eb6df137c5f6d | [
"Apache-2.0"
] | 1 | 2018-11-15T19:15:50.000Z | 2018-11-15T19:15:50.000Z | #####################################################################
# ADOBE CONFIDENTIAL
# ___________________
#
# Copyright 2017 Adobe
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Adobe and its suppliers, if any. The intellectual
# and technical concepts contained herein are proprietary to Adobe
# and its suppliers and are protected by all applicable intellectual
# property laws, including trade secret and copyright laws.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Adobe.
#####################################################################
import numpy as np
import pandas as pd
from data_access_sdk_python.reader import DataSetReader
def load(configProperties):
# This variable will hold the part of the data on which we train our model
train = None
print("Training Data Load Start")
#########################################
# Extract fields from configProperties
#########################################
# data = configProperties['data']
# train_start = configProperties['train_start']
# train_end = configProperties['train_end']
#########################################
# Load Data
#########################################
### From CSV ###
# df = pd.read_csv(data)
### - OR - From Data Access SDK ###
# prodreader = DataSetReader(ims_url=configProperties['ims_url'],
# catalog_url=configProperties['catalog_url'],
# client_id=configProperties['client_id'],
# client_secret=configProperties['client_secret'],
# code=configProperties['code'])
# df = prodreader.load(configProperties['data_set_id'], configProperties['ims_org'])
#########################################
# Data Preparation/Feature Engineering
#########################################
### Add/Remove/Modify DataFrame below ###
### Then return the training data ###
# test = df[train_start:train_end]
print("Training Data Load Finish")
return train
| 34.859375 | 88 | 0.556253 | true | true | |
f72175eba1256181da7f1dbcf593e18eb8a344a6 | 7,472 | py | Python | neo/io/__init__.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | 1 | 2020-06-08T14:00:03.000Z | 2020-06-08T14:00:03.000Z | neo/io/__init__.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | 22 | 2016-09-13T13:31:25.000Z | 2019-05-14T17:07:16.000Z | neo/io/__init__.py | Warfley/python-neo | 875e23a417e1a65d5cb45403e6e3261155e2741d | [
"BSD-3-Clause"
] | null | null | null | """
:mod:`neo.io` provides classes for reading and/or writing
electrophysiological data files.
Note that if the package dependency is not satisfied for one io, it does not
raise an error but a warning.
:attr:`neo.io.iolist` provides a list of successfully imported io classes.
Functions:
.. autofunction:: neo.io.get_io
Classes:
* :attr:`AlphaOmegaIO`
* :attr:`AsciiImageIO`
* :attr:`AsciiSignalIO`
* :attr:`AsciiSpikeTrainIO`
* :attr:`AxographIO`
* :attr:`AxonIO`
* :attr:`BCI2000IO`
* :attr:`BlackrockIO`
* :attr:`BlkIO`
* :attr:`BrainVisionIO`
* :attr:`BrainwareDamIO`
* :attr:`BrainwareF32IO`
* :attr:`BrainwareSrcIO`
* :attr:`ElanIO`
* :attr:`IgorIO`
* :attr:`IntanIO`
* :attr:`KlustaKwikIO`
* :attr:`KwikIO`
* :attr:`MicromedIO`
* :attr:`NeoHdf5IO`
* :attr:`NeoMatlabIO`
* :attr:`NestIO`
* :attr:`NeuralynxIO`
* :attr:`NeuroExplorerIO`
* :attr:`NeuroScopeIO`
* :attr:`NeuroshareIO`
* :attr:`NixIO`
* :attr:`NSDFIO`
* :attr:`OpenEphysIO`
* :attr:`PickleIO`
* :attr:`PlexonIO`
* :attr:`RawBinarySignalIO`
* :attr:`RawMCSIO`
* :attr:`Spike2IO`
* :attr:`StimfitIO`
* :attr:`TdtIO`
* :attr:`TiffIO`
* :attr:`WinEdrIO`
* :attr:`WinWcpIO`
.. autoclass:: neo.io.AlphaOmegaIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiImageIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiSignalIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiSpikeTrainIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AxographIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AxonIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BCI2000IO
.. autoattribute:: extensions
.. autoclass:: neo.io.BlackrockIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BlkIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainVisionIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareDamIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareF32IO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareSrcIO
.. autoattribute:: extensions
.. autoclass:: neo.io.ElanIO
.. autoattribute:: extensions
.. .. autoclass:: neo.io.ElphyIO
.. autoattribute:: extensions
.. autoclass:: neo.io.IgorIO
.. autoattribute:: extensions
.. autoclass:: neo.io.IntanIO
.. autoattribute:: extensions
.. autoclass:: neo.io.KlustaKwikIO
.. autoattribute:: extensions
.. autoclass:: neo.io.KwikIO
.. autoattribute:: extensions
.. autoclass:: neo.io.MicromedIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeoHdf5IO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeoMatlabIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NestIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuralynxIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroExplorerIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroScopeIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroshareIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NixIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NSDFIO
.. autoattribute:: extensions
.. autoclass:: neo.io.OpenEphysIO
.. autoattribute:: extensions
.. autoclass:: neo.io.PickleIO
.. autoattribute:: extensions
.. autoclass:: neo.io.PlexonIO
.. autoattribute:: extensions
.. autoclass:: neo.io.RawBinarySignalIO
.. autoattribute:: extensions
.. autoclass:: neo.io.RawMCSIO
.. autoattribute:: extensions
.. autoclass:: Spike2IO
.. autoattribute:: extensions
.. autoclass:: neo.io.StimfitIO
.. autoattribute:: extensions
.. autoclass:: neo.io.TdtIO
.. autoattribute:: extensions
.. autoclass:: neo.io.TiffIO
.. autoattribute:: extensions
.. autoclass:: neo.io.WinEdrIO
.. autoattribute:: extensions
.. autoclass:: neo.io.WinWcpIO
.. autoattribute:: extensions
"""
import os.path
# try to import the neuroshare library.
# if it is present, use the neuroshareapiio to load neuroshare files
# if it is not present, use the neurosharectypesio to load files
try:
import neuroshare as ns
except ImportError as err:
from neo.io.neurosharectypesio import NeurosharectypesIO as NeuroshareIO
# print("\n neuroshare library not found, loading data with ctypes" )
# print("\n to use the API be sure to install the library found at:")
# print("\n www.http://pythonhosted.org/neuroshare/")
else:
from neo.io.neuroshareapiio import NeuroshareapiIO as NeuroshareIO
# print("neuroshare library successfully imported")
# print("\n loading with API...")
from neo.io.alphaomegaio import AlphaOmegaIO
from neo.io.asciiimageio import AsciiImageIO
from neo.io.asciisignalio import AsciiSignalIO
from neo.io.asciispiketrainio import AsciiSpikeTrainIO
from neo.io.axographio import AxographIO
from neo.io.axonio import AxonIO
from neo.io.blackrockio import BlackrockIO
from neo.io.blackrockio_v4 import BlackrockIO as OldBlackrockIO
from neo.io.blkio import BlkIO
from neo.io.bci2000io import BCI2000IO
from neo.io.brainvisionio import BrainVisionIO
from neo.io.brainwaredamio import BrainwareDamIO
from neo.io.brainwaref32io import BrainwareF32IO
from neo.io.brainwaresrcio import BrainwareSrcIO
from neo.io.elanio import ElanIO
# from neo.io.elphyio import ElphyIO
from neo.io.exampleio import ExampleIO
from neo.io.igorproio import IgorIO
from neo.io.intanio import IntanIO
from neo.io.klustakwikio import KlustaKwikIO
from neo.io.kwikio import KwikIO
from neo.io.micromedio import MicromedIO
from neo.io.hdf5io import NeoHdf5IO
from neo.io.neomatlabio import NeoMatlabIO
from neo.io.nestio import NestIO
from neo.io.neuralynxio import NeuralynxIO
from neo.io.neuralynxio_v1 import NeuralynxIO as OldNeuralynxIO
from neo.io.neuroexplorerio import NeuroExplorerIO
from neo.io.neuroscopeio import NeuroScopeIO
from neo.io.nixio import NixIO
from neo.io.nixio_fr import NixIO as NixIOFr
from neo.io.nsdfio import NSDFIO
from neo.io.openephysio import OpenEphysIO
from neo.io.pickleio import PickleIO
from neo.io.plexonio import PlexonIO
from neo.io.rawbinarysignalio import RawBinarySignalIO
from neo.io.rawmcsio import RawMCSIO
from neo.io.spike2io import Spike2IO
from neo.io.stimfitio import StimfitIO
from neo.io.tdtio import TdtIO
from neo.io.tiffio import TiffIO
from neo.io.winedrio import WinEdrIO
from neo.io.winwcpio import WinWcpIO
iolist = [
AlphaOmegaIO,
AsciiImageIO,
AsciiSignalIO,
AsciiSpikeTrainIO,
AxographIO,
AxonIO,
BCI2000IO,
BlackrockIO,
BlkIO,
BrainVisionIO,
BrainwareDamIO,
BrainwareF32IO,
BrainwareSrcIO,
ElanIO,
# ElphyIO,
ExampleIO,
IgorIO,
IntanIO,
KlustaKwikIO,
KwikIO,
MicromedIO,
NixIO, # place NixIO before NeoHdf5IO to make it the default for .h5 files
NeoHdf5IO,
NeoMatlabIO,
NestIO,
NeuralynxIO,
NeuroExplorerIO,
NeuroScopeIO,
NeuroshareIO,
NSDFIO,
OpenEphysIO,
PickleIO,
PlexonIO,
RawBinarySignalIO,
RawMCSIO,
Spike2IO,
StimfitIO,
TdtIO,
TiffIO,
WinEdrIO,
WinWcpIO
]
def get_io(filename, *args, **kwargs):
"""
Return a Neo IO instance, guessing the type based on the filename suffix.
"""
extension = os.path.splitext(filename)[1][1:]
for io in iolist:
if extension in io.extensions:
return io(filename, *args, **kwargs)
raise IOError("File extension %s not registered" % extension)
| 22.172107 | 79 | 0.720021 |
import os.path
try:
import neuroshare as ns
except ImportError as err:
from neo.io.neurosharectypesio import NeurosharectypesIO as NeuroshareIO
else:
from neo.io.neuroshareapiio import NeuroshareapiIO as NeuroshareIO
from neo.io.alphaomegaio import AlphaOmegaIO
from neo.io.asciiimageio import AsciiImageIO
from neo.io.asciisignalio import AsciiSignalIO
from neo.io.asciispiketrainio import AsciiSpikeTrainIO
from neo.io.axographio import AxographIO
from neo.io.axonio import AxonIO
from neo.io.blackrockio import BlackrockIO
from neo.io.blackrockio_v4 import BlackrockIO as OldBlackrockIO
from neo.io.blkio import BlkIO
from neo.io.bci2000io import BCI2000IO
from neo.io.brainvisionio import BrainVisionIO
from neo.io.brainwaredamio import BrainwareDamIO
from neo.io.brainwaref32io import BrainwareF32IO
from neo.io.brainwaresrcio import BrainwareSrcIO
from neo.io.elanio import ElanIO
from neo.io.exampleio import ExampleIO
from neo.io.igorproio import IgorIO
from neo.io.intanio import IntanIO
from neo.io.klustakwikio import KlustaKwikIO
from neo.io.kwikio import KwikIO
from neo.io.micromedio import MicromedIO
from neo.io.hdf5io import NeoHdf5IO
from neo.io.neomatlabio import NeoMatlabIO
from neo.io.nestio import NestIO
from neo.io.neuralynxio import NeuralynxIO
from neo.io.neuralynxio_v1 import NeuralynxIO as OldNeuralynxIO
from neo.io.neuroexplorerio import NeuroExplorerIO
from neo.io.neuroscopeio import NeuroScopeIO
from neo.io.nixio import NixIO
from neo.io.nixio_fr import NixIO as NixIOFr
from neo.io.nsdfio import NSDFIO
from neo.io.openephysio import OpenEphysIO
from neo.io.pickleio import PickleIO
from neo.io.plexonio import PlexonIO
from neo.io.rawbinarysignalio import RawBinarySignalIO
from neo.io.rawmcsio import RawMCSIO
from neo.io.spike2io import Spike2IO
from neo.io.stimfitio import StimfitIO
from neo.io.tdtio import TdtIO
from neo.io.tiffio import TiffIO
from neo.io.winedrio import WinEdrIO
from neo.io.winwcpio import WinWcpIO
iolist = [
AlphaOmegaIO,
AsciiImageIO,
AsciiSignalIO,
AsciiSpikeTrainIO,
AxographIO,
AxonIO,
BCI2000IO,
BlackrockIO,
BlkIO,
BrainVisionIO,
BrainwareDamIO,
BrainwareF32IO,
BrainwareSrcIO,
ElanIO,
ExampleIO,
IgorIO,
IntanIO,
KlustaKwikIO,
KwikIO,
MicromedIO,
NixIO,
NeoHdf5IO,
NeoMatlabIO,
NestIO,
NeuralynxIO,
NeuroExplorerIO,
NeuroScopeIO,
NeuroshareIO,
NSDFIO,
OpenEphysIO,
PickleIO,
PlexonIO,
RawBinarySignalIO,
RawMCSIO,
Spike2IO,
StimfitIO,
TdtIO,
TiffIO,
WinEdrIO,
WinWcpIO
]
def get_io(filename, *args, **kwargs):
extension = os.path.splitext(filename)[1][1:]
for io in iolist:
if extension in io.extensions:
return io(filename, *args, **kwargs)
raise IOError("File extension %s not registered" % extension)
| true | true |
f721766457ab8501938015654594e370b906deb0 | 3,890 | py | Python | workflow/scripts/combine_virsorter_virfinder.py | rdenise/virome_pipeline | 3c629aef75b184bf39f2d14043f94e8787e3ea14 | [
"MIT"
] | 1 | 2022-03-29T21:18:53.000Z | 2022-03-29T21:18:53.000Z | workflow/scripts/combine_virsorter_virfinder.py | rdenise/virome_pipeline | 3c629aef75b184bf39f2d14043f94e8787e3ea14 | [
"MIT"
] | null | null | null | workflow/scripts/combine_virsorter_virfinder.py | rdenise/virome_pipeline | 3c629aef75b184bf39f2d14043f94e8787e3ea14 | [
"MIT"
] | null | null | null | from Bio import SeqIO
import pandas as pd
import sys
import os
# Put error and out into the log file
sys.stderr = sys.stdout = open(snakemake.log[0], "w")
###########################################################
###########################################################
# List that will contains all the contigs to filter
all_contig_ids = []
# Dataframe that contains all the informations about
output_df = pd.DataFrame(columns=["contig_id", "virsorter_cat", "deepvirfinder"])
# Get all the names from the virsorter keep2 list
ids_virsorter_keep2 = snakemake.input.ids_virsorter_keep2_checked
with open(ids_virsorter_keep2) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
rstrip_line = rstrip_line.split("||")[0]
all_contig_ids.append(rstrip_line)
output_df.at[rstrip_line, "contig_id"] = rstrip_line
output_df.at[rstrip_line, "virsorter_cat"] = "keep2_checked"
# Get all the names from the virsorter keep1 list and remove redondant name
ids_virsorter_keep1 = snakemake.input.ids_virsorter_keep1
with open(ids_virsorter_keep1) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
rstrip_line = rstrip_line.split("||")[0]
if rstrip_line not in all_contig_ids:
all_contig_ids.append(rstrip_line)
output_df.at[rstrip_line, "contig_id"] = rstrip_line
output_df.at[rstrip_line, "virsorter_cat"] = "keep1"
# Get all the names from the deepvirfinder list and remove redondant name
ids_virfinder = snakemake.input.ids_virfinder
with open(ids_virfinder) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
output_df.at[rstrip_line, "contig_id"] = rstrip_line
output_df.at[rstrip_line, "deepvirfinder"] = "Yes"
if rstrip_line not in all_contig_ids:
all_contig_ids.append(rstrip_line)
# Fill the informations missing now the list of contigs we keep is set
dict_map_virsorter = {}
files_with_info = {
snakemake.input.ids_virsorter_keep2_suspicious: "keep2_suspicious",
snakemake.input.ids_virsorter_manual_check: "to_manual_check",
snakemake.input.ids_virsorter_discarded: "discarded",
}
for file_ids in files_with_info:
with open(file_ids) as r_file:
r_file.readline()
for line in r_file:
rstrip_line = line.rstrip()
rstrip_line = rstrip_line.split("||")[0]
if rstrip_line not in all_contig_ids:
dict_map_virsorter[rstrip_line] = files_with_info[file_ids]
# Fill the dataframe
list_contig2add_virsorter_cat = list(dict_map_virsorter.keys())
output_df.loc[
output_df.contig_id.isin(list_contig2add_virsorter_cat), "virsorter_cat"
] = output_df.loc[
output_df.contig_id.isin(list_contig2add_virsorter_cat), "contig_id"
].map(
dict_map_virsorter
)
output_df.fillna("No", inplace=True)
# Parse the fasta of the contig and create the new one
fasta_contigs = snakemake.input.contigs
with open(snakemake.output.fasta, "w") as w_file:
with open(snakemake.output.translation_table, "w") as tsv_file:
tsv_file.write("old_contig_name\tnew_contig_name\n")
parser = SeqIO.parse(fasta_contigs, "fasta")
for contig in parser:
if contig.id in all_contig_ids:
contig_id = f"{snakemake.wildcards.sample}-{contig.id}".replace(
"_", "-"
)
tsv_file.write(f"{contig.id}\t{contig_id}\n")
contig.id = contig_id
contig.name = ""
contig.description = ""
SeqIO.write(contig, w_file, "fasta")
output_df.to_csv(snakemake.output.tsv, sep="\t", index=False)
###########################################################
###########################################################
| 31.626016 | 81 | 0.648072 | from Bio import SeqIO
import pandas as pd
import sys
import os
sys.stderr = sys.stdout = open(snakemake.log[0], "w")
| true | true |
f7217746e68b217cef673ded6405c62a5976ac18 | 5,365 | py | Python | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | 1 | 2021-04-02T15:33:12.000Z | 2021-04-02T15:33:12.000Z | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | null | null | null | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | 1 | 2021-08-14T09:07:22.000Z | 2021-08-14T09:07:22.000Z | import math
import os
from math import log10
# noinspection PyPackageRequirements
import cv2
import numpy as np
from scipy.ndimage import distance_transform_edt
import config_main
from Utils.log_handler import log_setup_info_to_console, log_error_to_console, log_benchmark_info_to_console
from Benchmarking.Util.image_parsing import find_img_extension
from Benchmarking.Config.create_benchmark_job import set_gt_location, set_image_set, set_input_location, job_set
def rde_calc(img, img_gt, k_value):
"""
Dubuisson, M.P.; Jain, A.K. A modified Hausdorff distance for object matching. IEEE ICPR 1994, 1, 566-568
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.1.8155&rep=rep1&type=pdf
:param img: edge map resulting of algorithm
:param img_gt: ground truth image
:return: psnr value for image
"""
# calculate distances
dist_gt = distance_transform_edt(np.invert(img_gt))
dist_dc = distance_transform_edt(np.invert(img))
# calculate sum(d^k(D))
sum_dc = 0.0
sum_gt = 0.0
left = 0.0
right = 0.0
for i in range(0, img_gt.shape[0]):
for j in range(0, img_gt.shape[1]):
if img_gt[i, j]:
sum_dc += dist_dc[i, j] ** k_value
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
if img[i, j]:
sum_gt += dist_gt[i, j] ** k_value
cn_cd = np.count_nonzero(img)
cn_gt = np.count_nonzero(img_gt)
if cn_cd != 0 :
left = math.pow(sum_gt / cn_cd, 1.0/k_value)
if cn_gt != 0:
right = math.pow(sum_dc / cn_gt, 1.0/k_value)
if cn_cd==0:
rde = 1000
else:
rde = left + right
return rde
# noinspection PyPep8Naming
def run_RDE_benchmark(input_location: str, gt_location: str,
raw_image: str, jobs_set: list,
k: int):
"""
xxx
:param input_location: location of algorithm images
:param gt_location: location of gt images
:param raw_image: location of raw images
:param jobs_set: algo sets to evaluate
:return: None
"""
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_RDE(k)
def run_CM_benchmark_RDE(k_value):
"""
:return:
"""
log_setup_info_to_console("BENCHMARKING CM RDEK" + int(k_value).__str__())
idx = 0
for set in config_main.BENCHMARK_SETS:
log_benchmark_info_to_console('Current set: {number}\{total} : {set}'.format(number=idx, total=len(config_main.BENCHMARK_SETS), set=set))
idx += 1
# try:
if True:
# Write results to disk
results_path = os.path.join(os.getcwd(), config_main.BENCHMARK_RESULTS, "RDEK" + int(k_value).__str__())
if not os.path.exists(results_path):
os.makedirs(results_path)
csv = open(os.path.join(results_path, set + '.log'), "w+")
csv.write('Per image (#, RDEK' + int(k_value).__str__() + ':\n')
# log_benchmark_info_to_console('Per image (#, RDE):\n')
avg = 0
count = 0
for file in config_main.BENCHMARK_SAMPLE_NAMES:
# find extension of images and gt_images
if config_main.APPL_SAVE_JOB_NAME is True:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file))
else:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file))
gt_extension = find_img_extension(os.path.join(config_main.BENCHMARK_GT_LOCATION, file))
path_img_gt = os.path.join(config_main.BENCHMARK_GT_LOCATION, file + gt_extension)
if config_main.APPL_SAVE_JOB_NAME is True:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file + img_extension)
else:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file + img_extension)
img_gt = cv2.cvtColor(cv2.imread(path_img_gt), cv2.COLOR_BGR2GRAY)
img_al = cv2.cvtColor(cv2.imread(path_img_al), cv2.COLOR_BGR2GRAY)
try:
val = rde_calc(img_al, img_gt, k_value)
avg += val
count += 1
csv.write('{:<10s} {:<10.6f}\n'.format(file, val))
# log_benchmark_info_to_console('{:<10s} {:<10.6f}\n'.format(file, val))
except Exception as ex:
log_error_to_console("BENCHMARK CM RDEK{val}: {file}".format(val=int(k_value).__str__(), file=file), ex.__str__())
log_benchmark_info_to_console('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
csv.write('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
# except Exception as ex:
# log_error_to_console('BENCHMARK CM RDEK' + int(k_value).__str__() + 'NOK', ex.__str__())
if __name__ == "__main__":
pass
| 37.517483 | 146 | 0.608574 | import math
import os
from math import log10
import cv2
import numpy as np
from scipy.ndimage import distance_transform_edt
import config_main
from Utils.log_handler import log_setup_info_to_console, log_error_to_console, log_benchmark_info_to_console
from Benchmarking.Util.image_parsing import find_img_extension
from Benchmarking.Config.create_benchmark_job import set_gt_location, set_image_set, set_input_location, job_set
def rde_calc(img, img_gt, k_value):
dist_gt = distance_transform_edt(np.invert(img_gt))
dist_dc = distance_transform_edt(np.invert(img))
sum_dc = 0.0
sum_gt = 0.0
left = 0.0
right = 0.0
for i in range(0, img_gt.shape[0]):
for j in range(0, img_gt.shape[1]):
if img_gt[i, j]:
sum_dc += dist_dc[i, j] ** k_value
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
if img[i, j]:
sum_gt += dist_gt[i, j] ** k_value
cn_cd = np.count_nonzero(img)
cn_gt = np.count_nonzero(img_gt)
if cn_cd != 0 :
left = math.pow(sum_gt / cn_cd, 1.0/k_value)
if cn_gt != 0:
right = math.pow(sum_dc / cn_gt, 1.0/k_value)
if cn_cd==0:
rde = 1000
else:
rde = left + right
return rde
def run_RDE_benchmark(input_location: str, gt_location: str,
raw_image: str, jobs_set: list,
k: int):
set_gt_location(gt_location)
set_input_location(input_location)
set_image_set(raw_image)
job_set(jobs_set)
run_CM_benchmark_RDE(k)
def run_CM_benchmark_RDE(k_value):
log_setup_info_to_console("BENCHMARKING CM RDEK" + int(k_value).__str__())
idx = 0
for set in config_main.BENCHMARK_SETS:
log_benchmark_info_to_console('Current set: {number}\{total} : {set}'.format(number=idx, total=len(config_main.BENCHMARK_SETS), set=set))
idx += 1
if True:
results_path = os.path.join(os.getcwd(), config_main.BENCHMARK_RESULTS, "RDEK" + int(k_value).__str__())
if not os.path.exists(results_path):
os.makedirs(results_path)
csv = open(os.path.join(results_path, set + '.log'), "w+")
csv.write('Per image (#, RDEK' + int(k_value).__str__() + ':\n')
avg = 0
count = 0
for file in config_main.BENCHMARK_SAMPLE_NAMES:
if config_main.APPL_SAVE_JOB_NAME is True:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file))
else:
img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file))
gt_extension = find_img_extension(os.path.join(config_main.BENCHMARK_GT_LOCATION, file))
path_img_gt = os.path.join(config_main.BENCHMARK_GT_LOCATION, file + gt_extension)
if config_main.APPL_SAVE_JOB_NAME is True:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file + img_extension)
else:
path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file + img_extension)
img_gt = cv2.cvtColor(cv2.imread(path_img_gt), cv2.COLOR_BGR2GRAY)
img_al = cv2.cvtColor(cv2.imread(path_img_al), cv2.COLOR_BGR2GRAY)
try:
val = rde_calc(img_al, img_gt, k_value)
avg += val
count += 1
csv.write('{:<10s} {:<10.6f}\n'.format(file, val))
except Exception as ex:
log_error_to_console("BENCHMARK CM RDEK{val}: {file}".format(val=int(k_value).__str__(), file=file), ex.__str__())
log_benchmark_info_to_console('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
csv.write('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
if __name__ == "__main__":
pass
| true | true |
f7217797ff9948fe15504b1554d32d09382f057d | 3,899 | py | Python | PaddleCV/tracking/ltr/data/loader.py | zhousanfu/paddle-demo | 56860c5241874fe6111def46ea2f3f91e3ba80de | [
"Apache-2.0"
] | 1 | 2021-07-07T11:04:11.000Z | 2021-07-07T11:04:11.000Z | PaddleCV/tracking/ltr/data/loader.py | zhousanfu/paddle_demo | 56860c5241874fe6111def46ea2f3f91e3ba80de | [
"Apache-2.0"
] | null | null | null | PaddleCV/tracking/ltr/data/loader.py | zhousanfu/paddle_demo | 56860c5241874fe6111def46ea2f3f91e3ba80de | [
"Apache-2.0"
] | 1 | 2021-05-18T06:36:32.000Z | 2021-05-18T06:36:32.000Z | import os
import sys
import dataflow as df
import numpy as np
class LTRLoader(df.DataFlow):
"""
Data loader. Combines a dataset and a sampler, and provides
single- or multi-process iterators over the dataset.
Note: an additional option stack_dim is available to
select along which dimension the data should be stacked to form a batch.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, ``shuffle`` must be False.
batch_sampler (Sampler, optional): like sampler, but returns a batch of
indices at a time. Mutually exclusive with batch_size, shuffle,
sampler, and drop_last.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
(default: 0)
collate_fn (callable, optional): merges a list of samples to form a mini-batch.
stack_dim (int): Dimension along which to stack to form the batch. (default: 0)
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: False)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: 0)
worker_init_fn (callable, optional): If not None, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: None)
.. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an
unpicklable object, e.g., a lambda function.
"""
__initialized = False
def __init__(self,
name,
dataset,
training=True,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
epoch_interval=1,
collate_fn=None,
stack_dim=0,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None):
super().__init__()
ds = df.RepeatedData(dataset, -1)
ds = df.MultiProcessRunnerZMQ(ds, num_proc=num_workers, hwm=300)
# ds = df.MultiThreadRunner(lambda: ds, num_prefetch=1024, num_thread=num_workers)
ds = df.BatchData(ds, batch_size)
self.ds = ds
self.name = name
self.training = training
self.epoch_interval = epoch_interval
self.stack_dim = stack_dim
self.batches_per_epoch = len(dataset) // batch_size
def __len__(self):
return self.batches_per_epoch
def __iter__(self):
if not self.__initialized:
self.reset_state()
self.__initialized = True
for d in self.ds:
if self.stack_dim > 0:
for k, v in d.items():
if len(v.shape) >= self.stack_dim + 1:
d[k] = np.swapaxes(v, 0, self.stack_dim)
yield d
def reset_state(self):
self.ds.reset_state()
| 39.383838 | 90 | 0.60118 | import os
import sys
import dataflow as df
import numpy as np
class LTRLoader(df.DataFlow):
__initialized = False
def __init__(self,
name,
dataset,
training=True,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
epoch_interval=1,
collate_fn=None,
stack_dim=0,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None):
super().__init__()
ds = df.RepeatedData(dataset, -1)
ds = df.MultiProcessRunnerZMQ(ds, num_proc=num_workers, hwm=300)
ds = df.BatchData(ds, batch_size)
self.ds = ds
self.name = name
self.training = training
self.epoch_interval = epoch_interval
self.stack_dim = stack_dim
self.batches_per_epoch = len(dataset) // batch_size
def __len__(self):
return self.batches_per_epoch
def __iter__(self):
if not self.__initialized:
self.reset_state()
self.__initialized = True
for d in self.ds:
if self.stack_dim > 0:
for k, v in d.items():
if len(v.shape) >= self.stack_dim + 1:
d[k] = np.swapaxes(v, 0, self.stack_dim)
yield d
def reset_state(self):
self.ds.reset_state()
| true | true |
f72177dda3702aa0aa6df33982088a3eb433c9ba | 13,260 | py | Python | Lib/test/test_module.py | ErikBjare/cpython | b68431fadb3150134ac6ccbf501cdfeaf4c75678 | [
"0BSD"
] | 5 | 2021-12-03T23:11:53.000Z | 2022-01-08T21:02:50.000Z | Lib/test/test_module.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | [
"0BSD"
] | 8 | 2022-01-07T11:31:11.000Z | 2022-03-04T00:07:16.000Z | Lib/test/test_module.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | [
"0BSD"
] | 1 | 2022-03-27T18:34:54.000Z | 2022-03-27T18:34:54.000Z | # Test the module type
import unittest
import weakref
from test.support import gc_collect
from test.support import import_helper
from test.support.script_helper import assert_python_ok
import sys
ModuleType = type(sys)
class FullLoader:
    """
    Loader stub that provides a module_repr() hook, used to verify that a
    module's repr honors its loader's crafted representation.
    """

    @classmethod
    def module_repr(cls, m):
        return f"<module '{m.__name__}' (crafted)>"
class BareLoader:
    """Loader stub with no module_repr() hook; module reprs fall back to repr(loader)."""
    pass
class ModuleTests(unittest.TestCase):
def test_uninitialized(self):
# An uninitialized module has no __dict__ or __name__,
# and __doc__ is None
foo = ModuleType.__new__(ModuleType)
self.assertTrue(isinstance(foo.__dict__, dict))
self.assertEqual(dir(foo), [])
try:
s = foo.__name__
self.fail("__name__ = %s" % repr(s))
except AttributeError:
pass
self.assertEqual(foo.__doc__, ModuleType.__doc__)
def test_uninitialized_missing_getattr(self):
# Issue 8297
# test the text in the AttributeError of an uninitialized module
foo = ModuleType.__new__(ModuleType)
self.assertRaisesRegex(
AttributeError, "module has no attribute 'not_here'",
getattr, foo, "not_here")
def test_missing_getattr(self):
# Issue 8297
# test the text in the AttributeError
foo = ModuleType("foo")
self.assertRaisesRegex(
AttributeError, "module 'foo' has no attribute 'not_here'",
getattr, foo, "not_here")
def test_no_docstring(self):
# Regularly initialized module, no docstring
foo = ModuleType("foo")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, None)
self.assertIs(foo.__loader__, None)
self.assertIs(foo.__package__, None)
self.assertIs(foo.__spec__, None)
self.assertEqual(foo.__dict__, {"__name__": "foo", "__doc__": None,
"__loader__": None, "__package__": None,
"__spec__": None})
def test_ascii_docstring(self):
# ASCII docstring
foo = ModuleType("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_unicode_docstring(self):
# Unicode docstring
foo = ModuleType("foo", "foodoc\u1234")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc\u1234")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc\u1234",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_reinit(self):
# Reinitialization should not replace the __dict__
foo = ModuleType("foo", "foodoc\u1234")
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.bar, 42)
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc", "bar": 42,
"__loader__": None, "__package__": None, "__spec__": None})
self.assertTrue(foo.__dict__ is d)
def test_dont_clear_dict(self):
# See issue 7140.
def f():
foo = ModuleType("foo")
foo.bar = 4
return foo
gc_collect()
self.assertEqual(f().__dict__["bar"], 4)
def test_clear_dict_in_ref_cycle(self):
destroyed = []
m = ModuleType("foo")
m.destroyed = destroyed
s = """class A:
def __init__(self, l):
self.l = l
def __del__(self):
self.l.append(1)
a = A(destroyed)"""
exec(s, m.__dict__)
del m
gc_collect()
self.assertEqual(destroyed, [1])
def test_weakref(self):
m = ModuleType("foo")
wr = weakref.ref(m)
self.assertIs(wr(), m)
del m
gc_collect()
self.assertIs(wr(), None)
def test_module_getattr(self):
import test.good_getattr as gga
from test.good_getattr import test
self.assertEqual(test, "There is test")
self.assertEqual(gga.x, 1)
self.assertEqual(gga.y, 2)
with self.assertRaisesRegex(AttributeError,
"Deprecated, use whatever instead"):
gga.yolo
self.assertEqual(gga.whatever, "There is whatever")
del sys.modules['test.good_getattr']
def test_module_getattr_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
self.assertEqual(bga.x, 1)
self.assertEqual(bad_getattr2.x, 1)
with self.assertRaises(TypeError):
bga.nope
with self.assertRaises(TypeError):
bad_getattr2.nope
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_dir(self):
import test.good_getattr as gga
self.assertEqual(dir(gga), ['a', 'b', 'c'])
del sys.modules['test.good_getattr']
def test_module_dir_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
with self.assertRaises(TypeError):
dir(bga)
with self.assertRaises(TypeError):
dir(bad_getattr2)
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_getattr_tricky(self):
from test import bad_getattr3
# these lookups should not crash
with self.assertRaises(AttributeError):
bad_getattr3.one
with self.assertRaises(AttributeError):
bad_getattr3.delgetattr
if 'test.bad_getattr3' in sys.modules:
del sys.modules['test.bad_getattr3']
def test_module_repr_minimal(self):
# reprs when modules have no __file__, __name__, or __loader__
m = ModuleType('foo')
del m.__name__
self.assertEqual(repr(m), "<module '?'>")
def test_module_repr_with_name(self):
m = ModuleType('foo')
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_name_and_filename(self):
m = ModuleType('foo')
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_filename_only(self):
m = ModuleType('foo')
del m.__name__
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module '?' from '/tmp/foo.py'>")
def test_module_repr_with_loader_as_None(self):
m = ModuleType('foo')
assert m.__loader__ is None
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_bare_loader_but_no_name(self):
m = ModuleType('foo')
del m.__name__
# Yes, a class not an instance.
m.__loader__ = BareLoader
loader_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_full_loader_but_no_name(self):
# m.__loader__.module_repr() will fail because the module has no
# m.__name__. This exception will get suppressed and instead the
# loader's repr will be used.
m = ModuleType('foo')
del m.__name__
# Yes, a class not an instance.
m.__loader__ = FullLoader
loader_repr = repr(FullLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_bare_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
module_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module 'foo' ({})>".format(module_repr))
def test_module_repr_with_full_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
self.assertEqual(
repr(m), "<module 'foo' (crafted)>")
def test_module_repr_with_bare_loader_and_filename(self):
# Because the loader has no module_repr(), use the file name.
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_full_loader_and_filename(self):
# Even though the module has an __file__, use __loader__.module_repr()
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' (crafted)>")
def test_module_repr_builtin(self):
self.assertEqual(repr(sys), "<module 'sys' (built-in)>")
def test_module_repr_source(self):
r = repr(unittest)
starts_with = "<module 'unittest' from '"
ends_with = "__init__.py'>"
self.assertEqual(r[:len(starts_with)], starts_with,
'{!r} does not start with {!r}'.format(r, starts_with))
self.assertEqual(r[-len(ends_with):], ends_with,
'{!r} does not end with {!r}'.format(r, ends_with))
def test_module_finalization_at_shutdown(self):
# Module globals and builtins should still be available during shutdown
rc, out, err = assert_python_ok("-c", "from test import final_a")
self.assertFalse(err)
lines = out.splitlines()
self.assertEqual(set(lines), {
b"x = a",
b"x = b",
b"final_a.x = a",
b"final_b.x = b",
b"len = len",
b"shutil.rmtree = rmtree"})
def test_descriptor_errors_propagate(self):
class Descr:
def __get__(self, o, t):
raise RuntimeError
class M(ModuleType):
melon = Descr()
self.assertRaises(RuntimeError, getattr, M("mymod"), "melon")
def test_lazy_create_annotations(self):
# module objects lazy create their __annotations__ dict on demand.
# the annotations dict is stored in module.__dict__.
# a freshly created module shouldn't have an annotations dict yet.
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = foo.__annotations__
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_setting_annotations(self):
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = {'a': int}
foo.__annotations__ = d
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_annotations_getset_raises(self):
# double delete
foo = ModuleType("foo")
foo.__annotations__ = {}
del foo.__annotations__
with self.assertRaises(AttributeError):
del foo.__annotations__
def test_annotations_are_created_correctly(self):
ann_module4 = import_helper.import_fresh_module('test.ann_module4')
self.assertTrue("__annotations__" in ann_module4.__dict__)
del ann_module4.__annotations__
self.assertFalse("__annotations__" in ann_module4.__dict__)
def test_repeated_attribute_pops(self):
# Repeated accesses to module attribute will be specialized
# Check that popping the attribute doesn't break it
m = ModuleType("test")
d = m.__dict__
count = 0
for _ in range(100):
m.attr = 1
count += m.attr # Might be specialized
d.pop("attr")
self.assertEqual(count, 100)
# frozen and namespace module reprs are tested in importlib.
def test_subclass_with_slots(self):
# In 3.11alpha this crashed, as the slots weren't NULLed.
class ModuleWithSlots(ModuleType):
__slots__ = ("a", "b")
def __init__(self, name):
super().__init__(name)
m = ModuleWithSlots("name")
with self.assertRaises(AttributeError):
m.a
with self.assertRaises(AttributeError):
m.b
m.a, m.b = 1, 2
self.assertEqual(m.a, 1)
self.assertEqual(m.b, 2)
if __name__ == '__main__':
unittest.main()
| 35.74124 | 80 | 0.597511 |
import unittest
import weakref
from test.support import gc_collect
from test.support import import_helper
from test.support.script_helper import assert_python_ok
import sys
ModuleType = type(sys)
class FullLoader:
@classmethod
def module_repr(cls, m):
return "<module '{}' (crafted)>".format(m.__name__)
class BareLoader:
pass
class ModuleTests(unittest.TestCase):
def test_uninitialized(self):
foo = ModuleType.__new__(ModuleType)
self.assertTrue(isinstance(foo.__dict__, dict))
self.assertEqual(dir(foo), [])
try:
s = foo.__name__
self.fail("__name__ = %s" % repr(s))
except AttributeError:
pass
self.assertEqual(foo.__doc__, ModuleType.__doc__)
def test_uninitialized_missing_getattr(self):
foo = ModuleType.__new__(ModuleType)
self.assertRaisesRegex(
AttributeError, "module has no attribute 'not_here'",
getattr, foo, "not_here")
def test_missing_getattr(self):
foo = ModuleType("foo")
self.assertRaisesRegex(
AttributeError, "module 'foo' has no attribute 'not_here'",
getattr, foo, "not_here")
def test_no_docstring(self):
foo = ModuleType("foo")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, None)
self.assertIs(foo.__loader__, None)
self.assertIs(foo.__package__, None)
self.assertIs(foo.__spec__, None)
self.assertEqual(foo.__dict__, {"__name__": "foo", "__doc__": None,
"__loader__": None, "__package__": None,
"__spec__": None})
def test_ascii_docstring(self):
foo = ModuleType("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_unicode_docstring(self):
foo = ModuleType("foo", "foodoc\u1234")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc\u1234")
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc\u1234",
"__loader__": None, "__package__": None,
"__spec__": None})
def test_reinit(self):
foo = ModuleType("foo", "foodoc\u1234")
foo.bar = 42
d = foo.__dict__
foo.__init__("foo", "foodoc")
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(foo.bar, 42)
self.assertEqual(foo.__dict__,
{"__name__": "foo", "__doc__": "foodoc", "bar": 42,
"__loader__": None, "__package__": None, "__spec__": None})
self.assertTrue(foo.__dict__ is d)
def test_dont_clear_dict(self):
def f():
foo = ModuleType("foo")
foo.bar = 4
return foo
gc_collect()
self.assertEqual(f().__dict__["bar"], 4)
def test_clear_dict_in_ref_cycle(self):
destroyed = []
m = ModuleType("foo")
m.destroyed = destroyed
s = """class A:
def __init__(self, l):
self.l = l
def __del__(self):
self.l.append(1)
a = A(destroyed)"""
exec(s, m.__dict__)
del m
gc_collect()
self.assertEqual(destroyed, [1])
def test_weakref(self):
m = ModuleType("foo")
wr = weakref.ref(m)
self.assertIs(wr(), m)
del m
gc_collect()
self.assertIs(wr(), None)
def test_module_getattr(self):
import test.good_getattr as gga
from test.good_getattr import test
self.assertEqual(test, "There is test")
self.assertEqual(gga.x, 1)
self.assertEqual(gga.y, 2)
with self.assertRaisesRegex(AttributeError,
"Deprecated, use whatever instead"):
gga.yolo
self.assertEqual(gga.whatever, "There is whatever")
del sys.modules['test.good_getattr']
def test_module_getattr_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
self.assertEqual(bga.x, 1)
self.assertEqual(bad_getattr2.x, 1)
with self.assertRaises(TypeError):
bga.nope
with self.assertRaises(TypeError):
bad_getattr2.nope
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_dir(self):
import test.good_getattr as gga
self.assertEqual(dir(gga), ['a', 'b', 'c'])
del sys.modules['test.good_getattr']
def test_module_dir_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
with self.assertRaises(TypeError):
dir(bga)
with self.assertRaises(TypeError):
dir(bad_getattr2)
del sys.modules['test.bad_getattr']
if 'test.bad_getattr2' in sys.modules:
del sys.modules['test.bad_getattr2']
def test_module_getattr_tricky(self):
from test import bad_getattr3
with self.assertRaises(AttributeError):
bad_getattr3.one
with self.assertRaises(AttributeError):
bad_getattr3.delgetattr
if 'test.bad_getattr3' in sys.modules:
del sys.modules['test.bad_getattr3']
def test_module_repr_minimal(self):
m = ModuleType('foo')
del m.__name__
self.assertEqual(repr(m), "<module '?'>")
def test_module_repr_with_name(self):
m = ModuleType('foo')
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_name_and_filename(self):
m = ModuleType('foo')
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_filename_only(self):
m = ModuleType('foo')
del m.__name__
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module '?' from '/tmp/foo.py'>")
def test_module_repr_with_loader_as_None(self):
m = ModuleType('foo')
assert m.__loader__ is None
self.assertEqual(repr(m), "<module 'foo'>")
def test_module_repr_with_bare_loader_but_no_name(self):
m = ModuleType('foo')
del m.__name__
m.__loader__ = BareLoader
loader_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_full_loader_but_no_name(self):
m = ModuleType('foo')
del m.__name__
# Yes, a class not an instance.
m.__loader__ = FullLoader
loader_repr = repr(FullLoader)
self.assertEqual(
repr(m), "<module '?' ({})>".format(loader_repr))
def test_module_repr_with_bare_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
module_repr = repr(BareLoader)
self.assertEqual(
repr(m), "<module 'foo' ({})>".format(module_repr))
def test_module_repr_with_full_loader(self):
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
self.assertEqual(
repr(m), "<module 'foo' (crafted)>")
def test_module_repr_with_bare_loader_and_filename(self):
# Because the loader has no module_repr(), use the file name.
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = BareLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_module_repr_with_full_loader_and_filename(self):
# Even though the module has an __file__, use __loader__.module_repr()
m = ModuleType('foo')
# Yes, a class not an instance.
m.__loader__ = FullLoader
m.__file__ = '/tmp/foo.py'
self.assertEqual(repr(m), "<module 'foo' (crafted)>")
def test_module_repr_builtin(self):
self.assertEqual(repr(sys), "<module 'sys' (built-in)>")
def test_module_repr_source(self):
r = repr(unittest)
starts_with = "<module 'unittest' from '"
ends_with = "__init__.py'>"
self.assertEqual(r[:len(starts_with)], starts_with,
'{!r} does not start with {!r}'.format(r, starts_with))
self.assertEqual(r[-len(ends_with):], ends_with,
'{!r} does not end with {!r}'.format(r, ends_with))
def test_module_finalization_at_shutdown(self):
# Module globals and builtins should still be available during shutdown
rc, out, err = assert_python_ok("-c", "from test import final_a")
self.assertFalse(err)
lines = out.splitlines()
self.assertEqual(set(lines), {
b"x = a",
b"x = b",
b"final_a.x = a",
b"final_b.x = b",
b"len = len",
b"shutil.rmtree = rmtree"})
def test_descriptor_errors_propagate(self):
class Descr:
def __get__(self, o, t):
raise RuntimeError
class M(ModuleType):
melon = Descr()
self.assertRaises(RuntimeError, getattr, M("mymod"), "melon")
def test_lazy_create_annotations(self):
# module objects lazy create their __annotations__ dict on demand.
# the annotations dict is stored in module.__dict__.
# a freshly created module shouldn't have an annotations dict yet.
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = foo.__annotations__
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_setting_annotations(self):
foo = ModuleType("foo")
for i in range(4):
self.assertFalse("__annotations__" in foo.__dict__)
d = {'a': int}
foo.__annotations__ = d
self.assertTrue("__annotations__" in foo.__dict__)
self.assertEqual(foo.__annotations__, d)
self.assertEqual(foo.__dict__['__annotations__'], d)
if i % 2:
del foo.__annotations__
else:
del foo.__dict__['__annotations__']
def test_annotations_getset_raises(self):
foo = ModuleType("foo")
foo.__annotations__ = {}
del foo.__annotations__
with self.assertRaises(AttributeError):
del foo.__annotations__
def test_annotations_are_created_correctly(self):
ann_module4 = import_helper.import_fresh_module('test.ann_module4')
self.assertTrue("__annotations__" in ann_module4.__dict__)
del ann_module4.__annotations__
self.assertFalse("__annotations__" in ann_module4.__dict__)
def test_repeated_attribute_pops(self):
m = ModuleType("test")
d = m.__dict__
count = 0
for _ in range(100):
m.attr = 1
count += m.attr # Might be specialized
d.pop("attr")
self.assertEqual(count, 100)
# frozen and namespace module reprs are tested in importlib.
def test_subclass_with_slots(self):
# In 3.11alpha this crashed, as the slots weren't NULLed.
class ModuleWithSlots(ModuleType):
__slots__ = ("a", "b")
def __init__(self, name):
super().__init__(name)
m = ModuleWithSlots("name")
with self.assertRaises(AttributeError):
m.a
with self.assertRaises(AttributeError):
m.b
m.a, m.b = 1, 2
self.assertEqual(m.a, 1)
self.assertEqual(m.b, 2)
if __name__ == '__main__':
unittest.main()
| true | true |
f7217a596eab242de146ed6262830949ee89e841 | 3,214 | py | Python | tsa/links/crawl.py | chbrown/topic-sentiment-authorship | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | [
"MIT"
] | null | null | null | tsa/links/crawl.py | chbrown/topic-sentiment-authorship | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | [
"MIT"
] | null | null | null | tsa/links/crawl.py | chbrown/topic-sentiment-authorship | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import socket
import urllib.parse
from datetime import datetime
import requests
import requests.exceptions as reqexc
import sqlalchemy.exc as sqlexc
from tsa import stdoutn
from tsa.lib import html
from tsa.models import Endpoint, create_session
from tsa import logging
logger = logging.getLogger(__name__)
whitespace_translations = dict((ord(whitespace), ' ') for whitespace in '\t\n\r')
def add_url(url, parent_id=None):
    """Queue `url` for crawling by inserting an Endpoint row.

    `parent_id` links the new row to the endpoint that produced it (e.g. a
    redirect source).  An IntegrityError — presumably from a uniqueness
    constraint on the URL; confirm in tsa.models — is treated as a
    duplicate: the insert is rolled back and the error printed.
    """
    DBSession = create_session()
    endpoint = Endpoint(url=url, parent_id=parent_id)
    DBSession.add(endpoint)
    try:
        DBSession.commit()
    except sqlexc.IntegrityError as exc:
        # simply ignore duplicates
        DBSession.rollback()
        print(exc)
def process_untried_endpoints():
    """Fetch every endpoint that has not yet been tried, one at a time.

    An endpoint counts as untried while status_code, timeout and error are
    all NULL; each iteration marks the endpoint in exactly one of those
    three ways, so the query eventually drains.  Redirect targets are
    re-queued via add_url().
    """
    DBSession = create_session()
    # id, parent_id, url, status_code, redirect, html, content, created, accessed, timeout
    # find endpoints that aren't already fetched
    query = DBSession.query(Endpoint).\
        filter(Endpoint.status_code == None).\
        filter(Endpoint.timeout == None).\
        filter(Endpoint.error == None).\
        order_by(Endpoint.id)
    logger.info('Processing %d untried endpoints', query.count())
    while True:
        # Re-evaluate the query each pass; rows fall out once marked.
        endpoint = query.first()
        if not endpoint:
            break
        print(endpoint.id, endpoint.url)
        # one of three things happens:
        try:
            # 1. set status_code
            get = requests.get(endpoint.url, allow_redirects=False, timeout=10)
            endpoint.status_code = get.status_code
            endpoint.accessed = datetime.utcnow()
            if get.status_code in [301, 302, 303]:
                endpoint.redirect = get.headers['location']
                # and add the result to the queue:
                add_url(endpoint.redirect, endpoint.id)
            else:
                endpoint.html = get.text
                # remove boilerplate from html
                endpoint.content = html.to_text(endpoint.html)
        except (socket.timeout, reqexc.Timeout):
            # 2. set endpoint.timeout
            endpoint.timeout = datetime.utcnow()
        except (reqexc.ConnectionError, reqexc.SSLError, reqexc.MissingSchema,
                reqexc.InvalidURL, reqexc.URLRequired):
            # 3. set endpoint.error
            endpoint.error = datetime.utcnow()
        except Exception:
            # Unexpected failure: leave the endpoint unmarked and re-raise.
            print(endpoint.url)
            raise
        DBSession.commit()
def tabulate(endpoints):
    """Print endpoints as TSV: id, redirect trail, domain, flattened text.

    The text column is truncated so a row stays well under 64 KiB, and all
    tab/newline characters are replaced by spaces so each endpoint occupies
    exactly one row.
    """
    stdoutn('endpoint_id\turls\tdomain\ttext')
    # Must be an int: 65536 / 2 is a float on Python 3 and floats are not
    # valid slice indices (text[:max_len] would raise TypeError).
    max_len = 65536 // 2 - 10
    for endpoint in endpoints:
        trail = ' -> '.join(endpoint.trail())
        domain = urllib.parse.urlparse(endpoint.url).netloc
        # lstrip('www.') strips *characters* from the set {'w', '.'} and
        # would mangle hosts like 'wikipedia.org'; strip the exact prefix.
        if domain.startswith('www.'):
            domain = domain[len('www.'):]
        text = endpoint.content.translate(whitespace_translations)
        line = '\t'.join([str(endpoint.id), trail, domain, text[:max_len]])
        stdoutn(line)
def analyze_content_length(endpoints):
    """Log the mean and (upper) median of the endpoints' content lengths.

    Empty input is logged and skipped instead of raising ZeroDivisionError.
    """
    lengths = [len(endpoint.content) for endpoint in endpoints]
    if not lengths:
        logger.info('endpoint content length: no endpoints')
        return
    mean = float(sum(lengths)) / float(len(lengths))
    # Index must be an int: len(lengths) / 2 is a float on Python 3 and
    # would raise TypeError when used as a list index.
    median = sorted(lengths)[len(lengths) // 2]
    logger.info('endpoint content length: mean=%0.3f median=%0.1f', mean, median)
| 32.14 | 90 | 0.641257 |
import socket
import urllib.parse
from datetime import datetime
import requests
import requests.exceptions as reqexc
import sqlalchemy.exc as sqlexc
from tsa import stdoutn
from tsa.lib import html
from tsa.models import Endpoint, create_session
from tsa import logging
logger = logging.getLogger(__name__)
whitespace_translations = dict((ord(whitespace), ' ') for whitespace in '\t\n\r')
def add_url(url, parent_id=None):
DBSession = create_session()
endpoint = Endpoint(url=url, parent_id=parent_id)
DBSession.add(endpoint)
try:
DBSession.commit()
except sqlexc.IntegrityError as exc:
DBSession.rollback()
print(exc)
def process_untried_endpoints():
DBSession = create_session()
query = DBSession.query(Endpoint).\
filter(Endpoint.status_code == None).\
filter(Endpoint.timeout == None).\
filter(Endpoint.error == None).\
order_by(Endpoint.id)
logger.info('Processing %d untried endpoints', query.count())
while True:
endpoint = query.first()
if not endpoint:
break
print(endpoint.id, endpoint.url)
# one of three things happens:
try:
# 1. set status_code
get = requests.get(endpoint.url, allow_redirects=False, timeout=10)
endpoint.status_code = get.status_code
endpoint.accessed = datetime.utcnow()
if get.status_code in [301, 302, 303]:
endpoint.redirect = get.headers['location']
# and add the result to the queue:
add_url(endpoint.redirect, endpoint.id)
else:
endpoint.html = get.text
# remove boilerplate from html
endpoint.content = html.to_text(endpoint.html)
except (socket.timeout, reqexc.Timeout):
# 2. set endpoint.timeout
endpoint.timeout = datetime.utcnow()
except (reqexc.ConnectionError, reqexc.SSLError, reqexc.MissingSchema,
reqexc.InvalidURL, reqexc.URLRequired):
# 3. set endpoint.error
endpoint.error = datetime.utcnow()
except Exception:
print(endpoint.url)
raise
DBSession.commit()
def tabulate(endpoints):
stdoutn('endpoint_id\turls\tdomain\ttext')
max_len = 65536/2 - 10
for endpoint in endpoints:
trail = ' -> '.join(endpoint.trail())
domain = urllib.parse.urlparse(endpoint.url).netloc.lstrip('www.')
text = endpoint.content.translate(whitespace_translations)
line = '\t'.join([str(endpoint.id), trail, domain, text[:max_len]])
stdoutn(line)
def analyze_content_length(endpoints):
lengths = []
for endpoint in endpoints:
lengths += [len(endpoint.content)]
# for percentile in range(
mean = float(sum(lengths)) / float(len(lengths))
median = sorted(lengths)[len(lengths) / 2]
logger.info('endpoint content length: mean=%0.3f median=%0.1f', mean, median)
| true | true |
f7217b021c92c57203280273bd959699cf6039c7 | 46,777 | py | Python | learningTolearn/backbone/common.py | ximingxing/Learning-To-Learn | 0135cb41521a61d1f3248cf3fe409e51f824fe25 | [
"MIT"
] | 5 | 2019-12-01T02:52:39.000Z | 2020-10-20T01:51:40.000Z | learningTolearn/backbone/common.py | ximingxing/DeepLearningWithPytorch | 0135cb41521a61d1f3248cf3fe409e51f824fe25 | [
"MIT"
] | 1 | 2019-11-18T13:26:50.000Z | 2019-11-18T13:26:50.000Z | learningTolearn/backbone/common.py | ximingxing/Learning-To-Learn | 0135cb41521a61d1f3248cf3fe409e51f824fe25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Description : Common routines for models in PyTorch.
Author : xxm
"""
__all__ = ['round_channels', 'Identity', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3',
'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv7x7_block', 'dwconv_block',
'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block',
'pre_conv3x3_block', 'InterpolationBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'IBN',
'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricSequential', 'ParametricConcurrent',
'Hourglass', 'SesquialteralHourglass', 'MultiOutputSequential', 'Flatten']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchmeta.modules import MetaModule, MetaSequential, MetaConv2d, MetaBatchNorm2d
def round_channels(channels,
                   divisor=8):
    """
    Round a (possibly fractional) channel count to the nearest multiple of
    `divisor`, never dropping below `divisor` and never shrinking the
    requested width by more than 10%.

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.

    Returns
    -------
    int
        Weighted number of channels.
    """
    nearest = int(channels + divisor / 2.0) // divisor * divisor
    result = max(nearest, divisor)
    # Never round down by more than 10% of the requested channel count.
    if float(result) < 0.9 * channels:
        result += divisor
    return result
class Identity(nn.Module):
    """
    Pass-through block: forward() returns its input unchanged.
    """

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class Swish(nn.Module):
    """
    Swish activation, x * sigmoid(x), from 'Searching for Activation
    Functions,' https://arxiv.org/abs/1710.05941.
    """

    def forward(self, x):
        gate = torch.sigmoid(x)
        return x * gate
class HSigmoid(nn.Module):
    """
    Hard sigmoid, relu6(x + 3) / 6, the piecewise-linear approximation of
    sigmoid from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    """

    def forward(self, x):
        shifted = x + 3.0
        # In-place relu6 only mutates the temporary created by x + 3.0.
        return F.relu6(shifted, inplace=True) / 6.0
class HSwish(nn.Module):
    """
    H-Swish activation, x * relu6(x + 3) / 6, from 'Searching for
    MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    inplace : bool
        Whether to use inplace version of the module.
    """

    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        activated = F.relu6(x + 3.0, inplace=self.inplace)
        return x * activated / 6.0
def get_activation_layer(activation):
    """
    Create an activation layer from a factory function, a name string, or a
    ready-made module.

    Parameters:
    ----------
    activation : function, or str, or nn.Module
        Activation function or name of activation function.

    Returns
    -------
    nn.Module
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        # A zero-argument factory: call it to build the layer.
        return activation()
    if isinstance(activation, str):
        # Lazy factories so unused entries never touch their classes.
        factories = {
            "relu": lambda: nn.ReLU(inplace=True),
            "relu6": lambda: nn.ReLU6(inplace=True),
            "swish": lambda: Swish(),
            "hswish": lambda: HSwish(inplace=True),
            "sigmoid": lambda: nn.Sigmoid(),
            "hsigmoid": lambda: HSigmoid(),
            "identity": lambda: Identity(),
        }
        if activation not in factories:
            raise NotImplementedError()
        return factories[activation]()
    assert (isinstance(activation, nn.Module))
    return activation
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """
    Point-wise (1x1) convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size=1,
                     stride=stride, groups=groups, bias=bias)
def conv3x3(in_channels,
            out_channels,
            stride=1,
            padding=1,
            dilation=1,
            groups=1,
            bias=False):
    """
    Convolution 3x3 layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
def depthwise_conv3x3(channels,
                      stride):
    """
    Depthwise convolution 3x3 layer (one filter per channel, so
    groups == in_channels == out_channels).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    return nn.Conv2d(channels, channels, kernel_size=3, stride=stride,
                     padding=1, groups=channels, bias=False)
class ConvBlock(nn.Module):
    """
    Conv2d -> (optional BatchNorm) -> (optional activation) block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables
        the activation stage entirely.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(ConvBlock, self).__init__()
        self.use_bn = use_bn
        self.activate = activation is not None
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        if self.activate:
            out = self.activ(out)
        return out
class MetaConvBlock(MetaModule):
    """
    Meta (torchmeta-compatible) convolution block: conv -> optional BatchNorm ->
    optional activation. ``forward`` accepts an external ``params`` dict so
    MAML-style fast weights can be substituted per call.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function; None disables it.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(MetaConvBlock, self).__init__()
        self.use_bn = use_bn
        self.activate = activation is not None
        self.conv = MetaConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if use_bn:
            self.bn = MetaBatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x, params=None):
        # Route the relevant slice of the external params dict to each submodule.
        y = self.conv(x, params=self.get_subdict(params, 'conv'))
        if self.use_bn:
            y = self.bn(y, params=self.get_subdict(params, 'bn'))
        return self.activ(y) if self.activate else y
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=0,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """
    1x1 version of the standard convolution block.

    Parameters mirror `ConvBlock` with `kernel_size` fixed to 1.
    `mode` selects the implementation: 'maml' builds a torchmeta
    `MetaConvBlock`, anything else a plain `ConvBlock`.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """
    3x3 version of the standard convolution block.

    Parameters mirror `ConvBlock` with `kernel_size` fixed to 3.
    `mode` selects the implementation: 'maml' builds a torchmeta
    `MetaConvBlock`, anything else a plain `ConvBlock`.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='',
                  use_bn=True):
    """
    5x5 version of the standard convolution block.

    Parameters mirror `ConvBlock` with `kernel_size` fixed to 5.
    `mode` selects the implementation: 'maml' builds a torchmeta
    `MetaConvBlock`, anything else a plain `ConvBlock`.

    use_bn : bool, default True
        Whether to use BatchNorm layer. Added (trailing, defaulted) for
        consistency with `conv1x1_block`/`conv3x3_block`, which already
        expose it; the default matches the previous implicit behavior.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=3,
                  bias=False,
                  use_bn=True,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='maml'):
    """
    7x7 version of the standard convolution block.

    Parameters mirror `ConvBlock` with `kernel_size` fixed to 7.

    NOTE(review): unlike the other builders in this file, the default here is
    mode='maml' (not ''), and the maml result is wrapped in MetaSequential —
    presumably intentional for the stem layer; confirm against callers.
    """
    common = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        stride=stride,
        padding=padding,
        bias=bias,
        use_bn=use_bn,
        activation=activation)
    if mode == 'maml':
        return MetaSequential(MetaConvBlock(**common))
    return ConvBlock(**common)
def dwconv_block(in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=1,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
    """
    Depthwise version of the standard convolution block.

    Parameters mirror `ConvBlock`; `groups` is forced to `out_channels`,
    making the convolution depthwise.
    """
    return ConvBlock(
        in_channels, out_channels, kernel_size,
        stride=stride, padding=padding, dilation=dilation,
        groups=out_channels, bias=bias, use_bn=use_bn,
        bn_eps=bn_eps, activation=activation)
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 depthwise version of the standard convolution block.

    Thin wrapper over `dwconv_block` with `kernel_size` fixed to 3.
    """
    return dwconv_block(
        in_channels, out_channels, 3,
        stride=stride, padding=padding, dilation=dilation,
        bias=bias, bn_eps=bn_eps, activation=activation)
def dwconv5x5_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=2,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    5x5 depthwise version of the standard convolution block.

    Thin wrapper over `dwconv_block` with `kernel_size` fixed to 5.
    """
    return dwconv_block(
        in_channels, out_channels, 5,
        stride=stride, padding=padding, dilation=dilation,
        bias=bias, bn_eps=bn_eps, activation=activation)
class DwsConvBlock(nn.Module):
    """
    Depthwise separable convolution block: a depthwise `dwconv_block` followed
    by a pointwise `conv1x1_block`, each with its own BatchNorm and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size (depthwise stage).
    stride : int or tuple/list of 2 int
        Strides of the depthwise convolution.
    padding : int or tuple/list of 2 int
        Padding value for the depthwise convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the depthwise convolution.
    bias : bool, default False
        Whether both convolution layers use a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the depthwise stage.
    pw_activation : function or str or None, default nn.ReLU(inplace=True)
        Activation after the pointwise stage.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: nn.ReLU(inplace=True)),
                 pw_activation=(lambda: nn.ReLU(inplace=True))):
        super(DwsConvBlock, self).__init__()
        # Depthwise stage keeps the channel count; pointwise stage maps it.
        self.dw_conv = dwconv_block(
            in_channels, in_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            bias=bias, use_bn=use_bn, bn_eps=bn_eps,
            activation=dw_activation)
        self.pw_conv = conv1x1_block(
            in_channels, out_channels,
            bias=bias, use_bn=use_bn, bn_eps=bn_eps,
            activation=pw_activation)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
def dwsconv3x3_block(in_channels,
                     out_channels,
                     stride=1,
                     padding=1,
                     dilation=1,
                     bias=False,
                     bn_eps=1e-5,
                     dw_activation=(lambda: nn.ReLU(inplace=True)),
                     pw_activation=(lambda: nn.ReLU(inplace=True))):
    """
    3x3 depthwise separable version of the standard convolution block.

    Thin wrapper over `DwsConvBlock` with `kernel_size` fixed to 3.
    """
    return DwsConvBlock(
        in_channels, out_channels, 3,
        stride=stride, padding=padding, dilation=dilation, bias=bias,
        bn_eps=bn_eps, dw_activation=dw_activation,
        pw_activation=pw_activation)
class PreConvBlock(nn.Module):
    """
    Pre-activation convolution block: BatchNorm -> optional ReLU -> Conv2d.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor. Used by PreResNet.
    activate : bool, default True
        Whether the ReLU between BN and conv is applied.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias)

    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if not self.return_preact:
            return self.conv(x)
        # Hand back both the conv output and the normalized/activated input.
        return self.conv(x), x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      bias=False,
                      return_preact=False,
                      activate=True):
    """
    1x1 version of the pre-activated convolution block.

    Thin wrapper over `PreConvBlock` with `kernel_size` fixed to 1 and no
    padding.
    """
    return PreConvBlock(
        in_channels, out_channels, 1,
        stride=stride, padding=0, bias=bias,
        return_preact=return_preact, activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      return_preact=False,
                      activate=True):
    """
    3x3 version of the pre-activated convolution block.

    Thin wrapper over `PreConvBlock` with `kernel_size` fixed to 3 (no bias
    option is exposed here; `PreConvBlock`'s default bias=False applies).
    """
    return PreConvBlock(
        in_channels, out_channels, 3,
        stride=stride, padding=padding, dilation=dilation,
        return_preact=return_preact, activate=activate)
class InterpolationBlock(nn.Module):
    """
    Interpolation upsampling block (module wrapper over `F.interpolate`).

    Parameters:
    ----------
    scale_factor : float
        Multiplier for spatial size.
    mode : str, default 'bilinear'
        Algorithm used for upsampling.
    align_corners : bool, default True
        Whether to align the corner pixels of the input and output tensors.
    """
    def __init__(self,
                 scale_factor,
                 mode="bilinear",
                 align_corners=True):
        super(InterpolationBlock, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        return F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)

    def __repr__(self):
        s = '{name}(scale_factor={scale_factor}, mode={mode}, align_corners={align_corners})'
        return s.format(
            name=self.__class__.__name__,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)

    def calc_flops(self, x):
        # FLOP accounting assumes a single-sample batch; counts are per input
        # element (9 for bilinear, 4 otherwise), with no multiply-accumulates.
        assert (x.shape[0] == 1)
        per_elem = 9 if self.mode == "bilinear" else 4
        return per_elem * x.numel(), 0
def channel_shuffle(x,
                    groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : Tensor
        Input tensor of shape (batch, channels, height, width);
        channels must be divisible by groups (not checked here).
    groups : int
        Number of groups.

    Returns
    -------
    Tensor
        Tensor with channels interleaved across groups.
    """
    batch, channels, height, width = x.size()
    channels_per_group = channels // groups
    # Reshape to (batch, groups, cpg, H, W), swap group/channel axes, flatten.
    shuffled = x.view(batch, groups, channels_per_group, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle(nn.Module):
    """
    Channel shuffle layer — module wrapper over `channel_shuffle` that
    validates divisibility once and remembers the group count.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by `groups`.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        if channels % groups != 0:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
                     groups):
    """
    Alternative channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : Tensor
        Input tensor of shape (batch, channels, height, width);
        channels must be divisible by groups (not checked here).
    groups : int
        Number of groups.

    Returns
    -------
    Tensor
        Tensor with channels regrouped (inverse interleaving order relative
        to `channel_shuffle`).
    """
    batch, channels, height, width = x.size()
    channels_per_group = channels // groups
    # Reshape to (batch, cpg, groups, H, W) — note the swapped middle axes
    # compared to `channel_shuffle` — then transpose and flatten back.
    shuffled = x.view(batch, channels_per_group, groups, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle2(nn.Module):
    """
    Channel shuffle layer — module wrapper over `channel_shuffle2` (the
    alternative shuffle order) that validates divisibility once.

    Parameters:
    ----------
    channels : int
        Number of channels; must be divisible by `groups`.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        if channels % groups != 0:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Rescales channels by a gate computed
    from globally pooled features.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    round_mid : bool, default False
        Whether to round the middle channel number via `round_channels`.
    mid_activation : function or str, default lambda: nn.ReLU(inplace=True)
        Activation after the first (squeeze) convolution.
    out_activation : function or str, default lambda: nn.Sigmoid()
        Gating activation after the second (excite) convolution.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation=(lambda: nn.ReLU(inplace=True)),
                 out_activation=(lambda: nn.Sigmoid())):
        super(SEBlock, self).__init__()
        if round_mid:
            mid_channels = round_channels(float(channels) / reduction)
        else:
            mid_channels = channels // reduction
        # Attribute names kept stable for checkpoint compatibility.
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(channels, mid_channels, bias=True)
        self.activ = get_activation_layer(mid_activation)
        self.conv2 = conv1x1(mid_channels, channels, bias=True)
        self.sigmoid = get_activation_layer(out_activation)

    def forward(self, x):
        # Squeeze to 1x1, excite back to a per-channel gate, then rescale x.
        w = self.sigmoid(self.conv2(self.activ(self.conv1(self.pool(x)))))
        return x * w
class IBN(nn.Module):
    """
    Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441. Splits the channels and applies
    InstanceNorm to one part and BatchNorm to the other.

    Parameters:
    ----------
    channels : int
        Number of channels.
    first_fraction : float, default 0.5
        Fraction of channels that goes to the first normalizer.
    inst_first : bool, default True
        Whether instance normalization covers the first part of channels.
    """
    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True):
        super(IBN, self).__init__()
        self.inst_first = inst_first
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]
        if inst_first:
            self.inst_norm = nn.InstanceNorm2d(num_features=h1_channels, affine=True)
            self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)
        else:
            self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)
            self.inst_norm = nn.InstanceNorm2d(num_features=h2_channels, affine=True)

    def forward(self, x):
        first, second = torch.split(x, self.split_sections, dim=1)
        if self.inst_first:
            out1 = self.inst_norm(first.contiguous())
            out2 = self.batch_norm(second.contiguous())
        else:
            out1 = self.batch_norm(first.contiguous())
            out2 = self.inst_norm(second.contiguous())
        return torch.cat((out1, out2), dim=1)
class DualPathSequential(nn.Sequential):
    """
    A sequential container for modules with dual inputs/outputs.
    Modules execute in insertion order; the first `first_ordinals` and last
    `last_ordinals` modules are treated as ordinary single-path modules.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return both paths after execution.
    first_ordinals : int, default 0
        Number of leading modules with single input/output.
    last_ordinals : int, default 0
        Number of trailing modules with single input/output.
    dual_path_scheme : function
        How a dual-path module consumes (x1, x2).
    dual_path_scheme_ordinal : function
        How an ordinal module consumes (x1, x2).
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
                 dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def forward(self, x1, x2=None):
        modules = list(self._modules.values())
        count = len(modules)
        for idx, module in enumerate(modules):
            is_ordinal = idx < self.first_ordinals or idx >= count - self.last_ordinals
            scheme = self.dual_path_scheme_ordinal if is_ordinal else self.dual_path_scheme
            x1, x2 = scheme(module, x1, x2)
        return (x1, x2) if self.return_two else x1
class Concurrent(nn.Sequential):
    """
    A container that runs every child module on the same input and
    concatenates (or stacks) the branch outputs.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    """
    def __init__(self,
                 axis=1,
                 stack=False):
        super(Concurrent, self).__init__()
        self.axis = axis
        self.stack = stack

    def forward(self, x):
        branch_outs = [module(x) for module in self._modules.values()]
        combine = torch.stack if self.stack else torch.cat
        return combine(tuple(branch_outs), dim=self.axis)
class SequentialConcurrent(nn.Sequential):
    """
    A sequential container that also concatenates every intermediate output
    (and, optionally, the input) along a chosen axis.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    cat_input : bool, default True
        Whether to include the input tensor in the concatenation.
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 cat_input=True):
        super(SequentialConcurrent, self).__init__()
        self.axis = axis
        self.stack = stack
        self.cat_input = cat_input

    def forward(self, x):
        collected = [x] if self.cat_input else []
        for module in self._modules.values():
            x = module(x)
            collected.append(x)
        combine = torch.stack if self.stack else torch.cat
        return combine(tuple(collected), dim=self.axis)
class ParametricSequential(nn.Sequential):
    """
    A sequential container whose children all accept extra keyword
    parameters; kwargs are forwarded to every module in order.
    """
    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)

    def forward(self, x, **kwargs):
        for module in self:
            x = module(x, **kwargs)
        return x
class ParametricConcurrent(nn.Sequential):
    """
    A concatenating container whose children all accept extra keyword
    parameters; kwargs are forwarded to every branch.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x, **kwargs):
        branch_outs = [module(x, **kwargs) for module in self._modules.values()]
        return torch.cat(tuple(branch_outs), dim=self.axis)
class Hourglass(nn.Module):
    """
    A hourglass block.

    Runs the input down the encoder (`down_seq`), then back up through
    `up_seq`, adding a `skip_seq`-transformed encoder output at each level.

    Parameters:
    ----------
    down_seq : nn.Sequential
        Down modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip_seq : nn.Sequential
        Skip connection modules as sequential.
    merge_type : str, default 'add'
        Type of concatenation of up and skip outputs (only 'add' is supported).
    return_first_skip : bool, default False
        Whether return the first skip connection output. Used in ResAttNet.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        # All three branches must supply one module per hourglass level.
        assert (len(up_seq) == len(down_seq))
        assert (len(skip_seq) == len(down_seq))
        assert (merge_type in ["add"])
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.depth = len(down_seq)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq
    def forward(self, x, **kwargs):
        y = None
        # Encoder pass: down_outs[k] is the feature map after k down modules
        # (down_outs[0] is the raw input).
        down_outs = [x]
        for down_module in self.down_seq._modules.values():
            x = down_module(x)
            down_outs.append(x)
        # Decoder pass: walk from the bottleneck back toward full resolution.
        for i in range(len(down_outs)):
            if i != 0:
                # Transform the matching encoder output with its skip module.
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
            if (y is not None) and (self.merge_type == "add"):
                x = x + y
            if i != len(down_outs) - 1:
                # Upsample before merging with the next (shallower) skip.
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            # y is the last skip computed, i.e. skip_seq[0](down_outs[0]).
            return x, y
        else:
            return x
class SesquialteralHourglass(nn.Module):
    """
    A sesquialteral hourglass block.

    Pipeline: first descent (tapping `skip1_seq`), ascent (merging the skip1
    taps and tapping `skip2_seq`), then second descent (merging the skip2
    taps).

    Parameters:
    ----------
    down1_seq : nn.Sequential
        The first down modules as sequential.
    skip1_seq : nn.Sequential
        The first skip connection modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip2_seq : nn.Sequential
        The second skip connection modules as sequential.
    down2_seq : nn.Sequential
        The second down modules as sequential.
    merge_type : str, default 'cat'
        Type of concatenation of up and skip outputs.
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        # The down/up branches share one depth; each skip branch carries one
        # extra module (a tap taken before the first down step).
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq
    def _merge(self, x, y):
        # Combine main-path x with skip y; a no-op when y is None.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x
    def forward(self, x, **kwargs):
        # First descent: tap skip1 before and after every down1 module.
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        # Ascent starts from the deepest skip1 output.
        x = skip1_outs[self.depth]
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            # Merge the skip1 tap of the matching (shallower) level.
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        # Second descent: apply the final skip2 module to the main path, then
        # merge the skip2 taps collected during the ascent.
        x = self.skip2_seq[self.depth](x)
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(nn.Sequential):
    """
    A sequential container that, besides the final output, also collects the
    output of every child whose ``do_output`` attribute is set truthy.
    Returns ``[final] + side_outputs``.
    """
    def __init__(self):
        super(MultiOutputSequential, self).__init__()

    def forward(self, x):
        side_outs = []
        for module in self._modules.values():
            x = module(x)
            if getattr(module, "do_output", False):
                side_outs.append(x)
        return [x] + side_outs
class Flatten(nn.Module):
    """
    Flatten every dimension except the batch dimension into one
    (via `view`, so the input must be contiguous-compatible).
    """
    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
# dataset-extraction residue (avg_line_length | max_line_length | alphanum_fraction) — not source code
# Public API. Fix: conv5x5_block is defined in this module but was missing
# from __all__, so `from ... import *` silently skipped it.
__all__ = ['round_channels', 'Identity', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3',
           'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv5x5_block', 'conv7x7_block',
           'dwconv_block', 'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock',
           'pre_conv1x1_block', 'pre_conv3x3_block', 'InterpolationBlock', 'ChannelShuffle', 'ChannelShuffle2',
           'SEBlock', 'IBN', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricSequential',
           'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass', 'MultiOutputSequential', 'Flatten']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchmeta.modules import MetaModule, MetaSequential, MetaConv2d, MetaBatchNorm2d
def round_channels(channels,
                   divisor=8):
    """
    Round a channel count to the nearest multiple of `divisor` (minimum
    `divisor`), bumping up one step whenever rounding would drop more than
    10% below the requested value.

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.
    """
    rounded = max(divisor, int(channels + divisor / 2.0) // divisor * divisor)
    if rounded < 0.9 * channels:
        rounded += divisor
    return rounded
class Identity(nn.Module):
    """
    Pass-through module: returns its input unchanged.
    """
    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class Swish(nn.Module):
    """
    Swish activation: ``x * sigmoid(x)``.
    """
    def forward(self, x):
        return torch.sigmoid(x) * x
class HSigmoid(nn.Module):
    """
    Hard (piecewise-linear) approximation of sigmoid: ``relu6(x + 3) / 6``.
    """
    def forward(self, x):
        return F.relu6(x + 3.0, inplace=True) / 6.0
class HSwish(nn.Module):
    """
    H-Swish activation: ``x * relu6(x + 3) / 6``.

    Parameters:
    ----------
    inplace : bool, default False
        Whether the inner relu6 operates in place.
    """
    def __init__(self, inplace=False):
        super(HSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_activation_layer(activation):
    """
    Resolve an activation spec into an nn.Module instance.

    Parameters:
    ----------
    activation : function or str or nn.Module
        A zero-arg factory (called to build the layer), a known name
        ('relu', 'relu6', 'swish', 'hswish', 'sigmoid', 'hsigmoid',
        'identity'), or an already-built module (returned as-is).
    """
    assert (activation is not None)
    if isinstance(activation, str):
        if activation == "relu":
            return nn.ReLU(inplace=True)
        if activation == "relu6":
            return nn.ReLU6(inplace=True)
        if activation == "swish":
            return Swish()
        if activation == "hswish":
            return HSwish(inplace=True)
        if activation == "sigmoid":
            return nn.Sigmoid()
        if activation == "hsigmoid":
            return HSigmoid()
        if activation == "identity":
            return Identity()
        raise NotImplementedError()
    if isfunction(activation):
        return activation()
    assert (isinstance(activation, nn.Module))
    return activation
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            bias=False):
    """Build a 1x1 (point-wise) ``nn.Conv2d`` layer."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=1,
                     stride=stride, groups=groups, bias=bias)
def conv3x3(in_channels,
            out_channels,
            stride=1,
            padding=1,
            dilation=1,
            groups=1,
            bias=False):
    """Build a 3x3 ``nn.Conv2d`` layer (padding 1 preserves spatial size)."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=padding, dilation=dilation,
                     groups=groups, bias=bias)
def depthwise_conv3x3(channels,
                      stride):
    """Build a 3x3 depthwise ``nn.Conv2d`` (one filter per channel)."""
    return nn.Conv2d(channels, channels, kernel_size=3, stride=stride,
                     padding=1, groups=channels, bias=False)
class ConvBlock(nn.Module):
    """Standard convolution block: Conv2d -> optional BatchNorm -> optional activation.

    Convolution parameters mirror ``nn.Conv2d``. Additionally:

    use_bn : bool, default True
        Append batch normalization after the convolution.
    bn_eps : float, default 1e-5
        Epsilon for the batch normalization layer.
    activation : factory/str/module or None
        Activation spec resolved via ``get_activation_layer``; ``None``
        disables the activation entirely.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(ConvBlock, self).__init__()
        self.use_bn = use_bn
        self.activate = activation is not None
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        if self.activate:
            out = self.activ(out)
        return out
class MetaConvBlock(MetaModule):
    """Torchmeta-aware counterpart of ``ConvBlock``.

    Same Conv -> optional BatchNorm -> optional activation structure, but
    built from ``MetaConv2d``/``MetaBatchNorm2d`` so that a ``params``
    OrderedDict (e.g. fast weights in MAML-style inner loops) can be routed
    to the sub-modules at forward time instead of using the stored
    parameters.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
        super(MetaConvBlock, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.conv = MetaConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.use_bn:
            self.bn = MetaBatchNorm2d(
                num_features=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x, params=None):
        """Run the block; ``params`` (if given) supplies external weights.

        ``get_subdict`` slices the flat params dict down to the keys of each
        child ('conv', 'bn'); with ``params=None`` the modules fall back to
        their own stored parameters.
        """
        x = self.conv(x, params=self.get_subdict(params, 'conv'))
        if self.use_bn:
            x = self.bn(x, params=self.get_subdict(params, 'bn'))
        if self.activate:
            # Activations are parameter-free here, so no params routing.
            x = self.activ(x)
        return x
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=0,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """1x1 version of the standard convolution block.

    ``mode='maml'`` selects the torchmeta-aware ``MetaConvBlock``; any other
    value selects the plain ``ConvBlock``.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode=''):
    """3x3 version of the standard convolution block.

    ``mode='maml'`` selects the torchmeta-aware ``MetaConvBlock``; any other
    value selects the plain ``ConvBlock``.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='',
                  use_bn=True):
    """5x5 version of the standard convolution block.

    ``mode='maml'`` selects the torchmeta-aware ``MetaConvBlock``; any other
    value selects the plain ``ConvBlock``.

    Fix: expose ``use_bn`` like ``conv1x1_block``/``conv3x3_block`` do,
    instead of silently relying on the block default. The parameter is
    appended (not inserted before ``bn_eps``) so existing positional callers
    keep working; the default ``True`` matches the previous behavior.
    """
    block_cls = MetaConvBlock if mode == 'maml' else ConvBlock
    return block_cls(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  padding=3,
                  bias=False,
                  use_bn=True,
                  activation=(lambda: nn.ReLU(inplace=True)),
                  mode='maml'):
    """7x7 version of the standard convolution block.

    NOTE(review): unlike the other ``convNxN_block`` builders, the default
    here is ``mode='maml'`` (not ``''``), the maml branch additionally wraps
    the block in ``MetaSequential``, and there is no ``bn_eps`` parameter.
    Presumably intentional for this project's meta-learning stems, but worth
    confirming before unifying the builder signatures.
    """
    if mode == 'maml':
        # torchmeta path: wrapped so params dicts can be threaded through.
        return MetaSequential(MetaConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=stride,
            padding=padding,
            bias=bias,
            use_bn=use_bn,
            activation=activation))
    else:
        return ConvBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=stride,
            padding=padding,
            bias=bias,
            use_bn=use_bn,
            activation=activation)
def dwconv_block(in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=1,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: nn.ReLU(inplace=True))):
    """Depthwise convolution block: a ``ConvBlock`` with ``groups == out_channels``."""
    return ConvBlock(
        in_channels=in_channels, out_channels=out_channels,
        kernel_size=kernel_size, stride=stride, padding=padding,
        dilation=dilation, groups=out_channels, bias=bias,
        use_bn=use_bn, bn_eps=bn_eps, activation=activation)
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=1,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """3x3 depthwise convolution block."""
    return dwconv_block(
        in_channels=in_channels, out_channels=out_channels, kernel_size=3,
        stride=stride, padding=padding, dilation=dilation, bias=bias,
        bn_eps=bn_eps, activation=activation)
def dwconv5x5_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=2,
                    dilation=1,
                    bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """5x5 depthwise convolution block."""
    return dwconv_block(
        in_channels=in_channels, out_channels=out_channels, kernel_size=5,
        stride=stride, padding=padding, dilation=dilation, bias=bias,
        bn_eps=bn_eps, activation=activation)
class DwsConvBlock(nn.Module):
    """Depthwise-separable convolution block.

    A depthwise ``ConvBlock`` (channel count unchanged) followed by a
    pointwise 1x1 ``ConvBlock`` (mixes channels), each with its own
    activation spec.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: nn.ReLU(inplace=True)),
                 pw_activation=(lambda: nn.ReLU(inplace=True))):
        super(DwsConvBlock, self).__init__()
        # Depthwise stage: spatial filtering per channel.
        self.dw_conv = dwconv_block(
            in_channels=in_channels, out_channels=in_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias, use_bn=use_bn, bn_eps=bn_eps,
            activation=dw_activation)
        # Pointwise stage: channel mixing to out_channels.
        self.pw_conv = conv1x1_block(
            in_channels=in_channels, out_channels=out_channels, bias=bias,
            use_bn=use_bn, bn_eps=bn_eps, activation=pw_activation)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
def dwsconv3x3_block(in_channels,
                     out_channels,
                     stride=1,
                     padding=1,
                     dilation=1,
                     bias=False,
                     bn_eps=1e-5,
                     dw_activation=(lambda: nn.ReLU(inplace=True)),
                     pw_activation=(lambda: nn.ReLU(inplace=True))):
    """3x3 depthwise-separable convolution block."""
    return DwsConvBlock(
        in_channels=in_channels, out_channels=out_channels, kernel_size=3,
        stride=stride, padding=padding, dilation=dilation, bias=bias,
        bn_eps=bn_eps, dw_activation=dw_activation,
        pw_activation=pw_activation)
class PreConvBlock(nn.Module):
    """Pre-activation convolution block: BatchNorm -> (ReLU) -> Conv2d.

    Parameters mirror ``nn.Conv2d``; additionally:

    return_preact : bool, default False
        Also return the pre-activation tensor (used by pre-activation
        residual architectures for the shortcut branch).
    activate : bool, default True
        Include the ReLU between the norm and the convolution.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)

    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if not self.return_preact:
            return self.conv(x)
        # Hand back the normalized/activated tensor alongside the output.
        return self.conv(x), x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      bias=False,
                      return_preact=False,
                      activate=True):
    """1x1 version of the pre-activation convolution block."""
    return PreConvBlock(
        in_channels=in_channels, out_channels=out_channels, kernel_size=1,
        stride=stride, padding=0, bias=bias, return_preact=return_preact,
        activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      padding=1,
                      dilation=1,
                      return_preact=False,
                      activate=True,
                      bias=False):
    """3x3 version of the pre-activation convolution block.

    Fix: expose ``bias`` like ``pre_conv1x1_block`` does. The parameter is
    appended so positional callers keep working; its default ``False`` is
    the same value ``PreConvBlock`` previously used implicitly, so behavior
    is unchanged for existing callers.
    """
    return PreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        return_preact=return_preact,
        activate=activate)
class InterpolationBlock(nn.Module):
    """Module wrapper around ``F.interpolate`` with a fixed scale factor."""

    def __init__(self,
                 scale_factor,
                 mode="bilinear",
                 align_corners=True):
        super(InterpolationBlock, self).__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        return F.interpolate(x, scale_factor=self.scale_factor,
                             mode=self.mode,
                             align_corners=self.align_corners)

    def __repr__(self):
        template = '{name}(scale_factor={scale_factor}, mode={mode}, align_corners={align_corners})'
        return template.format(
            name=self.__class__.__name__,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)

    def calc_flops(self, x):
        """Rough flop estimate for a single-sample input: 9 flops per input
        element for bilinear, 4 otherwise; no multiply-accumulates."""
        assert (x.shape[0] == 1)
        per_elem = 9 if self.mode == "bilinear" else 4
        return per_elem * x.numel(), 0
def channel_shuffle(x,
                    groups):
    """ShuffleNet channel shuffle: interleave channels across groups.

    Reshapes channels as (groups, channels_per_group), swaps the two axes,
    and flattens back, so each output group receives one channel from every
    input group.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    shuffled = x.view(batch, groups, per_group, height, width)
    shuffled = torch.transpose(shuffled, 1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle(nn.Module):
    """Module wrapper around :func:`channel_shuffle`.

    Raises ``ValueError`` at construction if ``channels`` is not divisible
    by ``groups``.
    """

    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        if channels % groups:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
                     groups):
    """Alternative channel shuffle: groups axis placed second in the reshape.

    Same idea as :func:`channel_shuffle` but with the
    (channels_per_group, groups) factor order, yielding a different (inverse)
    permutation of channels.
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    shuffled = x.view(batch, per_group, groups, height, width)
    shuffled = torch.transpose(shuffled, 1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ChannelShuffle2(nn.Module):
    """Module wrapper around :func:`channel_shuffle2`.

    Raises ``ValueError`` at construction if ``channels`` is not divisible
    by ``groups``.
    """

    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        if channels % groups:
            raise ValueError('channels must be divisible by groups')
        self.groups = groups

    def forward(self, x):
        return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention block.

    Globally average-pools the input, passes it through a 1x1-conv
    bottleneck (``channels -> channels/reduction -> channels``), applies the
    output activation as a gate, and rescales the input channels by it.
    """

    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation=(lambda: nn.ReLU(inplace=True)),
                 out_activation=(lambda: nn.Sigmoid())):
        super(SEBlock, self).__init__()
        if round_mid:
            mid_channels = round_channels(float(channels) / reduction)
        else:
            mid_channels = channels // reduction
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(in_channels=channels, out_channels=mid_channels, bias=True)
        self.activ = get_activation_layer(mid_activation)
        self.conv2 = conv1x1(in_channels=mid_channels, out_channels=channels, bias=True)
        self.sigmoid = get_activation_layer(out_activation)

    def forward(self, x):
        # Squeeze to (N, C, 1, 1), excite through the bottleneck, then gate.
        gate = self.pool(x)
        gate = self.activ(self.conv1(gate))
        gate = self.sigmoid(self.conv2(gate))
        return x * gate
class IBN(nn.Module):
    """Instance-Batch Normalization (IBN-Net style).

    Splits the channels into two parts; one part is instance-normalized and
    the other batch-normalized, then the parts are concatenated back.

    Parameters
    ----------
    channels : int
        Total number of channels.
    first_fraction : float, default 0.5
        Fraction of channels in the first split.
    inst_first : bool, default True
        If True the first split gets instance norm, otherwise batch norm.
    """

    def __init__(self,
                 channels,
                 first_fraction=0.5,
                 inst_first=True):
        super(IBN, self).__init__()
        self.inst_first = inst_first
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]
        if self.inst_first:
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h1_channels,
                affine=True)
            self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)
        else:
            self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)
            self.inst_norm = nn.InstanceNorm2d(
                num_features=h2_channels,
                affine=True)

    def forward(self, x):
        x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
        if self.inst_first:
            y1 = self.inst_norm(x1.contiguous())
            y2 = self.batch_norm(x2.contiguous())
        else:
            y1 = self.batch_norm(x1.contiguous())
            y2 = self.inst_norm(x2.contiguous())
        return torch.cat((y1, y2), dim=1)
class DualPathSequential(nn.Sequential):
    """Sequential container whose modules pass two tensors along.

    The first ``first_ordinals`` and last ``last_ordinals`` modules are
    "ordinal": by default they see only the first tensor while the second is
    carried through unchanged. All other modules receive both tensors.
    The two schemes are overridable callables.
    """

    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
                 dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def forward(self, x1, x2=None):
        children = list(self._modules.values())
        total = len(children)
        for idx, module in enumerate(children):
            is_ordinal = (idx < self.first_ordinals) or (idx >= total - self.last_ordinals)
            scheme = self.dual_path_scheme_ordinal if is_ordinal else self.dual_path_scheme
            x1, x2 = scheme(module, x1, x2)
        return (x1, x2) if self.return_two else x1
class Concurrent(nn.Sequential):
    """Run every child module on the same input and combine the outputs.

    Outputs are concatenated along ``axis`` by default, or stacked into a
    new dimension when ``stack=True``.
    """

    def __init__(self,
                 axis=1,
                 stack=False):
        super(Concurrent, self).__init__()
        self.axis = axis
        self.stack = stack

    def forward(self, x):
        branch_outs = tuple(module(x) for module in self._modules.values())
        combine = torch.stack if self.stack else torch.cat
        return combine(branch_outs, dim=self.axis)
class SequentialConcurrent(nn.Sequential):
    """Sequential chain that keeps every intermediate output.

    Feeds the input through the children in order, collects each child's
    output (optionally including the original input), and concatenates or
    stacks them along ``axis``.
    """

    def __init__(self,
                 axis=1,
                 stack=False,
                 cat_input=True):
        super(SequentialConcurrent, self).__init__()
        self.axis = axis
        self.stack = stack
        self.cat_input = cat_input

    def forward(self, x):
        collected = [x] if self.cat_input else []
        for module in self._modules.values():
            x = module(x)
            collected.append(x)
        combine = torch.stack if self.stack else torch.cat
        return combine(tuple(collected), dim=self.axis)
class ParametricSequential(nn.Sequential):
    """Sequential container forwarding extra keyword arguments to each child."""

    def __init__(self, *args):
        super(ParametricSequential, self).__init__(*args)

    def forward(self, x, **kwargs):
        for module in self:
            x = module(x, **kwargs)
        return x
class ParametricConcurrent(nn.Sequential):
    """Like :class:`Concurrent`, but forwards extra keyword arguments to
    every child before concatenating the outputs along ``axis``."""

    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x, **kwargs):
        branch_outs = [module(x, **kwargs) for module in self._modules.values()]
        return torch.cat(tuple(branch_outs), dim=self.axis)
class Hourglass(nn.Module):
    """Hourglass module: a downscaling branch, an upscaling branch, and a
    per-level skip branch merged by addition.

    Parameters
    ----------
    down_seq : nn.Sequential
        Downscaling modules, applied in order.
    up_seq : nn.Sequential
        Upscaling modules; indexed deepest-first on the way back up.
    skip_seq : nn.Sequential
        Skip modules, one per level (same length as ``down_seq``).
    merge_type : str
        Only "add" is supported (asserted).
    return_first_skip : bool
        Also return the last processed skip tensor alongside the output.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        assert (len(up_seq) == len(down_seq))
        assert (len(skip_seq) == len(down_seq))
        assert (merge_type in ["add"])
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.depth = len(down_seq)
        self.down_seq = down_seq
        self.up_seq = up_seq
        self.skip_seq = skip_seq

    def forward(self, x, **kwargs):
        y = None
        # Downscaling pass: keep every intermediate, including the raw input.
        down_outs = [x]
        for down_module in self.down_seq._modules.values():
            x = down_module(x)
            down_outs.append(x)
        # Upscaling pass: at each step merge the skip-processed tensor from
        # the matching down level (indexing from the deepest level back),
        # then upscale — except after the final (shallowest) step.
        for i in range(len(down_outs)):
            if i != 0:
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
            if (y is not None) and (self.merge_type == "add"):
                x = x + y
            if i != len(down_outs) - 1:
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            # y here is the skip output of the outermost level (i == depth).
            return x, y
        else:
            return x
class SesquialteralHourglass(nn.Module):
    """A "one-and-a-half" hourglass: down -> up -> down with two skip stages.

    Parameters
    ----------
    down1_seq : nn.Sequential
        First downscaling branch.
    skip1_seq : nn.Sequential
        Skips recorded during the first descent (one more than levels).
    up_seq : nn.Sequential
        Upscaling branch.
    skip2_seq : nn.Sequential
        Skips recorded during the ascent (same length as ``skip1_seq``).
    down2_seq : nn.Sequential
        Second downscaling branch.
    merge_type : str
        "cat" (channel concatenation) or "add".
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        self.down1_seq = down1_seq
        self.skip1_seq = skip1_seq
        self.up_seq = up_seq
        self.skip2_seq = skip2_seq
        self.down2_seq = down2_seq

    def _merge(self, x, y):
        # Merge a skip tensor into the main path; None skips are ignored.
        if y is not None:
            if self.merge_type == "cat":
                x = torch.cat((x, y), dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x

    def forward(self, x, **kwargs):
        # First descent: record a skip1 output at every level (including the
        # input level, hence depth + 1 entries).
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        # Restart the main path from the deepest skip1 output.
        x = skip1_outs[self.depth]
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        # Ascent: merge the matching skip1 output at each level and record a
        # skip2 output.
        for i in range(self.depth):
            x = self.up_seq[i](x)
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        x = self.skip2_seq[self.depth](x)
        # Second descent: merge the matching skip2 output at each level.
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(nn.Sequential):
    """Sequential container that also collects the outputs of children
    flagged with a truthy ``do_output`` attribute.

    Returns ``[final_output] + flagged_outputs``.
    """

    def __init__(self):
        super(MultiOutputSequential, self).__init__()

    def forward(self, x):
        flagged = []
        for module in self._modules.values():
            x = module(x)
            if getattr(module, "do_output", False):
                flagged.append(x)
        return [x] + flagged
class Flatten(nn.Module):
    """Flatten all dimensions except the leading (batch) dimension."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
| true | true |
f7217b1eb67a285016b2a98bb8fdd6162553f11b | 1,418 | py | Python | crds/jwst/__init__.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | crds/jwst/__init__.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | crds/jwst/__init__.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os.path
from crds import reftypes
# Directory containing this package (falls back to "." for edge cases where
# __file__ has no directory component).
HERE = os.path.dirname(__file__) or "."
# Reference-type metadata loaded from this package's type specification file.
TYPES = reftypes.from_package_file(__file__)
# Convenience re-exports of the loaded type metadata.
INSTRUMENTS = TYPES.instruments
EXTENSIONS = TYPES.extensions
TEXT_DESCR = TYPES.text_descr
FILEKINDS = TYPES.filekinds
# Placeholder value substituted for undefined matching parameters.
UNDEFINED_PARKEY_SUBST_VALUE = "UNDEFINED"
# Hooks for normalizing irregular instrument / type names; empty here
# (presumably none are needed for this mission — confirm against other
# mission packages that populate these).
INSTRUMENT_FIXERS = {
}
TYPE_FIXERS = {
}
# Reference-file provenance keywords, in JWST data-model (META.*) notation.
PROVENANCE_KEYWORDS = ("META.REFFILE.DESCRIPTION", "META.REFFILE.PEDIGREE", "META.REFFILE.USEAFTER","META.REFFILE.HISTORY", "META.REFFILE.AUTHOR")
# PROVENANCE_KEYWORDS = ("DESCRIP", "PEDIGREE", "USEAFTER","HISTORY", "AUTHOR")
USEAFTER_KEYWORDS = ("META.OBSERVATION.DATE", "META.OBSERVATION.TIME") # Dataset keywords matching in UseAfter selectors
DEFAULT_SELECTORS = ("Match", "UseAfter") # Normal selector hierarchy in rmap
# When loading headers, make sure each keyword in a tuple is represented with
# the same value enabling any form to be used. Case insensitive.
CROSS_STRAPPED_KEYWORDS = {
    "META.INSTRUMENT.NAME" : ["INSTRUME", "INSTRUMENT", "META.INSTRUMENT.TYPE"],
    "META.TELESCOPE" : ["TELESCOP","TELESCOPE"],
    "META.REFFILE.AUTHOR" : ["AUTHOR"],
    "META.REFFILE.PEDIGREE" : ["PEDIGREE"],
    "META.REFFILE.USEAFTER" : ["USEAFTER"],
    "META.REFFILE.DESCRIPTION" : ["DESCRIP","DESCRIPTION"],
    "META.REFFILE.HISTORY" : ["HISTORY"],
}
| 33.761905 | 146 | 0.738364 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os.path
from crds import reftypes
HERE = os.path.dirname(__file__) or "."
TYPES = reftypes.from_package_file(__file__)
INSTRUMENTS = TYPES.instruments
EXTENSIONS = TYPES.extensions
TEXT_DESCR = TYPES.text_descr
FILEKINDS = TYPES.filekinds
UNDEFINED_PARKEY_SUBST_VALUE = "UNDEFINED"
INSTRUMENT_FIXERS = {
}
TYPE_FIXERS = {
}
PROVENANCE_KEYWORDS = ("META.REFFILE.DESCRIPTION", "META.REFFILE.PEDIGREE", "META.REFFILE.USEAFTER","META.REFFILE.HISTORY", "META.REFFILE.AUTHOR")
USEAFTER_KEYWORDS = ("META.OBSERVATION.DATE", "META.OBSERVATION.TIME")
DEFAULT_SELECTORS = ("Match", "UseAfter")
CROSS_STRAPPED_KEYWORDS = {
"META.INSTRUMENT.NAME" : ["INSTRUME", "INSTRUMENT", "META.INSTRUMENT.TYPE"],
"META.TELESCOPE" : ["TELESCOP","TELESCOPE"],
"META.REFFILE.AUTHOR" : ["AUTHOR"],
"META.REFFILE.PEDIGREE" : ["PEDIGREE"],
"META.REFFILE.USEAFTER" : ["USEAFTER"],
"META.REFFILE.DESCRIPTION" : ["DESCRIP","DESCRIPTION"],
"META.REFFILE.HISTORY" : ["HISTORY"],
}
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.