id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
import os
import conll
import sys

# Download URL for each supported Universal Dependencies release.
ud_version_uri = {
    'v2.8': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3687/ud-treebanks-v2.8.tgz',
    'v2.7': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3424/ud-treebanks-v2.7.tgz',
    'v2.6': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3226/ud-treebanks-v2.6.tgz',
    'v2.5': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz',
    'v2.4': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2988/ud-treebanks-v2.4.tgz',
    'v2.3': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2895/ud-treebanks-v2.3.tgz',
    'v2.2': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2837/ud-treebanks-v2.2.tgz',
    'v2.1': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2515/ud-treebanks-v2.1.tgz',
    'v2.0': 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-1983/ud-treebanks-v2.0.tgz'
}

# Scratch directory next to this script for downloads and extraction.
TMP_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '.tmp')

if __name__ == "__main__":
    if len(sys.argv) == 1:
        # No arguments: process every known UD version.
        pass
    elif len(sys.argv) == 2:
        # BUG FIX: the original did `if not ud_version_uri[sys.argv[1]]`,
        # which raises KeyError for an unknown version before the error
        # message can ever be printed. Test membership instead.
        if sys.argv[1] not in ud_version_uri:
            print('Unknown or unsupported ud_version `%s`.' % sys.argv[1])
            exit(-1)
        ud_version_uri = {sys.argv[1]: ud_version_uri[sys.argv[1]]}
    elif len(sys.argv) == 3:
        # Explicit version name and URL supplied by the caller.
        ud_version_uri = {sys.argv[1]: sys.argv[2]}
    else:
        print('Wrong number of arguments. Usage:\n$ python run.py [ud_version [ud_url]]')
        exit(-1)

    if not os.path.exists(TMP_DIR):
        os.mkdir(TMP_DIR)
    print('Working directory: %s.' % TMP_DIR)
    os.chdir(TMP_DIR)

    # Download -> extract -> flatten each requested release.
    for version, uri in ud_version_uri.items():
        print('Processing %s.' % version)
        print('Downloading %s...' % uri)
        archive_path = conll.process.download(uri)
        print('Extracting %s...' % archive_path)
        input_conllu_directory = conll.process.extract(archive_path)
        print('Flattening %s...' % input_conllu_directory)
        output_csv_file = conll.process.flatten(input_conllu_directory)
8081861 | import docker
import logging
logger = logging.getLogger(__name__)
class DockerClientMixin(object):
    """Mixin granting subclasses lazy, shared access to a docker Client."""

    @property
    def docker_cli(self):
        """A docker Client object. Always returns the same one."""
        # EAFP: reuse the cached client if one was already built.
        try:
            return self._docker_cli
        except AttributeError:
            pass
        logger.debug("Creating Docker client")
        self._docker_cli = docker.APIClient(
            base_url="unix://var/run/docker.sock",
            version="auto",
        )
        return self._docker_cli
| StarcoderdataPython |
6689125 | <reponame>Jovamih/PythonProyectos
#/usr/bin/env python
import pandas as pd
import numpy as np
import numexpr
def rendimiento():
    """Demonstrate pandas string-expression evaluation (eval/query).

    Builds a random 8x4 integer frame, then shows: boolean masking with
    ``pd.eval``, in-place column creation with ``DataFrame.eval``, and
    row filtering with ``DataFrame.query`` using an ``@``-referenced
    local variable. Results are printed; returns None.
    """
    # The numexpr library (imported at module level) lets pandas evaluate
    # string expressions in compiled form; pandas falls back to the pure
    # Python engine when it is unavailable.
    data = pd.DataFrame(np.random.randint(12, 200, size=(8, 4)), columns=list('ABCD'))
    # pd.eval resolves `data` from the calling frame; result is a boolean mask.
    data_mask = pd.eval('data.A>100')
    print(data[data_mask])
    # DataFrame.eval supports in-place assignment of derived columns.
    data.eval('TOTAL=A+B+C+D', inplace=True)
    print(data)
    # query() filters rows directly; @name references a local Python variable.
    data_mean = (data.mean(axis=0)).mean()
    filtered = data.query('(A>@data_mean )& (D>@data_mean)')
    # BUG FIX: the original did `print(data[data_mask])` using the already
    # *filtered frame* as an indexer, which raises because a non-boolean
    # DataFrame cannot be used as a mask. query() already returns the
    # filtered rows, so print them directly.
    print(filtered)


if __name__ == "__main__":
    rendimiento()
8175266 | #!/usr/bin/env python2.7
#-*- coding: utf-8 -*-
from copy import deepcopy
import shutil, os, sys
from envconfig import *
DST_DIR = './dst-files/'
def load_env_kv():  # return [prog_env, nodes_env]
    """Collect program- and node-level settings from envconfig.

    Returns a two-element list: a dict of program paths/commands and the
    per-node configuration mapping, both deep-copied so callers can
    mutate them freely without touching the envconfig module state.
    """
    prog_settings = {
        'QTUM_PREFIX': QTUM_PREFIX,
        'QTUM_BIN': QTUM_BIN,
        'CMD_QTUMD': CMD_QTUMD,
        'CMD_QTUMCLI': CMD_QTUMCLI,
    }
    return [deepcopy(prog_settings), deepcopy(QTUM_NODES)]
def main() :
env_kv = load_env_kv()
prog_env = env_kv[0]
nodes_env = env_kv[1]
for node_name in sorted(nodes_env.keys()) :
node_datadir = nodes_env[node_name]['NODEX__QTUM_DATADIR']
try:
os.makedirs(node_datadir)
except (OSError) as e :
if e.errno == 17 :
print 'node_datadir(%s) seems not empty, please check it artificially. prog exit now.' % node_datadir
sys.exit(1)
else :
raise
f = os.listdir(node_datadir)
if len(f) != 0:
print 'node_datadir(%s) seems not empty, please check it artificially. prog exit now.'
sys.exit(1)
####
node_dir = os.path.join(DST_DIR, node_name)
####
f_s = os.path.join(node_dir, 'qtum.conf')
f_d = os.path.join(node_datadir, 'qtum.conf')
shutil.copy(f_s, f_d)
####
filename = node_name + '--' + 'wrp-qtumd.sh'
f_s = os.path.join(node_dir, filename)
f_d = os.path.join(QTUM_BIN, filename)
shutil.copy(f_s, f_d)
os.system('chmod +x %s ' % f_d)
####
filename = node_name + '--' + 'wrp-qtum-cli.sh'
f_s = os.path.join(node_dir, filename)
f_d = os.path.join(QTUM_BIN, filename)
shutil.copy(f_s, f_d)
os.system('chmod +x %s ' % f_d)
####
filename = node_name + '--' + 'wrp-solar.sh'
f_s = os.path.join(node_dir, filename)
f_d = os.path.join(QTUM_BIN, filename)
shutil.copy(f_s, f_d)
os.system('chmod +x %s ' % f_d)
####
f_s = os.path.join(DST_DIR, 'qtum-path.sh')
f_d = os.path.join('/etc/profile.d', 'qtum-path.sh')
cmd = "sudo cp -rf %s %s" % (f_s, f_d)
print "cmd: %s ; to set PATH" % cmd
os.system(cmd)
os.chdir(QTUM_BIN)
####
sl_list = [
(QTUM_DFT_NODE + '--' + 'wrp-qtumd.sh', 'wrp-qtumd'),
(QTUM_DFT_NODE + '--' + 'wrp-qtum-cli.sh', 'wrp-qtum-cli'),
(QTUM_DFT_NODE + '--' + 'wrp-solar.sh', 'wrp-solar'),
]
for (f_r, f_l) in sl_list :
try:
os.remove(f_l)
except OSError as e:
if e.errno == 2:
pass
else :
raise
os.symlink(f_r, f_l)
if __name__ == '__main__' :
main()
| StarcoderdataPython |
5020412 | import inspect
from ..functions import CF
_VN = CF()
class DocstringRewriteMeta(type):
    """Modify docstrings at time of import.

    **Methodology**

    To do this, we intercede before the class is created and modify
    the docstrings of the attributes defined on the class.

    Inherited methods are also modified. We cannot simply modify the
    docstrings of inherited methods, because then the parent classes'
    methods will have the wrong docstring. Instead, we must actually
    copy the functions, and then modify the docstring.

    Special treatment is given to methods decorated with
    ``@property``, ``@staticmethod`` and ``@classmethod``, as well as
    user-defined decorations.

    .. versionadded:: (cfdm) 1.8.7.0

    """

    # Based on
    # http://www.jesshamrick.com/2013/04/17/rewriting-python-docstrings-with-a-metaclass/
    def __new__(cls, class_name, parents, attrs):
        """Combines docstring substitutions across the inheritance tree.

        That is, combines docstring substitutions from all classes in the
        inheritance tree.

        The value for a key that occurs in multiple classes will be taken
        from the class closest to the child class.

        """
        class_name_lower = class_name.lower()
        docstring_rewrite = {}
        # Collect substitutions from the parents, furthest ancestor
        # first, so that classes nearer the child override further ones.
        for parent in parents[::-1]:
            parent_docstring_rewrite = getattr(
                parent, "_docstring_substitutions", None
            )
            if parent_docstring_rewrite is not None:
                docstring_rewrite.update(parent_docstring_rewrite(parent))
            else:
                parent_docstring_rewrite = getattr(
                    parent, "__docstring_substitutions__", None
                )
                if parent_docstring_rewrite is not None:
                    docstring_rewrite.update(parent_docstring_rewrite(None))

        # Substitutions defined on the class being created take
        # precedence over all inherited ones.
        class_docstring_rewrite = attrs.get(
            "__docstring_substitutions__", None
        )
        if class_docstring_rewrite is not None:
            docstring_rewrite.update(class_docstring_rewrite(None))

        # User-defined substitutions must not clash with the special
        # identifiers ({{class}}, {{package}}, ...).
        special = DocstringRewriteMeta._docstring_special_substitutions()
        for key in special:
            if key in docstring_rewrite:
                raise ValueError(
                    f"Can't use {key!r} as a user-defined "
                    "docstring substitution."
                )

        # ------------------------------------------------------------
        # Find the package depth
        # ------------------------------------------------------------
        package_depth = 0
        for parent in parents[::-1]:
            parent_depth = getattr(parent, "_docstring_package_depth", None)
            if parent_depth is not None:
                package_depth = parent_depth(parent)
            else:
                parent_depth = getattr(
                    parent, "__docstring_package_depth__", None
                )
                if parent_depth is not None:
                    package_depth = parent_depth(None)

        class_depth = attrs.get("__docstring_package_depth__", None)
        if class_depth is not None:
            package_depth = class_depth(None)

        package_depth += 1

        # {{package}} expands to the first `package_depth` dotted fields
        # of the module path.
        module = attrs["__module__"].split(".")
        package_name = ".".join(module[:package_depth])

        # ------------------------------------------------------------
        # Find which methods to exclude
        # ------------------------------------------------------------
        method_exclusions = []
        for parent in parents[::-1]:
            parent_exclusions = getattr(
                parent, "_docstring_method_exclusions", None
            )
            if parent_exclusions is not None:
                method_exclusions.extend(parent_exclusions(parent))
            else:
                parent_exclusions = getattr(
                    parent, "__docstring_method_exclusions__", None
                )
                if parent_exclusions is not None:
                    method_exclusions.extend(parent_exclusions(None))

        class_exclusions = attrs.get("__docstring_method_exclusions__", None)
        if class_exclusions is not None:
            method_exclusions.extend(class_exclusions(None))

        method_exclusions = set(method_exclusions)

        # ------------------------------------------------------------
        # Rewrite the docstrings of attributes defined directly on the
        # class being created.
        # ------------------------------------------------------------
        for attr_name, attr in attrs.items():
            # Skip special methods that aren't functions
            if attr_name.startswith("__") and not inspect.isfunction(attr):
                continue

            # Skip methods without docstrings
            if not hasattr(attr, "__doc__"):
                continue

            if attr_name in method_exclusions:
                continue

            # @property
            if hasattr(attr, "fget"):
                # Note that here inspect.isroutine(attr) is False for
                # @property methods (but this is not the case for
                # properties in the parent classes).
                DocstringRewriteMeta._docstring_update(
                    package_name,
                    class_name,
                    class_name_lower,
                    attr,
                    attr_name,
                    docstring_rewrite,
                )
                continue

            # Still here?
            if not inspect.isroutine(attr):
                continue

            # Still here?
            is_classmethod = False
            is_staticmethod = False

            # Find out if the method is a classmethod (the
            # inspect.ismethod technique doesn't work for this class)
            if isinstance(attr, classmethod):
                is_classmethod = True
            elif isinstance(attr, staticmethod):
                is_staticmethod = True

            is_wrapped = hasattr(attr, "__wrapped__")
            if is_wrapped:
                # Work on the underlying function; the wrapper is
                # re-attached after the docstring rewrite.
                wrapper = attr
                attr = attr.__wrapped__

            if is_classmethod or is_staticmethod:
                f = getattr(attr, "__func__", attr)

                # Copy the method
                attr = type(f)(
                    f.__code__,
                    f.__globals__,
                    f.__name__,
                    f.__defaults__,
                    f.__closure__,
                )

                # Make sure that the keyword argument defaults are set
                # correctly. In general they will be, but not if there
                # is a variable number of positional arguments, such
                # as in: def foo(self, *x, y=None)
                attr.__kwdefaults__ = f.__kwdefaults__

            # Update docstring
            DocstringRewriteMeta._docstring_update(
                package_name,
                class_name,
                class_name_lower,
                attr,
                attr_name,
                docstring_rewrite,
            )

            # Redecorate
            if is_classmethod:
                attrs[attr_name] = classmethod(attr)

            if is_staticmethod:
                attrs[attr_name] = staticmethod(attr)

            if is_wrapped:
                wrapper.__doc__ = attr.__doc__
                wrapper.__wrapped__ = attr
                attrs[attr_name] = wrapper

        # ------------------------------------------------------------
        # Now loop round the parent classes, copying any methods that
        # they override and rewriting those docstrings.
        # ------------------------------------------------------------
        for parent in parents:
            for attr_name in dir(parent):
                if attr_name in attrs:
                    # We already have this method from higher up in
                    # the method resolution order, so do not overwrite
                    # it and move on to the next method.
                    continue

                # ----------------------------------------------------
                # Get the original method, copy it, update the
                # docstring, and put the modified copy back into the
                # parent class.
                # ----------------------------------------------------
                original_f = getattr(parent, attr_name)

                # Skip special methods that aren't functions
                if attr_name.startswith("__") and not inspect.isfunction(
                    original_f
                ):
                    continue

                if attr_name in method_exclusions:
                    continue

                is_classmethod = False
                is_staticmethod = False
                is_wrapped = False

                try:
                    if hasattr(original_f, "fget"):
                        # The original function is decorated with
                        # @property
                        attr = type(original_f)(
                            original_f.fget, original_f.fset, original_f.fdel
                        )
                    else:
                        if not inspect.isroutine(original_f):
                            continue

                        if inspect.ismethod(original_f):
                            is_classmethod = True
                        elif isinstance(
                            parent.__dict__.get(attr_name), staticmethod
                        ):
                            is_staticmethod = True

                        is_wrapped = hasattr(original_f, "__wrapped__")

                        f = getattr(original_f, "__func__", original_f)

                        # Copy the method
                        attr = type(f)(
                            f.__code__,
                            f.__globals__,
                            f.__name__,
                            f.__defaults__,
                            f.__closure__,
                        )

                        # Make sure that the keyword argument defaults
                        # are set correctly. In general they will be,
                        # but not if there is a variable number of
                        # positional arguments, such as in: def
                        # foo(self, *x, y=None)
                        attr.__kwdefaults__ = f.__kwdefaults__

                        if is_wrapped:
                            attr.__doc__ = original_f.__doc__

                    # Update the docstring
                    DocstringRewriteMeta._docstring_update(
                        package_name,
                        class_name,
                        class_name_lower,
                        attr,
                        attr_name,
                        docstring_rewrite,
                    )

                    # Register a classmethod
                    if is_classmethod:
                        attr = classmethod(attr)

                    # Register a staticmethod
                    if is_staticmethod:
                        attr = staticmethod(attr)

                    if is_wrapped:
                        # Copy the wrapper and update its wrapped
                        # function
                        wrapper = type(original_f)(
                            original_f.__code__,
                            original_f.__globals__,
                            original_f.__name__,
                            original_f.__defaults__,
                            original_f.__closure__,
                        )
                        wrapper.__wrapped__ = attr
                        wrapper.__doc__ = attr.__doc__
                        attr = wrapper

                    # Put the modified method back into the parent
                    # class
                    attrs[attr_name] = attr
                except Exception:
                    # Some attributes (e.g. method descriptors) cannot
                    # be copied; silently leave those untouched.
                    pass
                #                    raise RuntimeError(str(error) + ': ' +
                #                                       '.'.join([parent.__name__,
                #                                                 attr_name]))

        # ------------------------------------------------------------
        # Rewrite the docstring of the class itself.
        #
        # The method is as follows:
        #
        # 1. If __doc__ contains substitutions then save the
        #    unsubstituted docstring in __doc_template__, rewrite the
        #    docstring, and save it in __doc__.
        #
        # 2. If __doc__ is not None and does not contain substitutions
        #    then set __doc_template__ to None.
        #
        # 3. If __doc__ is None then search back through the parent
        #    classes until you find one with a non-None __doc__ AND a
        #    non-None __doc_template__. If such a parent exists then
        #    copy its __doc_template__ to the child class's
        #    __doc_template__, rewrite it, and save the rewritten
        #    docstring to the child class's __doc__.
        #
        # ------------------------------------------------------------
        doc = attrs.get("__doc__")
        doc_template = None
        set_doc_template_to_None = False
        if doc is None:
            for parent in parents[::-1]:
                x = getattr(parent, "__doc__", None)
                if x is not None:
                    doc_template = getattr(parent, "__doc_template__", None)
                    if doc_template is not None:
                        break

            if doc_template is None:
                set_doc_template_to_None = True

            if doc_template is not None:
                doc = doc_template

        if doc is not None and "{{" in doc:
            doc_template = doc
            doc = DocstringRewriteMeta._docstring_update(
                package_name,
                class_name,
                class_name_lower,
                None,
                None,
                docstring_rewrite,
                class_docstring=doc,
            )
            attrs["__doc__"] = doc

        if set_doc_template_to_None:
            doc_template = None

        attrs["__doc_template__"] = doc_template

        # ------------------------------------------------------------
        # Create the class
        # ------------------------------------------------------------
        return super().__new__(cls, class_name, parents, attrs)

    # ----------------------------------------------------------------
    # Private methods
    # ----------------------------------------------------------------
    @classmethod
    def _docstring_special_substitutions(cls):
        """Return the special docstring substitutions.

        ``{{class}}`` is replaced by the name of the class.

        ``{{class_lower}}`` is replaced by the name of the class
        converted to all lower case.

        ``{{package}}`` is replaced by the name of the package, as defined
        by the first N ``.`` (dot) separated fields of the class's
        `__module__` attribute, where N is determined by
        `_docstring_package_depth`.

        .. versionadded:: (cfdm) 1.8.7.0

        .. seealso:: `_docstring_package_depth`,
                     `_docstring_method_exclusions`,
                     `_docstring_substitutions`,
                     `__docstring_substitutions__`,
                     `__docstring_package_depth__`,
                     `__docstring_method_exclusions__`

        :Returns:

            `tuple`
                The special docstring substitution identifiers.

        """
        return (
            "{{class}}",
            "{{class_lower}}",
            "{{package}}",
            "{{VN}}",
        )

    @staticmethod
    def _docstring_substitutions(cls):
        """Returns the substitutions that apply to methods of the class.

        Text to be replaced is specified as a key in the returned
        dictionary, with the replacement text defined by the corresponding
        value.

        Special docstring substitutions, as defined by a class's
        `_docstring_special_substitutions` method, may be used in the
        replacement text, and will be substituted as usual.

        Replacement text may contain other non-special substitutions.

        .. note:: The values are only checked once for embedded
                  non-special substitutions, so if the embedded
                  substitution itself contains a non-special substitution
                  then the latter will *not* be replaced. This restriction
                  is to prevent the possibility of infinite recursion.

        A key must be either a `str` or a `re.Pattern` object.

        If a key is a `str` then the corresponding value must be a string.

        If a key is a `re.Pattern` object then the corresponding value
        must be a string or a callable, as accepted by the
        `re.Pattern.sub` method.

        .. versionadded:: (cfdm) 1.8.7.0

        .. seealso:: `_docstring_special_substitutions`,
                     `_docstring_package_depth`,
                     `_docstring_method_exclusions`,
                     `__docstring_substitutions__`,
                     `__docstring_package_depth__`,
                     `__docstring_method_exclusions__`

        :Parameters:

            cls: class
                The class.

        :Returns:

            `dict`
                The docstring substitutions. A dictionary key matches text
                in the docstrings, with a corresponding value its
                replacement.

        """
        out = {}
        # Walk the immediate bases, furthest first, so that nearer
        # classes' substitutions take precedence.
        for klass in cls.__bases__[::-1]:
            d_s = getattr(klass, "_docstring_substitutions", None)
            if d_s is not None:
                out.update(d_s(klass))
            else:
                d_s = getattr(klass, "__docstring_substitutions__", None)
                if d_s is not None:
                    out.update(d_s(None))

        d_s = getattr(cls, "__docstring_substitutions__", None)
        if d_s is not None:
            out.update(d_s(None))

        return out

    @staticmethod
    def _docstring_package_depth(cls):
        """Returns the class {{package}} substitutions package depth.

        In docstrings, ``{{package}}`` is replaced by the name of the
        package, as defined by the first N+1 ``.`` (dot) separated fields
        of the class's `__module__` attribute.

        N defaults to 0, but may be set to any non-negative integer, M, by
        creating a `__docstring_package_depth__` method that returns M.

        .. versionadded:: (cfdm) 1.8.7.0

        .. seealso:: `_docstring_special_substitutions`,
                     `_docstring_substitutions`,
                     `_docstring_method_exclusions`,
                     `__docstring_substitutions__`,
                     `__docstring_package_depth__`,
                     `__docstring_method_exclusions__`

        :Parameters:

            cls: class
                The class.

        :Returns:

            `int`
                The package depth.

        """
        out = 0
        # Nearer classes' depth overrides that of further ancestors.
        for klass in cls.__bases__[::-1]:
            d_s = getattr(klass, "_docstring_package_depth", None)
            if d_s is not None:
                out = d_s(klass)
            else:
                d_s = getattr(klass, "__docstring_package_depth__", None)
                if d_s is not None:
                    out = d_s(None)

        d_s = getattr(cls, "__docstring_package_depth__", None)
        if d_s is not None:
            out = d_s(None)

        return out

    @staticmethod
    def _docstring_method_exclusions(cls):
        """Returns method names excluded in the class substitutions.

        Exclusions for a class may be defined by creating a
        `__docstring_method_exclusions__` method that returns the sequence
        of names of methods to be excluded. These exclusions will also
        apply to any child classes.

        Exclusions may be defined for any reason, but in particular may
        be required if a method has a non-rewritable docstring. An
        example of a method that has a non-rewritable docstring is when the
        method is a 'method_descriptor' object, such as `list.append`: any
        class that inherits such a method will need to exclude it,
        unless it is explicitly overridden in the child class.

        .. versionadded:: (cfdm) 1.8.7.0

        .. seealso:: `_docstring_special_substitutions`,
                     `_docstring_substitutions`,
                     `_docstring_package_depth`,
                     `__docstring_substitutions__`,
                     `__docstring_package_depth__`,
                     `__docstring_method_exclusions__`

        :Parameters:

            cls: class
                The class.

        :Returns:

            `set`
                The names of the methods to exclude from the docstring
                substitution process.

        """
        out = [
            "_docstring_special_substitutions",
            "_docstring_package_depth",
        ]
        # Merge exclusions from the immediate bases with any declared on
        # the class itself.
        for klass in cls.__bases__[::-1]:
            d_s = getattr(klass, "_docstring_method_exclusions", None)
            if d_s is not None:
                out.extend(d_s(klass))
            else:
                d_s = getattr(klass, "__docstring_method_exclusions__", None)
                if d_s is not None:
                    out.extend(d_s(None))

        d_s = getattr(cls, "__docstring_method_exclusions__", None)
        if d_s is not None:
            out.extend(d_s(None))

        return set(out)

    @classmethod
    def _docstring_update(
        cls,
        package_name,
        class_name,
        class_name_lower,
        f,
        method_name,
        config,
        class_docstring=None,
    ):
        """Performs docstring substitutions on a method at import time.

        .. versionadded:: (cfdm) 1.8.7.0

        :Parameters:

            package_name: `str`
                The value substituted for ``{{package}}``.

            class_name: `str`
                The value substituted for ``{{class}}``.

            class_name_lower: `str`
                The value substituted for ``{{class_lower}}``.

            f: class method
                The method whose docstring is rewritten in-place, or
                `None` when *class_docstring* is given instead.

            method_name: `str`
                The name of the method.

            config: `dict`
                The user-defined docstring substitutions.

            class_docstring: `str`, optional
                A class docstring to rewrite instead of ``f.__doc__``.

        :Returns:

                The rewritten docstring (or the input unchanged if it is
                `None` or contains no ``{{`` markers).

        """
        if class_docstring is not None:
            doc = class_docstring
        else:
            doc = f.__doc__

        # Nothing to do when there is no docstring or no substitution
        # markers in it.
        if doc is None or "{{" not in doc:
            return doc

        # ------------------------------------------------------------
        # Do general substitutions first
        # ------------------------------------------------------------
        for key, value in config.items():
            # Substitute non-special substitutions embedded within
            # this value, updating the value if any are found. Note
            # that any non-special substitutions embedded within the
            # embedded substitution are *not* replaced.
            #            for k, v in config.items():
            #                try:
            #                    if k not in value:
            #                        continue
            #                except TypeError:
            #                    continue
            #
            #                try:
            #                    # Compiled regular expression substitution
            #                    value = key.sub(v, value)
            #                except AttributeError:
            #                    # String substitution
            #                    value = value.replace(k, v)

            # Substitute the key for the value
            try:
                # Compiled regular expression substitution
                doc = key.sub(value, doc)
            except AttributeError:
                # String substitution
                doc = doc.replace(key, value)

        # ------------------------------------------------------------
        # Now do special substitutions
        # ------------------------------------------------------------
        # Insert the name of the package
        doc = doc.replace("{{package}}", package_name)

        # Insert the name of the class containing this method
        doc = doc.replace("{{class}}", class_name)

        # Insert the lower case name of the class containing this method
        doc = doc.replace("{{class_lower}}", class_name_lower)

        # Insert the CF version
        doc = doc.replace("{{VN}}", _VN)

        # ----------------------------------------------------------------
        # Set the rewritten docstring on the method
        # ----------------------------------------------------------------
        if class_docstring is None:
            f.__doc__ = doc

        return doc
| StarcoderdataPython |
# Read the title to look for, then scan titles from stdin until either a
# match or the sentinel "No More Books" is entered.
target_title = input()
books_checked = 0
found = False
# iter(input, sentinel) keeps calling input() until it returns the sentinel.
for title in iter(input, "No More Books"):
    if title == target_title:
        found = True
        break
    books_checked += 1
if found:
    print(f"You checked {books_checked} books and found it.")
else:
    print("The book you search is not here!")
    print(f"You checked {books_checked} books.")
4957842 | <filename>src/vidsortml/utilities/database_utilities.py
import pickle
import pprint
import os
import pathlib
DATABASES_ROOT_DIR = pathlib.Path(__file__).parents[1] / 'databases'
class database:
    """A tiny named store mapping entry titles to collections of people."""

    name: str = "Default"
    data: dict

    def __init__(self, name):
        self.name = name
        self.data = {}

    def update(self, key, value):
        # Insert or overwrite a single entry.
        self.data[key] = value

    def display_entries(self):
        # Pretty-print the whole mapping.
        pprint.pprint(self.data)
        # for key, value in self.data.items():

    def search(self, names):
        """Print every entry that contains all of *names* (str or list)."""
        if isinstance(names, str):
            names = [names]
        for movie, people in self.data.items():
            if all(person in people for person in names):
                print("Found", " and ".join(names), "in", movie + ".")
def load_database(name):
    """Unpickle the database stored under *name*, or create a fresh one.

    A missing file is not an error: a new, empty `database` with that
    name is returned instead.
    """
    database_path = pathlib.Path(DATABASES_ROOT_DIR / name)
    try:
        with open(database_path, 'rb') as handle:
            return pickle.load(handle)
    except FileNotFoundError:
        return database(name)
def save_database(db):
    """Pickle *db* under its name; return 0 on success, 1 on any failure.

    Creates the databases directory on first use. Errors are printed
    rather than raised, preserving the best-effort contract.
    """
    try:
        if not DATABASES_ROOT_DIR.is_dir():
            DATABASES_ROOT_DIR.mkdir(parents=True, exist_ok=False)
        target = pathlib.Path(DATABASES_ROOT_DIR / db.name)
        with open(target, 'wb') as handle:
            pickle.dump(db, handle)
    except Exception as e:
        print(e)
        return 1
    return 0
def unittest():
    """Smoke test: round-trip a database through disk and query it.

    NOTE(review): the function name shadows the stdlib ``unittest``
    module within this file; consider renaming if the module is ever
    imported here.
    """
    db = load_database("Test")
    db.update("Test", {"Person 1": 2, "Person 2": 4})
    save_database(db)
    # Re-load from disk to prove persistence worked.
    db = load_database("Test")
    db.display_entries()
    db.search("Person 1")
    db.search(["Person 1", "Person 2"])
if __name__ == "__main__":
db = load_database("test3")
db.display_entries()
db.search("<NAME>")
db.search(["<NAME>", "<NAME>"])
| StarcoderdataPython |
1779974 | from .iExecutor import iExecutor
class Printer(iExecutor):
    """Pass-through executor that logs each item plus a fixed message."""

    def __init__(self, *parents, msg=""):
        # Keep the configured message before wiring up the parent chain.
        self.msg = msg
        super().__init__(*parents)

    def run(self, item):
        """Print a flushed status line and return *item* unchanged."""
        if item is None:
            print(f"printer loaded, message: {self.msg}", flush=True)
        else:
            print(f"printer loaded, item {item}, message: {self.msg}", flush=True)
        return item
| StarcoderdataPython |
8054927 | <reponame>ResonantGeoData/ResonantGeoData<filename>django-rgd-imagery/rgd_imagery/tasks/jobs.py
from celery import shared_task
from rgd.tasks import helpers
@shared_task(time_limit=86400)
def task_load_image(file_pk):
    """Celery task (24 h limit): run ``load_image`` on the Image with
    primary key *file_pk* via ``helpers._run_with_failure_reason``."""
    # Imports are deferred so the task module loads without Django models.
    from rgd_imagery.models import Image
    from rgd_imagery.tasks.etl import load_image
    image_file = Image.objects.get(pk=file_pk)
    helpers._run_with_failure_reason(image_file, load_image, file_pk)
@shared_task(time_limit=86400)
def task_populate_raster(raster_pk):
    """Celery task (24 h limit): run ``populate_raster`` on the Raster
    with primary key *raster_pk* via ``helpers._run_with_failure_reason``."""
    from rgd_imagery.models import Raster
    from rgd_imagery.tasks.etl import populate_raster
    raster = Raster.objects.get(pk=raster_pk)
    helpers._run_with_failure_reason(raster, populate_raster, raster_pk)
@shared_task(time_limit=86400)
def task_populate_raster_footprint(raster_pk):
    """Celery task (24 h limit): run ``populate_raster_footprint`` on the
    Raster with primary key *raster_pk*."""
    from rgd_imagery.models import Raster
    from rgd_imagery.tasks.etl import populate_raster_footprint
    raster = Raster.objects.get(pk=raster_pk)
    helpers._run_with_failure_reason(raster, populate_raster_footprint, raster_pk)
@shared_task(time_limit=86400)
def task_populate_raster_outline(raster_pk):
    """Celery task (24 h limit): run ``populate_raster_outline`` on the
    Raster with primary key *raster_pk*."""
    from rgd_imagery.models import Raster
    from rgd_imagery.tasks.etl import populate_raster_outline
    raster = Raster.objects.get(pk=raster_pk)
    helpers._run_with_failure_reason(raster, populate_raster_outline, raster_pk)
@shared_task(time_limit=86400)
def task_load_kwcoco_dataset(kwcoco_dataset_pk):
    """Celery task (24 h limit): run ``load_kwcoco_dataset`` on the
    KWCOCOArchive with primary key *kwcoco_dataset_pk*."""
    from rgd_imagery.models import KWCOCOArchive
    from rgd_imagery.tasks.kwcoco_etl import load_kwcoco_dataset
    ds_entry = KWCOCOArchive.objects.get(pk=kwcoco_dataset_pk)
    helpers._run_with_failure_reason(ds_entry, load_kwcoco_dataset, kwcoco_dataset_pk)
@shared_task(time_limit=86400)
def task_run_processed_image(processed_pk):
    """Celery task (24 h limit): run ``run_processed_image`` on the
    ProcessedImage with primary key *processed_pk*."""
    from rgd_imagery.models import ProcessedImage
    from rgd_imagery.tasks.subsample import run_processed_image
    obj = ProcessedImage.objects.get(pk=processed_pk)
    helpers._run_with_failure_reason(obj, run_processed_image, processed_pk)
| StarcoderdataPython |
6542996 | <reponame>jim-schwoebel/allie
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['standard_features']
A standard feature array extracted using LibROSA's library.
'''
import librosa, os, uuid
import numpy as np
from pydub import AudioSegment
def audio_featurize(wavfile):
    """Extract a 104-dimensional MFCC summary vector from an audio file.

    Computes 13 MFCCs and their first-order deltas with librosa, then for
    each of the 26 coefficient series records [mean, std, min, max] — the
    same values and ordering as the original hand-unrolled version
    (13 MFCC rows first, then the 13 delta rows).

    :param wavfile: path to an audio file readable by ``librosa.load``
    :return: 1-D numpy array of 104 features
    """
    # STFT hop size used for the MFCC frames (librosa default made explicit).
    hop_length = 512
    # load file
    y, sr = librosa.load(wavfile)
    # extract mfcc coefficients and their deltas
    mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
    mfcc_delta = librosa.feature.delta(mfcc)
    # [mean, std, min, max] per coefficient row; replaces ~100 repetitive
    # hand-written lines with an equivalent loop.
    stats = []
    for row in list(mfcc) + list(mfcc_delta):
        stats.extend([np.mean(row), np.std(row), np.amin(row), np.amax(row)])
    return np.array(stats)
def exportfile(newAudio, time1, time2, filename, i):
    """Export the slice [time1:time2] of *newAudio* to a wav in the cwd.

    The output is always named with a fresh uuid4; the '_segment' suffix
    is only used when a file named ``<filename stem>_<i>.wav`` already
    exists in the current directory. Returns the written filename.
    """
    segment = newAudio[time1:time2]
    candidate = filename[0:-4] + '_' + str(i) + '.wav'
    if candidate in os.listdir():
        out_name = str(uuid.uuid4()) + '_segment' + '.wav'
    else:
        out_name = str(uuid.uuid4()) + '.wav'
    print('making %s' % (out_name))
    segment.export(out_name, format="wav")
    return out_name
def audio_time_features(filename):
    """Average the 104-dim feature vector over ~0.5 second segments.

    Splits the file into equal segments of roughly ``timesplit`` seconds,
    writes each to a temporary wav via ``exportfile()``, featurizes each
    segment with ``audio_featurize()``, and returns the element-wise mean.

    :param filename: path to a .wav or .mp3 file
    :return: numpy array of 104 averaged features
    """
    # recommend >0.50 seconds for timesplit
    timesplit = 0.50
    y, sr = librosa.load(filename)
    duration = float(librosa.core.get_duration(y))
    # Segment boundaries in milliseconds (pydub slices are in ms).
    segnum = round(duration / timesplit)
    deltat = duration / segnum
    timesegment = []
    time = 0
    for _ in range(segnum):
        timesegment.append(time)
        time = time + deltat * 1000
    if filename[-4:] == '.wav':
        newAudio = AudioSegment.from_wav(filename)
    elif filename[-4:] == '.mp3':
        newAudio = AudioSegment.from_mp3(filename)
    # Write each segment out as its own temporary wav file. (The original
    # reassigned `filename` to each segment's name here, which fed stale
    # names back into exportfile; the source filename is now kept intact.)
    filelist = []
    for i in range(len(timesegment) - 1):
        segment_file = exportfile(newAudio, timesegment[i], timesegment[i + 1], filename, i)
        filelist.append(segment_file)
    # Accumulator for the 104 features (4 stats x 26 coefficient rows),
    # replacing the original hand-written array of 104 zeros.
    featureslist = np.zeros(104)
    for j in range(len(filelist)):
        try:
            # BUG FIX: the original indexed `filelist[i]` with the stale
            # loop variable from the export loop above, so it featurized
            # the same (last) segment on every iteration.
            featureslist = featureslist + audio_featurize(filelist[j])
        except Exception:
            # BUG FIX: the original called .append('silence') on a numpy
            # array here, which raises AttributeError inside the handler;
            # a failed segment is now simply skipped.
            print('error splicing')
        finally:
            # Temporary segment files are removed on success and failure.
            os.remove(filelist[j])
    # NOTE(review): divides by segnum although only segnum-1 segments are
    # featurized — kept as-is to preserve the original scaling.
    featureslist = featureslist / segnum
    return featureslist
def standard_featurize(filename):
    """Compute the full standard feature vector and its column labels.

    Concatenates the whole-file 20 ms-window features (audio_featurize) with
    the 500 ms-segment features (audio_time_features). The label list mirrors
    that layout exactly: for each window size, 13 MFCC coefficients followed
    by 13 MFCC deltas, each with mean/std/min/max statistics.
    """
    features = np.append(audio_featurize(filename), audio_time_features(filename))
    # Generate the 208 labels in the precise order the features are emitted.
    labels = []
    for window in ('20ms', '500ms'):
        for delta in ('', '_delta'):
            for coeff in range(1, 14):
                for stat in ('mean', 'std', 'min', 'max'):
                    labels.append('mfcc_%d%s_%s_%s' % (coeff, delta, stat, window))
    return features, labels
| StarcoderdataPython |
3281703 | ### Subtract the Product and Sum of Digits of an Integer - Solution
class Solution:
    def subtractProductAndSum(self, n: int) -> int:
        """Return (product of the digits of n) minus (sum of the digits of n)."""
        # Collect the decimal digits of n (least-significant first).
        digits = []
        while n > 0:
            digits.append(n % 10)
            n //= 10
        product = 1
        total = 0
        for digit in digits:
            product *= digit
            total += digit
        return product - total
5017015 | <filename>src/scripts/client-soappy.py
#!/usr/bin/env python
# Example SOAP client for the Mutalyzer web service in Python using the
# SOAPpy library.
#
# See https://mutalyzer.nl/webservices
#
# Usage:
#   python client-soappy.py 'NM_002001.2:c.1del'
#
# This code is in the public domain; it can be used for whatever purpose
# with absolutely no restrictions.
#
# NOTE: this script uses Python 2 print statements and the (Python-2-only)
# SOAPpy package; it requires a Python 2 interpreter.
import sys
from SOAPpy import WSDL
# WSDL endpoint describing the Mutalyzer SOAP service.
URL = 'https://mutalyzer.nl/services/?wsdl'
if len(sys.argv) < 2:
    print 'Please provide a variant'
    sys.exit(1)
# Build a proxy from the remote WSDL description.
o = WSDL.Proxy(URL)
print 'Checking ' + sys.argv[1] + ' ...'
r = o.checkSyntax(variant=sys.argv[1])
# SOAPpy does not translate the SOAP boolean to a Python boolean
if r.valid == 'true':
    print 'Valid!'
else:
    print 'Not valid!'
    if r.messages:
        # This seems to be a bug in SOAPpy. Arrays of length 1 are
        # flattened, so we cannot iterate over them.
        if not isinstance(r.messages.SoapMessage, list):
            r.messages.SoapMessage = [r.messages.SoapMessage]
        for m in r.messages.SoapMessage:
            print 'Message (%s): %s' % (m.errorcode, m.message)
11349418 | import logging
import shutil
import tempfile
from pathlib import Path
import toml
from lambda_packager.config import Config
from lambda_packager.handle_poetry import poetry_is_used, export_poetry
from lambda_packager.handle_requirements_txt import install_requirements_txt
class NoSrcFilesFound(Exception):
    """Raised when no files in the project matched the configured src_patterns."""
    pass
class LambdaAutoPackage:
    """Builds AWS Lambda deployment zip(s) for a Python project.

    Dependencies are resolved from requirements.txt (preferred) or poetry's
    pyproject.toml, installed into a temporary staging directory, combined
    with the project's source files (selected by ``config.src_patterns``)
    and zipped into ``dist/lambda.zip`` -- or into ``dist/lambda-src.zip``
    plus ``dist/lambda-deps.zip`` when ``config.deps_layer`` is set.
    """
    def __init__(self, config=None, project_directory=None, logger=None):
        """Initialise packaging state.

        :param config: optional Config; when omitted it is read from the
            ``[tool.lambda-packager]`` table of <project>/pyproject.toml.
        :param project_directory: project root; defaults to the current
            working directory.
        :param logger: optional logger; defaults to a module-level logger.
        """
        if project_directory:
            self.project_directory = Path(project_directory)
        else:
            # Path() with no argument is the relative current directory.
            self.project_directory = Path()
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger(__name__)
            self.logger.debug("set up self logging")
        if config:
            self.config = config
        else:
            self.config = LambdaAutoPackage._get_config(
                self.project_directory.joinpath("pyproject.toml")
            )
        # Temporary staging area for source files (and dependencies when no
        # separate layer is requested).
        self.src_folder = self._create_tmp_directory()
        if self.config.deps_layer:
            # 'python/' is presumably the top-level directory AWS Lambda
            # expects inside a layer archive -- TODO confirm.
            self.deps_folder = self._create_tmp_directory().joinpath("python")
            self.deps_folder.mkdir(parents=True)
    def execute(self):
        """Resolve dependencies, stage sources and write the zip artifact(s).

        Dependency resolution order: an explicit requirements.txt wins,
        otherwise poetry's pyproject.toml is exported to a requirements
        file; with neither present, only source files are packaged.
        """
        # Dependencies go next to the sources unless a separate layer is built.
        deps_folder = self.deps_folder if self.config.deps_layer else self.src_folder
        if self.project_directory.joinpath("requirements.txt").is_file():
            self.logger.info("using requirements.txt file in project directory")
            requirements_file_path = self.project_directory.joinpath("requirements.txt")
            install_requirements_txt(
                str(deps_folder), requirements_file_path=requirements_file_path
            )
        elif poetry_is_used(self.project_directory):
            self.logger.info("using pyproject.toml file in project directory")
            # Export poetry's lock into a requirements.txt inside the staging
            # folder, then pip-install it (no-deps: versions are fully pinned).
            requirements_file_path = deps_folder.joinpath("requirements.txt")
            export_poetry(
                target_path=requirements_file_path,
                project_directory=self.project_directory,
                without_hashes=self.config.without_hashes,
            )
            install_requirements_txt(
                str(deps_folder),
                requirements_file_path=requirements_file_path,
                no_deps=True,
            )
        else:
            self.logger.warning("No dependency found, none will be packaged")
        self._copy_source_files(
            source_dir=self.project_directory,
            target_dir=self.src_folder,
        )
        if self.config.deps_layer:
            self.logger.info(
                "layer flag is present, output will be split into lambda-src.zip and lambda-deps.zip"
            )
            # Zip the parent so the archive contains the 'python/' directory.
            self._create_zip_file(
                self.deps_folder.parent,
                str(self.project_directory.joinpath("dist/lambda-deps.zip")),
            )
            self._create_zip_file(
                self.src_folder,
                str(self.project_directory.joinpath("dist/lambda-src.zip")),
            )
        else:
            self._create_zip_file(
                self.src_folder,
                str(self.project_directory.joinpath("dist/lambda.zip")),
            )
    @staticmethod
    def _create_tmp_directory():
        """Create and return a fresh temporary directory as a Path."""
        dirpath = tempfile.mkdtemp()
        test_path = Path(dirpath)
        assert test_path.exists()
        assert test_path.is_dir()
        return test_path
    def _copy_source_files(self, source_dir: Path, target_dir: Path):
        """Copy everything matching config.src_patterns into *target_dir*.

        Raises NoSrcFilesFound when nothing was copied, since packaging an
        empty lambda is almost certainly a configuration error.
        """
        matching_objects = LambdaAutoPackage._get_matching_files_and_folders(
            self.config.src_patterns, source_dir
        )
        self.logger.info(f"copying {len(matching_objects)} matching_objects")
        self.logger.debug(f"copying {matching_objects} matching_objects")
        copied_locations = []
        for src in matching_objects:
            # Preserve the path relative to the project root in the staging dir.
            relative_path = src.relative_to(source_dir)
            new_location = target_dir.joinpath(relative_path)
            if self._is_ignored_file(src.resolve()):
                self.logger.warning(f"skipping path {src.resolve()}")
            elif src.is_file():
                self.copy_file(src, new_location, copied_locations)
            elif src.is_dir():
                self.copy_directory(src, new_location, copied_locations)
            else:
                self.logger.warning(f"the path '{src}' was nether a file or directory")
        if len(copied_locations) <= 0:
            raise NoSrcFilesFound(
                "No src files were found. This is likely a problem. Exiting now to highlight this"
            )
        copied_locations_string = "\n".join(copied_locations)
        self.logger.info(f"copied the following locations: \n{copied_locations_string}")
    def copy_file(self, src, new_location, copied_locations):
        """Copy one file, creating parent directories; record the destination."""
        self.logger.debug(f"about to copy file from {src} --> {new_location}")
        new_location.parent.mkdir(exist_ok=True, parents=True)
        copied_locations.append(str(shutil.copyfile(src, new_location)))
    def copy_directory(self, src, new_location, copied_locations):
        """Recursively copy a directory, honouring the ignore configuration.

        NOTE: dirs_exist_ok requires Python 3.8+.
        """
        self.logger.debug(f"about to copy directory from {src} --> {new_location}")
        copied_locations.append(
            str(
                shutil.copytree(
                    src=str(src),
                    dst=str(new_location),
                    dirs_exist_ok=True,
                    ignore=(
                        self._is_ignored_file_list
                        if self.config.ignore_hidden_files or self.config.ignore_folders
                        else None
                    ),
                )
            )
        )
    def _is_ignored_file_list(self, src, files):
        """shutil.copytree ignore callback: return the names in *files* to skip.

        Returning the full *files* list skips the entire directory *src*.
        """
        if self._is_ignored_file(Path(src).resolve()):
            self.logger.warning(f"skipping folder {Path(src).resolve()}")
            return files
        files_to_skip = {}
        for file in files:
            path = Path(file).resolve()
            if self._is_ignored_file(path):
                files_to_skip[file] = path
        if files_to_skip:
            self.logger.warning(f"skipping path {list(files_to_skip.values())}")
        return files_to_skip.keys()
    def _is_ignored_file(self, resolved_path: Path):
        """Return True when *resolved_path* is hidden (if configured) or lies
        under one of config.ignore_folders."""
        path = str(resolved_path)
        if self.config.ignore_hidden_files:
            # Dot-file at the start of the path or in any component.
            if path.startswith(".") or "/." in path:
                return True
        for folder in self.config.ignore_folders:
            # NOTE(review): substring match -- 'build' also matches
            # 'builders/'; confirm this looseness is intended.
            if f"/{folder}" in path:
                return True
        return False
    @staticmethod
    def _get_matching_files_and_folders(pattern_list, source_dir):
        """Return the set of paths under *source_dir* matching any rglob pattern."""
        matching_objects = set()
        for pattern in pattern_list:
            matching_objects.update(source_dir.rglob(pattern))
        return matching_objects
    @staticmethod
    def _create_zip_file(source_dir, target):
        """Zip the contents of *source_dir* into *target* (must end in .zip)."""
        if target.endswith(".zip"):
            # NOTE(review): mkdir without parents=True fails when the
            # grandparent of *target* does not exist -- confirm dist/ is
            # always directly under an existing project directory.
            Path(target).parent.mkdir(exist_ok=True)
            # shutil.make_archive appends '.zip' itself, so strip it first.
            name = target[: -len(".zip")]
            shutil.make_archive(base_name=name, format="zip", root_dir=source_dir)
        else:
            raise ValueError(
                f"given target path '{target}' does not end with correct extension. should end with '.zip'"
            )
    @staticmethod
    def _read_config(file: Path):
        """Return the [tool.lambda-packager] table from *file*, or {} if absent."""
        config = toml.loads(file.read_text())
        try:
            return config["tool"]["lambda-packager"]
        except KeyError:
            logging.warning("no config found!")
            return {}
    @staticmethod
    def _get_config(file: Path):
        """Build a Config from *file*, falling back to defaults when missing."""
        if not file.is_file():
            logging.warning("no config file found!")
            return Config()
        config_dict = LambdaAutoPackage._read_config(file)
        return Config(**config_dict)
| StarcoderdataPython |
9703248 | <gh_stars>0
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import time
import subprocess
import json
import re
import copy
import socket
# Base directory for the traffic generator's log files.
DIR_LOG_BASE = "/var/log/xf-traffic-generator"
# Command-line option name used to tag all processes belonging to one run.
UNIQUE_KEY = "unique"
# Executable / script names as they appear in `ps` output (relative paths).
MAIN_EXEC_NAME = "./xf-generator-main"
DAEMON_EXEC_NAME = "./xf-generator-daemon"
DAEMON_PY_NAME = "../tools/xf-daemon.py"
# Per-run configuration and stdout capture file names.
CONF_FILE_NAME = "conf.json"
MAIN_EXEC_OUT = "xf-generator-main.out"
DAEMON_EXEC_OUT = "xf-generator-daemon.out"
DAEMON_PY_OUT = "xf-daemon-py.out"
# Marker line whose presence in an output file signals a successful start.
OK_STR = "===== xf-generator"
class NonzeroDict(dict):
    """Dict that recursively drops falsy entries at construction time.

    Numeric strings are converted to int first (so "0" is also dropped);
    dict values are filtered recursively; list values are assumed to contain
    dicts and each element is filtered (non-dict list elements would raise).
    """
    def __init__(self, d):
        dict.__init__(self, d)
        # Iterate the *source* dict while mutating self (a copy) -- safe.
        for key, val in d.items():
            if not val:
                self.pop(key)
                continue
            if isinstance(val, (str,)):
                if val.isdigit():
                    val = int(val)
                if val:
                    self[key] = val
                else:
                    self.pop(key)
                    continue
            elif isinstance(val, dict):
                tmp = NonzeroDict(val)
                if tmp:
                    self[key] = tmp
                else:
                    self.pop(key)
                    continue
            elif isinstance(val, list):
                dstarray = []
                for i in val:
                    tmp = NonzeroDict(i)
                    if tmp:
                        dstarray.append(tmp)
                if dstarray:
                    self[key] = dstarray
                else:
                    self.pop(key)
                    continue
            # NOTE(review): this unconditional assignment runs for every
            # kept entry and overwrites the recursively filtered value set
            # in the dict/list branches above with the *raw* value --
            # looks unintended; confirm against the upstream source.
            self[key] = val
class StatDict(dict):
    """Dict of (possibly nested) counters supporting elementwise arithmetic.

    Numeric strings are normalised to int on construction. +, -, * and /
    operate elementwise and recurse into nested dicts and lists; missing
    keys are treated as 0 and shorter lists are zero-padded.
    """
    def __init__(self, d):
        dict.__init__(self, d)
        for key, val in d.items():
            if isinstance(val, (str,)) and val.isdigit():
                self[key] = int(val)
            elif isinstance(val, dict):
                self[key] = StatDict(val)
            elif isinstance(val, list) and len(val):
                # Element type is inferred from the first item only.
                if isinstance(val[0], dict):
                    self[key] = list(map(lambda x :StatDict(x), val))
                elif isinstance(val[0], (str,)):
                    self[key] = list(map(lambda x :int(x), val))
                else:
                    self[key] = val
            else:
                self[key] = val
    @staticmethod
    def keys_all(me, oth):
        """Union of the key sets of both dicts."""
        return list(set(me.keys()) | set(oth.keys()))
    @staticmethod
    def add_sub(me, oth, to_add):
        """Elementwise add (to_add truthy) or subtract (me - oth) of two dicts.

        Keys present in only one operand are copied through (after numeric-
        string normalisation); nested dicts/lists are handled recursively.
        """
        ret = {}
        for key in StatDict.keys_all(me, oth):
            val_me = me.get(key, 0)
            val_oth = oth.get(key, 0)
            if key in me and key not in oth:
                if isinstance(val_me, (str)) and str(val_me).isdigit():
                    val_me = int(val_me)
                ret[key] = val_me
                continue
            if key not in me and key in oth:
                if isinstance(val_oth, (str)) and str(val_oth).isdigit():
                    val_oth = int(val_oth)
                ret[key] = val_oth
                continue
            if isinstance(val_me, (str, int)) and isinstance(val_oth, (str, int)) and \
                    str(val_me).isdigit() and str(val_oth).isdigit():
                if to_add:
                    ret[key] = int(val_me) + int(val_oth)
                else:
                    ret[key] = int(val_me) - int(val_oth)
            elif type(val_me) == type(val_oth):
                if isinstance(val_me, dict):
                    if to_add:
                        ret[key] = StatDict(val_me) + StatDict(val_oth)
                    else:
                        ret[key] = StatDict(val_me) - StatDict(val_oth)
                elif isinstance(val_me, list) and len(val_me):
                    if isinstance(val_me[0], dict):
                        if to_add:
                            ret[key] = list(map(lambda x :StatDict(x[0]) + StatDict(x[1]), zip(val_me, val_oth)))
                        else:
                            ret[key] = list(map(lambda x :StatDict(x[0]) - StatDict(x[1]), zip(val_me, val_oth)))
                    elif isinstance(val_me[0], (str, int)):
                        # Zero-pad the shorter list so zip() covers all items.
                        maxitems = max(len(val_me), len(val_oth))
                        diff = abs(len(val_me) - len(val_oth))
                        if diff:
                            if len(val_me) < maxitems:
                                val_me.extend([0] * diff)
                            else:
                                val_oth.extend([0] * diff)
                        if to_add:
                            ret[key] = list(map(lambda x :int(x[0]) + int(x[1]) ,zip(val_me, val_oth)))
                        else:
                            ret[key] = list(map(lambda x :int(x[0]) - int(x[1]) ,zip(val_me, val_oth)))
        return ret
    def __add__(self, oth):
        return self.add_sub(self, oth, 1)
    def __sub__(self, oth):
        return self.add_sub(self, oth, 0)
    @staticmethod
    def mul_div(me, divnum, to_mul):
        """Elementwise multiply (to_mul truthy) or divide every value by *divnum*."""
        ret = {}
        for key, val_me in me.items():
            if isinstance(val_me, (str, int)) and str(val_me).isdigit():
                if to_mul:
                    ret[key] = int(val_me) * divnum
                else:
                    # NOTE(review): true division -- results become floats
                    # under Python 3; confirm that is acceptable downstream.
                    ret[key] = int(val_me) / divnum
            elif isinstance(val_me, dict):
                if to_mul:
                    ret[key] = StatDict(val_me) * divnum
                else:
                    ret[key] = StatDict(val_me) / divnum
            elif isinstance(val_me, list) and len(val_me):
                if isinstance(val_me[0], dict):
                    if to_mul:
                        ret[key] = list(map(lambda x :StatDict(x) * divnum,val_me))
                    else:
                        ret[key] = list(map(lambda x :StatDict(x) / divnum,val_me))
                elif isinstance(val_me[0], (str, int)):
                    if to_mul:
                        ret[key] = list(map(lambda x :int(x) * divnum,val_me))
                    else:
                        ret[key] = list(map(lambda x :int(x) / divnum,val_me))
        return ret
    def __div__(self, divnum):
        # NOTE(review): __div__ is the Python 2 name; Python 3 dispatches
        # '/' to __truediv__, so this operator never fires under Python 3.
        return self.mul_div(self, divnum, 0)
    def __mul__(self, divnum):
        return self.mul_div(self, divnum, 1)
    def __int__(self):
        # NOTE(review): this method is broken: the list branch references
        # the undefined names `ret`, `divnum` and `val_me` (NameError --
        # apparently copy-pasted from mul_div), and the method returns None,
        # which violates the __int__ protocol (int(StatDict(...)) raises
        # TypeError). Left untouched pending clarification of its intent.
        for key, val in self.items():
            if isinstance(val, (str,)):
                self[key] = int(val)
            elif isinstance(val, dict):
                self[key] = int(StatDict(val))
            elif isinstance(val, list):
                ret[key] = list(map(lambda x :StatDict(x) * divnum,val_me))
def run_cmd_wrapper(cmd, check_interval=0.5, timeout=10, asyncdo=False):
    """Run a shell command, polling for completion up to roughly *timeout* s.

    Returns (ret, outstr, errstr); ret is -1 when the command timed out (the
    process is then killed). With asyncdo=True the Popen object is returned
    immediately instead.

    NOTE(review): cmd runs with shell=True -- callers must not pass
    untrusted input. Under Python 3, outstr/errstr are *bytes*. The
    effective deadline is up to timeout + check_interval because of the
    initial sleep.
    """
    ret = -1
    fin = None
    outstr = ""
    errstr = ""
    # Number of poll iterations that approximates the requested timeout.
    toreal = int(timeout / check_interval)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if asyncdo:
        return p
    time.sleep(check_interval)
    for _ in range(toreal):
        fin = p.poll()
        if fin is not None:
            break
        time.sleep(check_interval)
    if fin is None:
        # Timed out: best-effort kill; output pipes are intentionally not read.
        errstr = "[wrapper] cmd [%s] not finish." % cmd
        try:
            p.kill()
        except:
            pass
        try:
            p.terminate()
        except:
            pass
    else:
        ret = fin
        if p.stdout:
            outstr = p.stdout.read()
        if p.stderr:
            errstr = p.stderr.read()
    return (ret, outstr, errstr)
def get_unique_pids(unique):
pids = {
"main_exec": None,
"daemon_exec": None,
"daemon_py": None
}
pidsarray = []
cmd = "ps aux|grep '%s .*--%s=%s '|grep -v grep|awk '{print $2}'" % (MAIN_EXEC_NAME, UNIQUE_KEY, unique)
(ret, outstr, errstr) = run_cmd_wrapper(cmd, check_interval=0.1, timeout=2)
if outstr:
outstr = outstr.strip()
if outstr.isdigit():
pids["main_exec"] = int(outstr)
pidsarray.append(int(outstr))
cmd = "ps aux|grep '%s .*--%s=%s '|grep -v grep|awk '{print $2}'" % (DAEMON_EXEC_NAME, UNIQUE_KEY, unique)
(ret, outstr, errstr) = run_cmd_wrapper(cmd, check_interval=0.1, timeout=2)
if outstr:
outstr = outstr.strip()
if outstr.isdigit():
pids["daemon_exec"] = int(outstr)
pidsarray.append(int(outstr))
cmd = "ps aux|grep '%s .*--%s=%s '|grep -v grep|awk '{print $2}'" % (DAEMON_PY_NAME, UNIQUE_KEY, unique)
(ret, outstr, errstr) = run_cmd_wrapper(cmd, check_interval=0.1, timeout=2)
if outstr:
outstr = outstr.strip()
if outstr.isdigit():
pids["daemon_py"] = int(outstr)
pidsarray.append(int(outstr))
return pidsarray
def ok_str_found(filename):
    """Return True when the OK marker (OK_STR) appears in *filename*.

    Shells out to cat|grep like the rest of this module; grep exits with
    status 0 exactly when the marker is found.
    """
    grep_cmd = 'cat %s 2>&1 | grep "%s" > /dev/null' % (filename, OK_STR)
    return os.system(grep_cmd) == 0
def is_local_port_in_use(port):
    """Return True if TCP *port* cannot be bound on 127.0.0.1 (i.e. in use).

    Attempts a bind+listen probe; any failure (port taken, permission
    denied, invalid port value) is reported as "in use", matching the
    original's pessimistic behaviour.
    """
    try:
        # Context manager guarantees the probe socket is closed again.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
            probe.bind(("127.0.0.1", int(port)))
            probe.listen(1)
    except (OSError, ValueError, OverflowError):
        # BUG FIX: the original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch only bind-related failures
        # (OSError for a busy port, ValueError/OverflowError for bad input).
        return True
    return False
def get_unique_lport(unique):
    """Return the '-p <port>' value from the daemon's command line, or None.

    Looks up the xf-generator-daemon process tagged with --unique=<unique>
    in `ps` output and extracts the listen port passed via -p.
    """
    cmd = 'ps -e -o pid,command|egrep "%s .*\-\-%s=%s "' % (DAEMON_EXEC_NAME, UNIQUE_KEY, unique)
    (ret, outstr, errstr) = run_cmd_wrapper(cmd, check_interval=0.1, timeout=2)
    if not outstr:
        return None
    outstr = outstr.strip()
    if not outstr:
        return None
    # NOTE(review): under Python 3 run_cmd_wrapper yields bytes, so
    # str(outstr) produces "b'...'"; the regex may still match inside that
    # representation but this is fragile -- consider decoding instead.
    m = re.match(r'^.+ -p (\d+)', str(outstr))
    if not m:
        return None
    return int(m.groups()[0])
| StarcoderdataPython |
11385448 | import yaml
from sigma.parser.collection import SigmaCollectionParser
from sigma.parser.exceptions import SigmaParseError, SigmaCollectionParseError
from stix2 import CustomObject, KillChainPhase, properties
from yeti.core.errors import ValidationError
from .indicator_base import Indicator
@CustomObject('x-sigma', [
    ('labels', properties.StringProperty(required=True)),
    ('name', properties.StringProperty()),
    ('description', properties.StringProperty()),
    ('pattern', properties.StringProperty(required=True)),
    ('valid_from', properties.TimestampProperty(required=True)),
    ('valid_until', properties.TimestampProperty()),
    ('kill_chain_phases', properties.ListProperty(KillChainPhase))
])
class StixSigma():
    """Custom STIX2 object type for Sigma rules.

    On construction, validates that *pattern* is well-formed YAML and a
    parseable Sigma rule, raising ValidationError otherwise.
    """
    def __init__(self, pattern=None, **_):
        # BUG FIX: the original parsed `self.pattern`, which is never
        # assigned at this point, so every instantiation raised
        # AttributeError instead of validating. Validate the argument
        # directly (stix2 objects manage their own property storage).
        try:
            SigmaCollectionParser(pattern)
        except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
            raise ValidationError('{0:s} is not a valid YAML markup: {1!s}'.format(pattern, e))
        except (SigmaParseError, SigmaCollectionParseError) as e:
            raise ValidationError('{0:s} is not a valid Sigma rule: {1!s}'.format(pattern, e))
class Sigma(Indicator):
    """Sigma rule STIX extension object.
    Extends the Indicator STIX2 definition.
    """
    # STIX type name; matches the custom object registered for Sigma rules.
    type = 'x-sigma'
    @property
    def name(self):
        """Rule name from the wrapped STIX object."""
        return self._stix_object.name
    @property
    def description(self):
        """Free-text description of the rule."""
        return self._stix_object.description
    @property
    def pattern(self):
        """The Sigma rule body (YAML) stored in the STIX pattern property."""
        return self._stix_object.pattern
    @property
    def valid_from(self):
        """Timestamp from which the indicator is considered valid."""
        return self._stix_object.valid_from
    @property
    def valid_until(self):
        """Timestamp after which the indicator is no longer valid."""
        return self._stix_object.valid_until
    @property
    def kill_chain_phases(self):
        """Kill-chain phases this rule detects."""
        return self._stix_object.kill_chain_phases
# Register this extension so Indicator can dispatch 'x-sigma' objects to Sigma.
Indicator.datatypes[Sigma.type] = Sigma
| StarcoderdataPython |
9799001 | # -*- coding: utf-8 -*-
# The main program for Markov chain Monte Carlo simulation
# This program requires four initial parameters, k, r, T and nsteps.
# where k is the number of nodes, r is the weight coefficient.
# T is temperature, nsteps is total number of steps, i.e., graphs.
# They are specified at the beginning of the program.
# solve path problem
import sys
import os
# get source file directory so that the program can be executable from anywhere
dir_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))
sys.path.append(dir_root)
# Begin importing packages
from graph_init import GetInit
from update_graph import UpdateGraph
from monte_carlo import MonteCarlo
from draw import DrawGraph
# Set parameters: k nodes, weight coefficient r, temperature T, nsteps graphs.
k = 8
r = 1
T = 300
nsteps = 300
# calculate number of all possible edges
# NOTE(review): under Python 3 true division makes etot a float -- confirm
# calc_prob() accepts that, or use k*(k-1)//2.
etot = k*(k-1)/2
# aliases of my own modules
gi = GetInit()
ug = UpdateGraph()
mc = MonteCarlo()
pg = DrawGraph()
# Generate initial graph and all the inital properties needed for future use
# tmp is the initial graph and pos is the matrix storing positions
# and later 'tmp' will also be the name of all current graphs
tmp, pos = gi.init_graph(k)
# calculate weights of all possible edges
w = gi.calc_weight(pos, k)
# initialize the list to store all the edge list in the type of string
edge_list = [None]*0
# write initial edge list into edge_list[0] as a string
edge_list.append(str(tmp.edges()))
# get the the number of neighbors of node 0
# NOTE(review): networkx >= 2.0 returns an iterator from neighbors(), so
# len() would fail there -- this assumes networkx 1.x.
neighbor_0 = len(tmp.neighbors(0))
# get the number of edges in the whole graph
n_edge = tmp.number_of_edges()
# check necessary edges of the zeroth graph
keep_i = ug.chk_bridges(tmp)
# calculate the inverse of probability q(j|i) for the zeroth graph
prob_i = mc.calc_prob(keep_i, etot)
# calculate thetas for zeroth graphs
theta_i, max_i = mc.calc_theta(tmp, w, r)
# sum the maximum length
sum_max_len = max_i
# I/O part
expectations = open('output', 'w')
edges = open('edgelist', 'w')
summary = open('summary', 'w')
# output information
print('{}{}'.format('total number of nodes: ', k), file=summary)
print('{}{}'.format('nodes positions: ', pos), file=summary)
print('{}'.format(tmp.edges()), file=edges)
print('{}{}{}{}{:6d}{:9.4f}{:9.4f}{:9.4f}'.format('nsteps ', 'degree_0 ', 'n_edges ', 'max_len',
                                                  1, neighbor_0, n_edge, sum_max_len), file=expectations)
# begin loop over all steps (Metropolis-Hastings over graphs)
for i in range(1, nsteps):
    # get the new graph candidate tmp_new
    tmp_new = ug.change_edges(k, tmp, keep_i)
    # check necessary edges of the candidate
    keep_j = ug.chk_bridges(tmp_new)
    # calculate the inverse of the probability q(i|j)
    prob_j = mc.calc_prob(keep_j, etot)
    # calculate theta and maximum path length for candidate
    theta_j, max_j = mc.calc_theta(tmp_new, w, r)
    # run Metropolis
    tmp, theta_i, prob_i, keep_i, max_i = mc.metropolis(theta_i, theta_j, prob_i, prob_j,
                                                        T, tmp, tmp_new, keep_i, keep_j, max_i, max_j)
    # write edge list into edge_list
    edge_list.append(str(tmp.edges()))
    # quantities required for output (running sums for the expectations)
    neighbor_0 += len(tmp.neighbors(0))
    n_edge += tmp.number_of_edges()
    sum_max_len += max_i
    # for checking propose
    # drawing of the 'trajectory' can be pretty cool
    print('{:6d}{:9.4f}{:9.4f}{:9.4f}'.format(i+1, neighbor_0/(i+1), n_edge/(i+1), sum_max_len/(i+1)), file=expectations)
    print('{}'.format(tmp.edges()), file=edges)
# print out the expected numbers of edges connected to vertex 0 and edges in the entire graph
print('{}{}{}{}{}'.format('the expected number of edges connected to vertex 0: ', neighbor_0/nsteps, "\n",
                          'the expected number of edges in the entire graph: ', n_edge/nsteps), file=summary)
# demo of output, just draw the last graph in the list
# nth graph needed
nth = nsteps
pg.draw_this_graph(k, pos, nth, edge_list)
# output sorted histogram and draw the most probable graph
# since the data will be truncated adn over-written for counting histogram under equilibrium
# it's recommended to draw the arbitrary one first
pg.draw_most_probable(k, pos, edge_list, summary)
# close files
expectations.close()
edges.close()
summary.close()
| StarcoderdataPython |
9700939 | <reponame>ymetz/local_interpretability
from tcav.tcav import TCAV
import tcav.model as cm
import tcav.utils as utils
import os
import pickle
import image_crawler
from dataservice import get_model_list, get_dataset_list
import tcav.activation_generator as act_gen
import tensorflow as tf
def load_tcavs(model, dataset):
    """Load previously computed TCAV scores for *model* on *dataset*.

    Scores are read from <model_path>/<dataset_name><model_name>-tcavscores.pkl;
    an empty dict is returned when no score file has been written yet.
    """
    score_file = os.path.join(
        model.model_path,
        dataset.dataset_name + model.model_name + '-tcavscores' + '.pkl')
    if not os.path.isfile(score_file):
        return {}
    with open(score_file, 'rb') as handle:
        return pickle.load(handle)
def run_tcav():
    """Run TCAV for the first available model/dataset and persist the scores.

    Crawls example images for the concepts and target classes if missing,
    builds the activation generator around an InceptionV3 wrapper, runs TCAV
    per target, and pickles the accumulated score dict next to the model.
    """
    model = get_model_list("../../models/")[0]
    dataset = get_dataset_list("../../datasets")[0]
    dataset_name = dataset.dataset_name
    id_to_labels = dataset.id_to_label
    model_to_run = 'inception_v3'
    tcav_dir = "../../models/tensorflow_inception_v3"
    # where activations are stored (only if your act_gen_wrapper does so)
    activation_dir = os.path.join(tcav_dir, 'activations/')
    # where CAVs are stored.
    # You can say None if you don't wish to store any.
    cav_dir = os.path.join(tcav_dir, 'cavs/')
    concept_directory = "../../datasets/tcav_concepts"
    target_directory = "../../datasets/targets"
    # Network layer(s) at which concept activation vectors are learned.
    bottlenecks = ['Mixed_5d']  # @param
    utils.make_dir_if_not_exists(activation_dir)
    utils.make_dir_if_not_exists(cav_dir)
    # this is a regularizer penalty parameter for linear classifier to get CAVs.
    alphas = [0.1]
    # a folder that random images are stored
    random_counterpart = 'random_images'
    targets = ['zebra']
    concepts = ["dotted", "striped", "zigzagged", "irregular pattern", "gradient", "single color"]
    # crawl images for concepts and target class (50 each, only when absent)
    for concept in concepts:
        if not os.path.isdir(os.path.join(concept_directory, concept)):
            image_crawler.crawl_images(concept_directory, concept, N=50)
    # if not os.path.isdir(os.path.join(concept_directory, random_counterpart)):
    #     image_crawler.crawl_images(concept_directory, 'image', N=500)
    for target in targets:
        if not os.path.isdir(os.path.join(target_directory, target)):
            image_crawler.crawl_images(target_directory, target, N=50)
    the_model = cm.InceptionV3Wrapper_custom(model.session,
                                             model,
                                             id_to_labels)
    act_generator = act_gen.ImageActivationGenerator(the_model, concept_directory, activation_dir, max_examples=100)
    tf.logging.set_verbosity(0)
    # Accumulates per-target results across the loop below.
    tcav_dict = {}
    for target in targets:
        mytcav = TCAV(model.session,
                      target,
                      concepts,
                      bottlenecks,
                      act_generator,
                      alphas,
                      random_counterpart,
                      cav_dir=cav_dir,
                      num_random_exp=5)
        results = mytcav.run()
        # we have to subtract 1 from the target class, as it corresponds with our ground truth labels,
        # internally the network outputs are shifted by one, as 0 represents the background class instead of -1
        summary = utils.print_results(results, class_id=the_model.label_to_id(target)-1, result_dict=tcav_dict)
    # Persist the accumulated scores next to the model for later lookup
    # (see load_tcavs).
    tcav_file_path = os.path.join(model.model_path, dataset_name + model.model_name + '-tcavscores' + '.pkl')
    with open(tcav_file_path, 'wb') as f:
        pickle.dump(tcav_dict, f, pickle.HIGHEST_PROTOCOL)
def jsonKeys2int(x):
    """json object_hook that restores integer dict keys.

    JSON object keys are always strings; this converts them back to ints.
    Non-dict values pass through unchanged.
    """
    if not isinstance(x, dict):
        return x
    return {int(key): value for key, value in x.items()}
| StarcoderdataPython |
6529784 | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import cunumeric as num
np.random.seed(0)
def allclose(A, B):
    """Compare two arrays, using a relative L2 error for single precision.

    Single-precision FFT results accumulate rounding error, so np.allclose's
    default tolerances are too strict for float32/complex64; for those dtypes
    we check ||A - B|| / ||A|| < 1e-6 instead.
    """
    if B.dtype != np.float32 and B.dtype != np.complex64:
        return np.allclose(A, B)
    squared_err = (A - B) * np.conj(A - B)
    rel_l2 = np.sqrt(np.sum(squared_err) / np.sum(A * np.conj(A)))
    return rel_l2 < 1e-6
def check_1d_r2c(N, dtype=np.float64):
    """Check 1D real-to-complex FFTs in cunumeric against NumPy.

    Exercises rfft with default arguments, forward normalization, and
    truncated/zero-padded output lengths, plus irfft/hfft applied to real
    input; finally verifies the input array was left unmodified.
    """
    print(f"\n=== 1D R2C {dtype} ===")
    Z = np.random.rand(N).astype(dtype)
    Z_num = num.array(Z)
    out = np.fft.rfft(Z)
    out_num = num.fft.rfft(Z_num)
    assert allclose(out, out_num)
    out = np.fft.rfft(Z, norm="forward")
    out_num = num.fft.rfft(Z_num, norm="forward")
    assert allclose(out, out_num)
    # Explicit output sizes: truncation (n < N) ...
    out = np.fft.rfft(Z, n=N // 2)
    out_num = num.fft.rfft(Z_num, n=N // 2)
    assert allclose(out, out_num)
    out = np.fft.rfft(Z, n=N // 2 + 1)
    out_num = num.fft.rfft(Z_num, n=N // 2 + 1)
    assert allclose(out, out_num)
    # ... and zero-padding (n > N), both even and odd lengths.
    out = np.fft.rfft(Z, n=N * 2)
    out_num = num.fft.rfft(Z_num, n=N * 2)
    assert allclose(out, out_num)
    out = np.fft.rfft(Z, n=N * 2 + 1)
    out_num = num.fft.rfft(Z_num, n=N * 2 + 1)
    assert allclose(out, out_num)
    # Odd types
    out = np.fft.irfft(Z)
    out_num = num.fft.irfft(Z_num)
    assert allclose(out, out_num)
    out = np.fft.hfft(Z)
    out_num = num.fft.hfft(Z_num)
    assert allclose(out, out_num)
    # The transforms must not have modified the input array.
    assert allclose(Z, Z_num)
def check_2d_r2c(N, dtype=np.float64):
    """Check 2D real-to-complex FFTs in cunumeric against NumPy.

    Exercises rfft2 with default arguments, forward normalization, shape
    truncation/padding via s=, and various axis selections (single axes,
    negative axes, permutations, repeated axes), plus irfft2/hfft on real
    input; finally verifies the input array was left unmodified.
    """
    print(f"\n=== 2D R2C {dtype} ===")
    Z = np.random.rand(*N).astype(dtype)
    Z_num = num.array(Z)
    out = np.fft.rfft2(Z)
    out_num = num.fft.rfft2(Z_num)
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, norm="forward")
    out_num = num.fft.rfft2(Z_num, norm="forward")
    assert allclose(out, out_num)
    # Explicit output shapes: truncation and zero-padding per axis.
    out = np.fft.rfft2(Z, s=(N[0] // 2, N[1] - 2))
    out_num = num.fft.rfft2(Z_num, s=(N[0] // 2, N[1] - 2))
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, s=(N[0] + 1, N[0] + 2))
    out_num = num.fft.rfft2(Z_num, s=(N[0] + 1, N[0] + 2))
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, s=(N[0] // 2 + 1, N[0] + 2))
    out_num = num.fft.rfft2(Z_num, s=(N[0] // 2 + 1, N[0] + 2))
    assert allclose(out, out_num)
    # Axis subsets: single positive and negative axes.
    out = np.fft.rfft2(Z, axes=[0])
    out_num = num.fft.rfft2(Z_num, axes=[0])
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, axes=[1])
    out_num = num.fft.rfft2(Z_num, axes=[1])
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, axes=[-1])
    out_num = num.fft.rfft2(Z_num, axes=[-1])
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, axes=[-2])
    out_num = num.fft.rfft2(Z_num, axes=[-2])
    assert allclose(out, out_num)
    # Axis orderings and a repeated axis.
    out = np.fft.rfft2(Z, axes=[0, 1])
    out_num = num.fft.rfft2(Z_num, axes=[0, 1])
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, axes=[1, 0])
    out_num = num.fft.rfft2(Z_num, axes=[1, 0])
    assert allclose(out, out_num)
    out = np.fft.rfft2(Z, axes=[1, 0, 1])
    out_num = num.fft.rfft2(Z_num, axes=[1, 0, 1])
    assert allclose(out, out_num)
    # Odd types
    out = np.fft.irfft2(Z)
    out_num = num.fft.irfft2(Z_num)
    assert allclose(out, out_num)
    out = np.fft.hfft(Z)
    out_num = num.fft.hfft(Z_num)
    assert allclose(out, out_num)
    # The transforms must not have modified the input array.
    assert allclose(Z, Z_num)
def check_3d_r2c(N, dtype=np.float64):
    """Compare num.fft 3D real-to-complex transforms against numpy.fft on random input."""
    print(f"\n=== 3D R2C {dtype} ===")
    Z = np.random.rand(*N).astype(dtype)
    Z_num = num.array(Z)
    # Exercise rfftn under the same sequence of norm / shape / axes variants
    # as before, now table-driven.
    variants = [
        {},
        {"norm": "forward"},
        {"norm": "ortho"},
        {"s": (N[0] - 1, N[1] - 2, N[2] // 2)},
        {"s": (N[0] + 1, N[1] + 2, N[2] + 3)},
        {"axes": [0]},
        {"axes": [1]},
        {"axes": [2]},
        {"axes": [-1]},
        {"axes": [-2]},
        {"axes": [-3]},
        {"axes": [2, 1]},
        {"axes": [0, 2]},
        {"axes": [0, 2, 1, 1, -1]},
    ]
    for kwargs in variants:
        assert allclose(np.fft.rfftn(Z, **kwargs), num.fft.rfftn(Z_num, **kwargs))
    # Odd types
    assert allclose(np.fft.fftn(Z), num.fft.fftn(Z_num))
    assert allclose(np.fft.ifftn(Z), num.fft.ifftn(Z_num))
    assert allclose(np.fft.irfftn(Z), num.fft.irfftn(Z_num))
    assert allclose(np.fft.hfft(Z), num.fft.hfft(Z_num))
    # Inputs must not have been modified by any of the transforms above.
    assert allclose(Z, Z_num)
# Each test runs the float64 and float32 variants of one check.
# "deferred" sizes are large, "eager" sizes small -- presumably chosen to
# exercise the library's deferred vs eager execution paths (TODO confirm
# against the runtime's eager/deferred size threshold).
def test_deferred_1d():
    check_1d_r2c(N=10001)
    check_1d_r2c(N=10001, dtype=np.float32)


def test_deferred_2d():
    check_2d_r2c(N=(128, 512))
    check_2d_r2c(N=(128, 512), dtype=np.float32)


def test_deferred_3d():
    check_3d_r2c(N=(64, 40, 100))
    check_3d_r2c(N=(64, 40, 100), dtype=np.float32)


def test_eager_1d():
    check_1d_r2c(N=153)
    check_1d_r2c(N=153, dtype=np.float32)


def test_eager_2d():
    check_2d_r2c(N=(28, 10))
    check_2d_r2c(N=(28, 10), dtype=np.float32)


def test_eager_3d():
    check_3d_r2c(N=(6, 10, 12))
    check_3d_r2c(N=(6, 10, 12), dtype=np.float32)


if __name__ == "__main__":
    import sys

    # Forward command-line arguments straight to pytest.
    pytest.main(sys.argv)
| StarcoderdataPython |
122891 | <reponame>focusexplorer/AI_hand_digit_recognition<filename>test.py
from PIL import Image
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import glob
import re
import copy
def relu(x):
    """Leaky ReLU: identity for non-negative entries, slope 0.1 for negative ones.

    Operates element-wise on a numpy array and returns a new array;
    the caller's array is left unmodified.
    """
    y = copy.deepcopy(x)  # copy so the input is not mutated in place
    y[y < 0] *= 0.1
    return y
def drelu(x):
    """Derivative of the leaky ReLU: 1 for entries >= 0, 0.1 for negative entries.

    Operates element-wise on a numpy array and returns a new array;
    the caller's array is left unmodified.
    """
    y = copy.deepcopy(x)
    # Order matters: setting the >= 0 entries to 1 first leaves the negative
    # entries unchanged (still negative) for the second mask.
    y[y >= 0] = 1
    y[y < 0] = 0.1
    return y
# Quick manual check of relu/drelu on random values in [-0.5, 0.5),
# then bail out before the unrelated experiments below.
# (Python 2 print statements -- this script predates Python 3.)
x=np.random.random((5,5))-0.5;
print x;
print relu(x);
print drelu(x);
sys.exit();
def change(e):
    # Mutates the passed-in array in place (no copy is made).
    e[1,1]=1000;
def cv(d):
    # Demonstrates pass-by-reference: both this write and the one inside
    # change() are visible through the caller's array; returns the same array.
    d[1,1]=100;
    change(d);
    return d;
b1=np.zeros((10,10),dtype=np.double)
print b1
# c and f are both aliases of b1 -- cv() mutates it in place, so the second
# print of b1 shows the values written through c/f.
c=b1;
f=cv(c);
print b1
f[1,1]=555;
print 'f=',f
x1=10*np.ones((10,10),dtype=np.double)
# Element-wise reciprocal of the array.
print 1/x1
def sigmoid(x):
    """Logistic function: 1 / (1 + e**-x)."""
    denominator = 1 + np.e ** -x
    return 1 / denominator
def sigmoid_all(ax):
    # Applies sigmoid element-wise, IN PLACE, to a 2-D array and returns it.
    # (Python 2 print statements.)
    print 'shape[0]=',ax.shape[0]
    print 'shape[1]=',ax.shape[1]
    for i in range(ax.shape[0]):
        for j in range(ax.shape[1]):
            ax[i,j]=sigmoid(ax[i,j]);
    return ax;
def active(x):
    """Activation function currently in use: sigmoid.

    The original toggled between sigmoid and relu by reordering return
    statements; the unreachable `return relu(x)` after the first return
    has been removed (behavior unchanged).
    """
    return sigmoid(x)
def dactive(x):
    # NOTE(review): dsigmoid is not defined anywhere in this file, so calling
    # dactive raises NameError. The second return (drelu) is unreachable dead
    # code left over from toggling activations -- confirm intent before fixing.
    return dsigmoid(x);
    return drelu(x);
def active_all(x):
    # Element-wise activation over an array (delegates to sigmoid_all).
    # The second return is unreachable dead code; relu_all is not defined
    # in this file.
    return sigmoid_all(x);
    return relu_all(x);
print '=================='
f=np.ones((10,1),dtype=np.double)
#f=f[0,:];
print f
print active(f)
# active_all mutates f in place via sigmoid_all, so this prints the
# already-activated values.
print active_all(f)
def function(request):
    """HTTP handler: always responds with a fixed greeting (request is unused)."""
    greeting = "👋 Hello from python functions-framework"
    return greeting
| StarcoderdataPython |
90635 | <reponame>AlexanderAivazidis/scvelo<filename>scvelo/tools/pymc/pymc3_model.py
# -*- coding: utf-8 -*-
r"""Base Pymc3 model class for all models in pymc3"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import theano
from pymc3.variational.callbacks import CheckParametersConvergence
from tqdm.auto import tqdm
from scvelo.tools.pymc.base_model import BaseModel
# base model class - defining shared methods but not the model itself
class Pymc3Model(BaseModel):
    r"""This class provides functions to train PyMC3 models and sample their parameters.

    A model must have a main X_data input and can have arbitrary self.extra_data inputs.

    Parameters
    ----------
    X_data :
        Numpy array of gene expression (cols) in spatial locations (rows)
    all other:
        the rest are arguments for parent class BaseModel
    """

    def __init__(
        self,
        X_data: np.ndarray,
        data_type: str = 'float32',
        n_iter: int = 200000,
        learning_rate=0.001,
        total_grad_norm_constraint=200,
        verbose=True,
        var_names=None, var_names_read=None,
        obs_names=None, sample_id=None
    ):

        ############# Initialise parameters ################
        super().__init__(X_data,
                         data_type, n_iter,
                         learning_rate, total_grad_norm_constraint,
                         verbose, var_names, var_names_read,
                         obs_names, sample_id)

        # Create dictionaries storing results
        self.advi = {}         # pm.ADVI objects, one entry per initialisation
        self.mean_field = {}   # fitted MeanField approximations, keyed like self.advi
        self.samples = {}      # posterior summaries (means, quantiles, SDs) for all parameters
        self.node_samples = {} # posterior summaries for individually sampled nodes
        self.n_type = 'restart'  # default

        # Pass data to theano
        self.x_data = theano.shared(X_data.astype(self.data_type))

    def sample_prior(self, samples=10):
        """Take samples from the prior, see `pymc3.sample_prior_predictive` for details

        Parameters
        ----------
        samples :
            (Default value = 10)

        Returns
        -------
        dict
            self.prior_trace dictionary with an element for each parameter of the mode
        """
        # Take samples from the prior
        with self.model:
            self.prior_trace = pm.sample_prior_predictive(samples=samples)

    def fit_advi_iterative(self, n=3, method='advi', n_type='restart',
                           n_iter=None,
                           learning_rate=None, reducing_lr=False,
                           progressbar=True,
                           scale_cost_to_minibatch=True):
        """Find posterior using pm.ADVI() method directly (allows continuing training through `refine` method.
        (maximising likelihood of the data and minimising KL-divergence of posterior to prior - ELBO loss)

        Parameters
        ----------
        n :
            number of independent initialisations (Default value = 3)
        method :
            advi', to allow for potential use of SVGD, MCMC, custom (currently only ADVI implemented). (Default value = 'advi')
        n_type :
            type of repeated initialisation:
            * **'restart'** to pick different initial value,
            * **'cv'** for molecular cross-validation - splits counts into n datasets, for now, only n=2 is implemented
            * **'bootstrap'** for fitting the model to multiple downsampled datasets.
              Run `mod.bootstrap_data()` to generate variants of data (Default value = 'restart')
        n_iter :
            number of iterations, supersedes self.n_iter specified when creating model instance. (Default value = None)
        learning_rate :
            learning rate, supersedes self.learning_rate specified when creating model instance. (Default value = None)
        reducing_lr :
            boolean, use decaying learning rate? (Default value = False)
        progressbar :
            boolean, show progress bar? (Default value = True)
        scale_cost_to_minibatch :
            when using training in minibatches, scale cost function appropriately?
            See discussion https://discourse.pymc.io/t/effects-of-scale-cost-to-minibatch/1429 to understand the effects. (Default value = True)

        Returns
        -------
        None
            self.mean_field dictionary with MeanField pymc3 objects,
            and self.advi dictionary with ADVI objects for each initialisation.
        """

        self.n_type = n_type
        self.scale_cost_to_minibatch = scale_cost_to_minibatch

        # Method arguments supersede the values stored on the instance.
        if n_iter is None:
            n_iter = self.n_iter

        if learning_rate is None:
            learning_rate = self.learning_rate

        ### Initialise optimiser ###
        if reducing_lr:
            # initialise the function for adaptive learning rate
            s = theano.shared(np.array(learning_rate).astype(self.data_type))

            def reduce_rate(a, h, i):
                # Decay schedule: lr / ((i / n_cells) + 1) ** 0.7, evaluated per step.
                s.set_value(np.array(learning_rate / ((i / self.n_cells) + 1) ** .7).astype(self.data_type))

            optimiser = pm.adam(learning_rate=s)
            callbacks = [reduce_rate, CheckParametersConvergence()]
        else:
            optimiser = pm.adam(learning_rate=learning_rate)
            callbacks = [CheckParametersConvergence()]

        # Prepare data variants when fitting repeatedly on resampled data.
        if np.isin(n_type, ['bootstrap']):
            if self.X_data_sample is None:
                self.bootstrap_data(n=n)
        elif np.isin(n_type, ['cv']):
            self.generate_cv_data()  # cv data added to self.X_data_sample

        init_names = ['init_' + str(i + 1) for i in np.arange(n)]

        for i, name in enumerate(init_names):
            with self.model:
                self.advi[name] = pm.ADVI(random_seed = 99)

            # when type is molecular cross-validation or bootstrap,
            # replace self.x_data tensor with new data
            if np.isin(n_type, ['cv', 'bootstrap']):

                # defining minibatch
                if self.minibatch_size is not None:
                    # minibatch main data - expression matrix
                    self.x_data_minibatch = pm.Minibatch(self.X_data_sample[i].astype(self.data_type),
                                                         batch_size=[self.minibatch_size, None],
                                                         random_seed=self.minibatch_seed[i])
                    more_replacements = {self.x_data: self.x_data_minibatch}

                    # if any other data inputs should be minibatched add them too
                    if self.extra_data is not None:
                        # for each parameter in the dictionary add it to more_replacements
                        for k in self.extra_data.keys():
                            more_replacements[self.extra_data_tt[k]] = \
                                pm.Minibatch(self.extra_data[k].astype(self.data_type),
                                             batch_size=[self.minibatch_size, None],
                                             random_seed=self.minibatch_seed[i])

                # or using all data
                else:
                    more_replacements = {self.x_data: self.X_data_sample[i].astype(self.data_type)}
                    # if any other data inputs should be added
                    if self.extra_data is not None:
                        # for each parameter in the dictionary add it to more_replacements
                        for k in self.extra_data.keys():
                            more_replacements[self.extra_data_tt[k]] = \
                                self.extra_data[k].astype(self.data_type)

            else:
                # defining minibatch
                if self.minibatch_size is not None:
                    # minibatch main data - expression matrix
                    self.x_data_minibatch = pm.Minibatch(self.X_data.astype(self.data_type),
                                                         batch_size=[self.minibatch_size, None],
                                                         random_seed=self.minibatch_seed[i])
                    more_replacements = {self.x_data: self.x_data_minibatch}

                    # if any other data inputs should be minibatched add them too
                    if self.extra_data is not None:
                        # for each parameter in the dictionary add it to more_replacements
                        for k in self.extra_data.keys():
                            more_replacements[self.extra_data_tt[k]] = \
                                pm.Minibatch(self.extra_data[k].astype(self.data_type),
                                             batch_size=[self.minibatch_size, None],
                                             random_seed=self.minibatch_seed[i])
                else:
                    more_replacements = {}

            self.advi[name].scale_cost_to_minibatch = scale_cost_to_minibatch

            # train the model
            self.mean_field[name] = self.advi[name].fit(n_iter, callbacks=callbacks,
                                                        obj_optimizer=optimiser,
                                                        total_grad_norm_constraint=self.total_grad_norm_constraint,
                                                        progressbar=progressbar, more_replacements=more_replacements,
                                                        )

            # plot training history
            if self.verbose:
                print(plt.plot(np.log10(self.mean_field[name].hist[15000:])));

    def plot_history(self, iter_start=0, iter_end=-1):
        """Plot loss function (ELBO) across training history

        Parameters
        ----------
        iter_start :
            omit initial iterations from the plot (Default value = 0)
        iter_end :
            omit last iterations from the plot (Default value = -1)
        """
        for i in self.mean_field.keys():
            print(plt.plot(np.log10(self.mean_field[i].hist[iter_start:iter_end])))

    def sample_posterior(self, node='all', n_samples=1000,
                         save_samples=False, return_samples=True,
                         mean_field_slot='init_1'):
        """Sample posterior distribution of all parameters or single parameter

        Parameters
        ----------
        node :
            pymc3 node to sample (e.g. default "all", self.spot_factors)
        n_samples :
            number of posterior samples to generate (1000 is recommended, reduce if you get GPU memory error) (Default value = 1000)
        save_samples :
            save all samples, not just the mean, 5% and 95% quantile, SD. (Default value = False)
        return_samples :
            return summarised samples (mean, etc) in addition to saving them in `self.samples`? (Default value = True)
        mean_field_slot :
            string, which training initialisation (mean_field slot) to sample? 'init_1' by default

        Returns
        -------
        dict
            dictionary `self.samples` (mean, 5% quantile, SD, optionally all samples) with dictionaries
            with numpy arrays for each parameter.
            Plus an optional dictionary in `self.samples` with all samples of parameters
            as numpy arrays of shape ``(n_samples, ...)``
        """

        theano.config.compute_test_value = 'ignore'

        if node == 'all':
            # Sample all parameters - might use a lot of GPU memory
            post_samples = self.mean_field[mean_field_slot].sample(n_samples)
            # Summarise the draws per parameter: mean, quantiles, SD.
            self.samples['post_sample_means'] = {v: post_samples[v].mean(axis=0) for v in post_samples.varnames}
            self.samples['post_sample_q05'] = {v: np.quantile(post_samples[v], 0.05, axis=0) for v in
                                               post_samples.varnames}
            self.samples['post_sample_q95'] = {v: np.quantile(post_samples[v], 0.95, axis=0) for v in
                                               post_samples.varnames}
            self.samples['post_sample_q01'] = {v: np.quantile(post_samples[v], 0.01, axis=0) for v in
                                               post_samples.varnames}
            self.samples['post_sample_q99'] = {v: np.quantile(post_samples[v], 0.99, axis=0) for v in
                                               post_samples.varnames}
            self.samples['post_sample_sds'] = {v: post_samples[v].std(axis=0) for v in post_samples.varnames}

            if (save_samples):
                # convert multitrace object to a dictionary
                post_samples = {v: post_samples[v] for v in post_samples.varnames}
                self.samples['post_samples'] = post_samples

        else:
            # Sample a singe node
            post_node = self.mean_field[mean_field_slot].sample_node(node, size=n_samples).eval()
            post_node_mean = post_node.mean(0)
            post_node_q05 = np.quantile(post_node, 0.05, axis=0)
            post_node_q95 = np.quantile(post_node, 0.95, axis=0)
            post_node_q01 = np.quantile(post_node, 0.01, axis=0)
            post_node_q99 = np.quantile(post_node, 0.99, axis=0)
            post_node_sds = post_node.std(0)

            # extract the name of the node and save to samples dictionary
            node_name = node.name
            self.node_samples[str(node_name) + '_mean'] = post_node_mean
            self.node_samples[str(node_name) + '_q05'] = post_node_q05
            self.node_samples[str(node_name) + '_q95'] = post_node_q95
            self.node_samples[str(node_name) + '_q01'] = post_node_q01
            self.node_samples[str(node_name) + '_q99'] = post_node_q99
            self.node_samples[str(node_name) + '_sds'] = post_node_sds

            if save_samples:
                self.node_samples[str(node_name) + '_post_samples'] = post_node

        if return_samples:
            return self.samples
| StarcoderdataPython |
4810036 | <reponame>alexsharoff/BuildMigrator<filename>build_migrator/generators/_cmake/cmake_copy.py<gh_stars>10-100
import os
from .cmake_cmd import CMakeCmd
copy_tmpl = "configure_file({input} {output} COPYONLY)\n"
class CMakeCopy(CMakeCmd):
    """Emits CMake code for 'copy' targets.

    A copy whose origin is a file in the source directory becomes a plain
    ``configure_file(... COPYONLY)``; a copy whose origin is produced in the
    build directory becomes a custom ``cmake -E create_symlink`` command.
    """

    priority = 1

    @staticmethod
    def add_arguments(arg_parser):
        # This generator has no command-line options.
        pass

    def __init__(self, context, project=None):
        CMakeCmd.__init__(self, context, project)

    @staticmethod
    def _create_symlink_cmd(name, source, output):
        # Path of the source relative to the output's directory,
        # with forward slashes as CMake expects.
        rel_source = os.path.relpath(source, os.path.dirname(output)).replace("\\", "/")
        cmd = {
            "name": name,
            "type": "cmd",
            "program": "cmake",
            "args": ["-E", "create_symlink", rel_source, output],
            "dependencies": [source],
            "output": output,
        }
        return cmd

    def generate(self, target):
        if target["type"] != "copy":
            return False

        source = target["source"]
        if self.context.get_copy_origin(source):
            # origin is a file in build dir: add_custom_command
            symlink_target = self._create_symlink_cmd(
                target["name"], source, target["output"]
            )
            CMakeCmd.generate(self, symlink_target)
        else:
            # origin is a file in source dir: configure_file
            with self.context.open("CMakeLists.txt", "a") as cmake_file:
                cmake_file.write(
                    self.context.format(
                        copy_tmpl, input=target["source"], output=target["output"]
                    )
                )
        return True
__all__ = ["CMakeCopy"]
| StarcoderdataPython |
6501763 | from ..imagelist import ImageList
from ..office31 import Office31
from ..officehome import OfficeHome
from ..visda2017 import VisDA2017
from typing import Optional, ClassVar, Sequence
from copy import deepcopy
__all__ = ['Office31', 'OfficeHome', "VisDA2017"]
def open_set(dataset_class: ClassVar, public_classes: Sequence[str],
             private_classes: Optional[Sequence[str]] = ()) -> ClassVar:
    """
    Convert a dataset into its open-set version.

    In other words, those samples which doesn't belong to `private_classes` will be marked as "unknown".

    Be aware that `open_set` will change the label number of each category.

    Args:
        dataset_class (class): Dataset class. Only subclass of ``ImageList`` can be open-set.
        public_classes (sequence[str]): A sequence of which categories need to be kept in the open-set dataset.\
            Each element of `public_classes` must belong to the `classes` list of `dataset_class`.
        private_classes (sequence[str], optional): A sequence of which categories need to be marked as "unknown" \
            in the open-set dataset. Each element of `private_classes` must belong to the `classes` list of \
            `dataset_class`. Default: ().

    Examples::
        >>> public_classes = ['back_pack', 'bike', 'calculator', 'headphones', 'keyboard']
        >>> private_classes = ['laptop_computer', 'monitor', 'mouse', 'mug', 'projector']
        >>> # create a open-set dataset class which has classes
        >>> # 'back_pack', 'bike', 'calculator', 'headphones', 'keyboard' and 'unknown'.
        >>> OpenSetOffice31 = open_set(Office31, public_classes, private_classes)
        >>> # create an instance of the open-set dataset
        >>> dataset = OpenSetDataset(root="data/office31", task="A")
    """
    if not issubclass(dataset_class, ImageList):
        # TypeError is more precise than a bare Exception and is still caught
        # by any caller catching Exception.
        raise TypeError("Only subclass of ImageList can be openset")

    class OpenSetDataset(dataset_class):
        def __init__(self, **kwargs):
            super(OpenSetDataset, self).__init__(**kwargs)
            samples = []
            # list() already snapshots the class names; deepcopy of a sequence
            # of immutable strings was unnecessary.
            all_classes = list(public_classes) + ["unknown"]
            for (path, label) in self.samples:
                class_name = self.classes[label]
                if class_name in public_classes:
                    samples.append((path, all_classes.index(class_name)))
                elif class_name in private_classes:
                    samples.append((path, all_classes.index("unknown")))
                # samples in neither sequence are dropped entirely
            self.samples = samples
            self.classes = all_classes
            self.class_to_idx = {cls: idx
                                 for idx, cls in enumerate(self.classes)}

    return OpenSetDataset
def default_open_set(dataset_class: ClassVar, source: bool) -> ClassVar:
    """
    Default open-set used in some paper.

    Args:
        dataset_class (class): Dataset class. Currently, dataset_class must be one of
            :class:`~common.vision.datasets.office31.Office31`, :class:`~common.vision.datasets.officehome.OfficeHome`,
            :class:`~common.vision.datasets.visda2017.VisDA2017`,
        source (bool): Whether the dataset is used for source domain or not.
    """
    # The source domain never carries private ("unknown") classes.
    if dataset_class == Office31:
        public = Office31.CLASSES[:20]
        private = () if source else Office31.CLASSES[20:]
    elif dataset_class == OfficeHome:
        ordered = sorted(OfficeHome.CLASSES)
        public = ordered[:25]
        private = () if source else ordered[25:]
    elif dataset_class == VisDA2017:
        public = ('bicycle', 'bus', 'car', 'motorcycle', 'train', 'truck')
        private = () if source else (
            'aeroplane', 'horse', 'knife', 'person', 'plant', 'skateboard')
    else:
        raise NotImplementedError(
            "Unknown openset domain adaptation dataset: {}".format(dataset_class.__name__))
    return open_set(dataset_class, public, private)
| StarcoderdataPython |
6460321 | # Time: O(n)
# Space: O(1)
# Make xrange available on both interpreters: on Python 3 alias it to range.
try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3
class Solution(object):
    def longestMountain(self, A):
        """
        :type A: List[int]
        :rtype: int

        Longest strictly-increasing-then-strictly-decreasing run; a mountain
        needs at least one step up and one step down (length >= 3).
        """
        best = 0
        ascent = descent = 0
        for prev, cur in zip(A, A[1:]):
            # A plateau, or going up again after having gone down,
            # ends the current mountain candidate.
            if (descent and prev < cur) or prev == cur:
                ascent = descent = 0
            if prev < cur:
                ascent += 1
            elif prev > cur:
                descent += 1
            if ascent and descent:
                best = max(best, ascent + descent + 1)
        return best
| StarcoderdataPython |
4981772 | <gh_stars>1-10
import requests, json, pandas as pd
import os
from dotenv import load_dotenv
# Returns parks (lat, long) from National Park Service API
def parks_get():
    """Fetch parks from the National Park Service API.

    Reads the API key from the DATAGOV_KEY environment variable (loaded via
    .env) and returns a DataFrame with columns
    parkCode, url, fullName, latitude, longitude.

    Raises requests.HTTPError on a non-success HTTP status.
    """
    load_dotenv()
    endpoint = "https://developer.nps.gov/api/v1/parks"
    headers = {"X-Api-Key": os.getenv("DATAGOV_KEY")}
    params = {"limit": "1000"}
    response = requests.get(endpoint, params=params, headers=headers)
    # Fail fast with a clear HTTPError instead of a confusing JSON decode
    # error when the API returns e.g. 403 for a missing/invalid key.
    response.raise_for_status()
    parks_data = pd.json_normalize(response.json()["data"])
    return parks_data[["parkCode", "url", "fullName", "latitude", "longitude"]]
| StarcoderdataPython |
4834448 | <reponame>drumcap/vanillaPython<gh_stars>0
import networkx
import re
class RawSentence:
    """Iterates over the sentences of a text.

    ``textIter`` may be a single string (split on newlines) or any iterable of
    lines. Sentences end at ., !, ? or : followed by a quote character or by a
    non-digit, so decimal numbers like "3.14" are not split.
    """

    def __init__(self, textIter):
        # Keep the original type() check: an iterable of lines is used as-is.
        if type(textIter) == str:
            self.textIter = textIter.split('\n')
        else:
            self.textIter = textIter
        self.rgxSplitter = re.compile('([.!?:](?:["\']|(?![0-9])))')

    def __iter__(self):
        for line in self.textIter:
            pieces = self.rgxSplitter.split(line)
            # re.split keeps the captured delimiters at odd indices; re-attach
            # each delimiter to the text before it. A trailing fragment without
            # a delimiter is dropped (zip stops at the shorter sequence).
            for body, ending in zip(pieces[::2], pieces[1::2]):
                sentence = body + ending
                if not sentence:
                    continue
                yield sentence
class RawSentenceReader:
    """Streams sentences from a UTF-8 text file, one line at a time.

    Same sentence-splitting rule as RawSentence: ., !, ? or : followed by a
    quote or by a non-digit ends a sentence.
    """

    def __init__(self, filepath):
        self.filepath = filepath
        self.rgxSplitter = re.compile('([.!?:](?:["\']|(?![0-9])))')

    def __iter__(self):
        for line in open(self.filepath, encoding='utf-8'):
            pieces = self.rgxSplitter.split(line)
            # Delimiters captured by re.split sit at odd indices; pair each
            # with the preceding text. Trailing text without a delimiter is
            # dropped, matching the original map(lambda a, b: a + b, ...) form.
            for body, ending in zip(pieces[::2], pieces[1::2]):
                sentence = body + ending
                if not sentence:
                    continue
                yield sentence
class RawTagger:
    # Splits text into sentences (same delimiter regex as RawSentence) and
    # yields each sentence as a list of (token, POS) pairs from a KoNLPy tagger.
    def __init__(self, textIter, tagger=None):
        if tagger:
            self.tagger = tagger
        else:
            # Default to Komoran; imported lazily so konlpy is only required
            # when no tagger is supplied.
            from konlpy.tag import Komoran
            self.tagger = Komoran()
        if type(textIter) == str:
            self.textIter = textIter.split('\n')
        else:
            self.textIter = textIter
        self.rgxSplitter = re.compile('([.!?:](?:["\']|(?![0-9])))')

    def __iter__(self):
        for line in self.textIter:
            ch = self.rgxSplitter.split(line)
            # Delimiters are at odd indices; glue each onto the preceding text.
            # A trailing fragment without a delimiter is dropped (map stops at
            # the shorter of the two slices).
            for s in map(lambda a, b: a + b, ch[::2], ch[1::2]):
                if not s: continue
                yield self.tagger.pos(s)
class RawTaggerReader:
    # File-based variant of RawTagger: reads a UTF-8 file line by line,
    # splits into sentences, and yields (token, POS) pairs per sentence.
    def __init__(self, filepath, tagger=None):
        if tagger:
            self.tagger = tagger
        else:
            # Default to Komoran; imported lazily so konlpy is only required
            # when no tagger is supplied.
            from konlpy.tag import Komoran
            self.tagger = Komoran()
        self.filepath = filepath
        self.rgxSplitter = re.compile('([.!?:](?:["\']|(?![0-9])))')

    def __iter__(self):
        for line in open(self.filepath, encoding='utf-8'):
            ch = self.rgxSplitter.split(line)
            # Same pairing of text with its captured delimiter as RawTagger.
            for s in map(lambda a, b: a + b, ch[::2], ch[1::2]):
                if not s: continue
                yield self.tagger.pos(s)
class TextRank:
    # TextRank in two flavours over one networkx graph:
    # * word graph (load + extract) for keyword/keyphrase extraction,
    # * sentence graph (loadSents + summarize) for extractive summarisation.
    def __init__(self, **kargs):
        self.graph = None
        self.window = kargs.get('window', 5)            # co-occurrence window for word pairs
        self.coef = kargs.get('coef', 1.0)              # mixing weight between count and constant edge weight
        self.threshold = kargs.get('threshold', 0.005)  # minimum sentence similarity to add an edge
        self.dictCount = {}     # node -> occurrence count (words) or index -> sentence
        self.dictBiCount = {}   # (a, b) -> co-occurrence count or similarity (edge weights)
        self.dictNear = {}      # (a, b) -> adjacent-pair count, used for PMI
        self.nTotal = 0         # total number of counted tokens

    def load(self, sentenceIter, wordFilter=None):
        # Build the word co-occurrence graph. wordFilter, if given, selects
        # which tokens participate (e.g. only nouns/verbs).
        def insertPair(a, b):
            # Store undirected pairs in canonical (sorted) order.
            if a > b:
                a, b = b, a
            elif a == b:
                return
            self.dictBiCount[a, b] = self.dictBiCount.get((a, b), 0) + 1

        def insertNearPair(a, b):
            # Directed adjacency counts (immediate neighbours only).
            self.dictNear[a, b] = self.dictNear.get((a, b), 0) + 1

        for sent in sentenceIter:
            for i, word in enumerate(sent):
                if wordFilter and not wordFilter(word): continue
                self.dictCount[word] = self.dictCount.get(word, 0) + 1
                self.nTotal += 1
                if i - 1 >= 0 and (not wordFilter or wordFilter(sent[i - 1])): insertNearPair(sent[i - 1], word)
                if i + 1 < len(sent) and (not wordFilter or wordFilter(sent[i + 1])): insertNearPair(word, sent[i + 1])
                # Pair this word with every filtered word inside the window.
                for j in range(i + 1, min(i + self.window + 1, len(sent))):
                    if wordFilter and not wordFilter(sent[j]): continue
                    if sent[j] != word: insertPair(word, sent[j])

    def loadSents(self, sentenceIter, tokenizer=None):
        # Build the sentence similarity graph for summarisation. Sentences with
        # fewer than 2 distinct tokens are skipped.
        import math
        def similarity(a, b):
            # Jaccard overlap, length-normalised by log sentence sizes.
            n = len(a.intersection(b))
            return n / float(len(a) + len(b) - n) / (math.log(len(a) + 1) * math.log(len(b) + 1))

        if not tokenizer: rgxSplitter = re.compile('[\\s.,:;-?!()"\']+')
        sentSet = []
        for sent in filter(None, sentenceIter):
            if type(sent) == str:
                if tokenizer:
                    s = set(filter(None, tokenizer(sent)))
                else:
                    s = set(filter(None, rgxSplitter.split(sent)))
            else:
                s = set(sent)
            if len(s) < 2: continue
            # Sentence nodes are integer indices; dictCount maps index -> text.
            self.dictCount[len(self.dictCount)] = sent
            sentSet.append(s)

        for i in range(len(self.dictCount)):
            for j in range(i + 1, len(self.dictCount)):
                s = similarity(sentSet[i], sentSet[j])
                if s < self.threshold: continue
                self.dictBiCount[i, j] = s

    def getPMI(self, a, b):
        # Pointwise mutual information of adjacent pair (a, b); None if the
        # pair was never seen adjacent.
        import math
        co = self.dictNear.get((a, b), 0)
        if not co: return None
        return math.log(float(co) * self.nTotal / self.dictCount[a] / self.dictCount[b])

    def getI(self, a):
        # Self-information -log p(a); None for unknown nodes.
        import math
        if a not in self.dictCount: return None
        return math.log(self.nTotal / self.dictCount[a])

    def build(self):
        # Materialise the networkx graph from the accumulated pair weights.
        self.graph = networkx.Graph()
        self.graph.add_nodes_from(self.dictCount.keys())
        for (a, b), n in self.dictBiCount.items():
            # coef blends the raw co-occurrence weight with a constant weight 1.
            self.graph.add_edge(a, b, weight=n * self.coef + (1 - self.coef))

    def rank(self):
        # PageRank scores over the weighted graph; build() must run first.
        return networkx.pagerank(self.graph, weight='weight')

    def extract(self, ratio=0.1):
        # Keyword/keyphrase extraction: take the top `ratio` ranked words,
        # then chain adjacent high-PMI words into multi-word phrases.
        ranks = self.rank()
        cand = sorted(ranks, key=ranks.get, reverse=True)[:int(len(ranks) * ratio)]
        pairness = {}
        startOf = {}
        tuples = {}
        for k in cand:
            # Single-word candidate score: rank weighted by self-information.
            tuples[(k,)] = self.getI(k) * ranks[k]
            for l in cand:
                if k == l: continue
                pmi = self.getPMI(k, l)
                if pmi: pairness[k, l] = pmi

        # For each word, remember its best (highest-PMI) continuation.
        for (k, l) in sorted(pairness, key=pairness.get, reverse=True):
            # print(k[0], l[0], pairness[k, l])
            if k not in startOf: startOf[k] = (k, l)

        # Grow phrases by following best continuations, up to 7 words,
        # scoring each prefix as it is extended.
        for (k, l), v in pairness.items():
            pmis = v
            rs = ranks[k] * ranks[l]
            path = (k, l)
            tuples[path] = pmis / (len(path) - 1) * rs ** (1 / len(path)) * len(path)
            last = l
            while last in startOf and len(path) < 7:
                if last in path: break
                pmis += pairness[startOf[last]]
                last = startOf[last][1]
                rs *= ranks[last]
                path += (last,)
                tuples[path] = pmis / (len(path) - 1) * rs ** (1 / len(path)) * len(path)

        # Greedily keep the best-scoring phrases whose words are still unused.
        used = set()
        both = {}
        for k in sorted(tuples, key=tuples.get, reverse=True):
            if used.intersection(set(k)): continue
            both[k] = tuples[k]
            for w in k: used.add(w)
        # for k in cand:
        #     if k not in used or True: both[k] = ranks[k] * self.getI(k)

        return both

    def summarize(self, ratio=0.333):
        # Extractive summary: top `ratio` ranked sentences, re-joined in their
        # original (index) order.
        r = self.rank()
        ks = sorted(r, key=r.get, reverse=True)[:int(len(r) * ratio)]
        return ' '.join(map(lambda k: self.dictCount[k], sorted(ks)))
6548305 | # ---------------------------------------------------------------------------
# Pelion Device Management SDK
# (C) COPYRIGHT 2017 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Public API for Bootstrap API."""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
# Import common functions and exceptions from frontend API
from mbed_cloud.core import BaseAPI
from mbed_cloud.core import BaseObject
from mbed_cloud.decorators import catch_exceptions
from mbed_cloud.pagination import PaginatedResponse
# Import backend API
import mbed_cloud._backends.connector_bootstrap as bootstrap
from mbed_cloud._backends.connector_bootstrap import models
from mbed_cloud._backends.connector_bootstrap.rest import ApiException as BootstrapAPIException
LOG = logging.getLogger(__name__)
class BootstrapAPI(BaseAPI):
    """API reference for the Bootstrap API."""

    api_structure = {bootstrap: [bootstrap.PreSharedKeysApi]}

    @catch_exceptions(BootstrapAPIException)
    def add_psk(self, **kwargs):
        """Upload a new pre-shared key.

        Accepts PreSharedKey attributes as keyword arguments (endpoint_name,
        secret_hex) and returns the created key wrapped as a PreSharedKey.
        """
        api = self._get_api(bootstrap.PreSharedKeysApi)
        item = PreSharedKey._create_request_map(kwargs)
        item = models.PreSharedKey(**item)
        api.upload_pre_shared_key(item)
        return PreSharedKey(item)

    @catch_exceptions(BootstrapAPIException)
    def get_psk(self, endpoint_name, **kwargs):
        """Fetch one pre-shared key by its endpoint name."""
        api = self._get_api(bootstrap.PreSharedKeysApi)
        return PreSharedKey(api.get_pre_shared_key(endpoint_name=endpoint_name))

    @catch_exceptions(BootstrapAPIException)
    def list_psks(self, **kwargs):
        """List pre-shared keys as a paginated response of PreSharedKey objects."""
        api = self._get_api(bootstrap.PreSharedKeysApi)
        return PaginatedResponse(api.list_pre_shared_keys, lwrap_type=PreSharedKey, **kwargs)

    @catch_exceptions(BootstrapAPIException)
    def delete_psk(self, endpoint_name, **kwargs):
        """Delete the pre-shared key with the given endpoint name."""
        api = self._get_api(bootstrap.PreSharedKeysApi)
        return api.delete_pre_shared_key(endpoint_name=endpoint_name)
class PreSharedKey(BaseObject):
    """Describes device object from the catalog.

    For more information about such keys,
    have a look at
    https://cloud.mbed.com/docs/latest/connecting/mbed-client-lite-security-considerations.html"
    """

    @staticmethod
    def _get_attributes_map():
        # Maps API field names to attribute names on this object.
        return {
            'endpoint_name': 'endpoint_name',
            'secret_hex': 'secret_hex',
            'created_at': 'created_at',
        }

    @property
    def endpoint_name(self):
        """The endpoint_name of this PreSharedKey.

        Endpoint name is the unique ID of the pre-shared key.
        16-64 printable (non-control) ASCII characters.
        It also must be globally unique.
        Consider using vendor-MAC-ID-device-model.
        For example "myEndpoint.host.com"

        :return: The endpoint_name of this PreSharedKey.
        :rtype: str
        """
        return self._endpoint_name

    @property
    def secret_hex(self):
        """The secret_hex of this PreSharedKey.

        The secret of the pre-shared key in HEX format.
        - It is not case sensitive; 4a is same as 4A
        - It is allowed with or without 0x in the beginning.
        - The minimum length of the secret is 128 bits and max 512 bits.
        For example "4a4a4a4a4a4a4a4a4a4a4a4a4a4a4a4a"

        :return: The secret_hex of this PreSharedKey.
        :rtype: str
        """
        return self._secret_hex

    @property
    def created_at(self):
        """Gets the created_at of this PreSharedKey.

        The date-time (RFC3339) when this pre-shared key was uploaded to Pelion Device Management.

        :return: The created_at of this PreSharedKey.
        :rtype: datetime
        """
        return self._created_at
| StarcoderdataPython |
3255964 | from opening_hours.hours import get_opening_hours
def get_resources_total_hours(resource_ids, period_start, period_end):
    """Sum the open hours of the given resources over the period (whole hours)."""
    opening_hours = get_opening_hours(
        resource_ids,
        period_start,
        period_end,
    )

    hours_in_day = 24
    total = 0
    for opening_hour in opening_hours:
        for time in opening_hour["times"]:
            if time.full_day:
                total += hours_in_day
            elif time.end_time_on_next_day:
                # Only the hours remaining in the starting day are counted.
                total += hours_in_day - time.start_time.hour
            else:
                total += time.end_time.hour - time.start_time.hour
    return total
def get_resources_total_hours_per_resource(resource_ids, period_start, period_end):
    """Sum the open hours per resource over the period (whole hours).

    Returns a dict mapping resource_id -> total open hours.

    Bug fix: the full-day and over-midnight branches previously did
    ``total_opening_hours += ...`` where ``total_opening_hours`` is the result
    *dict*, raising TypeError on any such slot; they now accumulate into the
    per-slot counter like the normal branch.
    """
    opening_hours = get_opening_hours(
        resource_ids,
        period_start,
        period_end,
    )

    hours_in_day = 24
    total_opening_hours = {}
    for opening_hour in opening_hours:
        total_open = 0
        for time in opening_hour["times"]:
            if time.full_day:
                total_open += hours_in_day
            elif time.end_time_on_next_day:
                # Only the hours remaining in the starting day are counted.
                total_open += hours_in_day - time.start_time.hour
            else:
                total_open += time.end_time.hour - time.start_time.hour

        prev_tot = total_opening_hours.get(opening_hour["resource_id"], 0)
        total_opening_hours[opening_hour["resource_id"]] = total_open + prev_tot
    return total_opening_hours
| StarcoderdataPython |
4959932 | import json
import logging
import os
import requests
import sys
import uuid
WIT_API_HOST = os.getenv('WIT_URL', 'https://api.wit.ai')
WIT_API_VERSION = os.getenv('WIT_API_VERSION', '20160516')
DEFAULT_MAX_STEPS = 5
INTERACTIVE_PROMPT = '> '
LEARN_MORE = 'Learn more at https://wit.ai/docs/quickstart'
class WitError(Exception):
    """Raised when the Wit API returns an error status or an error payload."""
    pass
def req(logger, access_token, meth, path, params, **kwargs):
    """Perform an authenticated request against the Wit API and return the parsed JSON.

    Raises WitError when the HTTP status is above 200 or when the response
    body contains an 'error' key.
    """
    full_url = WIT_API_HOST + path
    logger.debug('%s %s %s', meth, full_url, params)
    headers = {
        'authorization': 'Bearer ' + access_token,
        'accept': 'application/vnd.wit.' + WIT_API_VERSION + '+json'
    }
    headers.update(kwargs.pop('headers', {}))
    rsp = requests.request(
        meth,
        full_url,
        headers=headers,
        params=params,
        **kwargs
    )
    # NOTE(review): anything above 200 (including 201/204) is treated as an
    # error -- presumably the Wit API always answers 200 on success; confirm
    # before relaxing this check.
    if rsp.status_code > 200:
        raise WitError('Wit responded with status: ' + str(rsp.status_code) +
                       ' (' + rsp.reason + ')')
    # Renamed from 'json' to avoid shadowing the imported json module.
    resp_json = rsp.json()
    if 'error' in resp_json:
        raise WitError('Wit responded with an error: ' + resp_json['error'])

    logger.debug('%s %s %s', meth, full_url, resp_json)
    return resp_json
def validate_actions(logger, actions):
    """Warn about malformed 'actions' mappings and return them unchanged.

    Checks that `actions` is a dict, that the required 'send' action is
    present, and that every value is callable. Problems are only logged,
    never raised. Uses logger.warning (logger.warn is a deprecated alias).
    """
    if not isinstance(actions, dict):
        logger.warning('The second parameter should be a dictionary.')
    for action in ['send']:
        if action not in actions:
            logger.warning('The \'' + action + '\' action is missing. ' +
                           LEARN_MORE)
    for action in actions.keys():
        # callable() is the idiomatic equivalent of hasattr(x, '__call__').
        if not callable(actions[action]):
            logger.warning('The \'' + action +
                           '\' action should be a function.')
    return actions
class Wit:
    """Client for the Wit.ai HTTP API: /message, /speech, /converse and the
    higher-level run_actions conversation loop.
    """
    # NOTE(review): class-level defaults; `actions` and `_sessions` are
    # mutable and shared across instances unless rebound (__init__ only
    # rebinds `actions` when an `actions` argument is supplied).
    access_token = None
    actions = {}
    _sessions = {}
    def __init__(self, access_token, actions=None, logger=None):
        # access_token: Wit.ai server access token used for all requests.
        # actions: optional dict of bot actions, validated before use.
        # logger: optional logger; defaults to this module's logger.
        self.access_token = access_token
        self.logger = logger or logging.getLogger(__name__)
        if actions:
            self.actions = validate_actions(self.logger, actions)
    def message(self, msg, context=None, verbose=None):
        """Queries the /message endpoint and returns the parsed JSON reply."""
        params = {}
        if verbose:
            params['verbose'] = True
        if msg:
            params['q'] = msg
        if context:
            params['context'] = json.dumps(context)
        resp = req(self.logger, self.access_token, 'GET', '/message', params)
        return resp
    def speech(self, audio_file, verbose=None, headers=None):
        """ Sends an audio file to the /speech API.
        Uses the streaming feature of requests (see `req`), so opening the file
        in binary mode is strongly recommended (see
        http://docs.python-requests.org/en/master/user/advanced/#streaming-uploads).
        Add Content-Type header as specified here: https://wit.ai/docs/http/20160526#post--speech-link
        :param audio_file: an open handler to an audio file
        :param verbose: when truthy, adds verbose=true to the query
        :param headers: an optional dictionary with request headers
        :return: parsed JSON response from the API
        """
        params = {}
        headers = headers or {}
        if verbose:
            params['verbose'] = True
        resp = req(self.logger, self.access_token, 'POST', '/speech', params,
                   data=audio_file, headers=headers)
        return resp
    def converse(self, session_id, message, context=None, reset=None,
                 verbose=None):
        """Makes a single /converse call and returns the parsed JSON reply."""
        if context is None:
            context = {}
        params = {'session_id': session_id}
        if verbose:
            params['verbose'] = True
        if message:
            params['q'] = message
        if reset:
            params['reset'] = True
        resp = req(self.logger, self.access_token, 'POST', '/converse', params,
                   data=json.dumps(context))
        return resp
    def __run_actions(self, session_id, current_request, message, context, i,
                      verbose):
        """Recursive core of run_actions: one converse step per call.
        `i` counts remaining steps; `current_request` lets a newer concurrent
        run_actions call for the same session preempt this one.
        """
        if i <= 0:
            raise WitError('Max steps reached, stopping.')
        # NOTE(review): this local name shadows the imported `json` module
        # for the remainder of the method body.
        json = self.converse(session_id, message, context, verbose=verbose)
        if 'type' not in json:
            raise WitError('Couldn\'t find type in Wit response')
        if current_request != self._sessions[session_id]:
            # A newer run_actions call superseded this one: stop early.
            return context
        self.logger.debug('Context: %s', context)
        self.logger.debug('Response type: %s', json['type'])
        # backwards-compatibility with API version 20160516
        if json['type'] == 'merge':
            json['type'] = 'action'
            json['action'] = 'merge'
        if json['type'] == 'error':
            raise WitError('Oops, I don\'t know what to do.')
        if json['type'] == 'stop':
            return context
        request = {
            'session_id': session_id,
            'context': dict(context),
            'text': message,
            'entities': json.get('entities'),
        }
        if json['type'] == 'msg':
            self.throw_if_action_missing('send')
            response = {
                'text': json.get('msg').encode('utf8'),
                'quickreplies': json.get('quickreplies'),
            }
            self.actions['send'](request, response)
        elif json['type'] == 'action':
            action = json['action']
            self.throw_if_action_missing(action)
            context = self.actions[action](request)
            if context is None:
                self.logger.warn('missing context - did you forget to return it?')
                context = {}
        else:
            raise WitError('unknown type: ' + json['type'])
        if current_request != self._sessions[session_id]:
            return context
        # Recurse with message=None: follow-up steps continue the same turn.
        return self.__run_actions(session_id, current_request, None, context,
                                  i - 1, verbose)
    def run_actions(self, session_id, message, context=None,
                    max_steps=DEFAULT_MAX_STEPS, verbose=None):
        """Runs the bot actions for one user message, up to max_steps steps.
        Returns the (possibly updated) conversation context.
        """
        if not self.actions:
            self.throw_must_have_actions()
        if context is None:
            context = {}
        # Figuring out whether we need to reset the last turn.
        # Each new call increments an index for the session.
        # We only care about the last call to run_actions.
        # All the previous ones are discarded (preemptive exit).
        current_request = self._sessions[session_id] + 1 if session_id in self._sessions else 1
        self._sessions[session_id] = current_request
        context = self.__run_actions(session_id, current_request, message,
                                     context, max_steps, verbose)
        # Cleaning up once the last call to run_actions finishes.
        if current_request == self._sessions[session_id]:
            del self._sessions[session_id]
        return context
    def interactive(self, context=None, max_steps=DEFAULT_MAX_STEPS):
        """Runs interactive command line chat between user and bot. Runs
        indefinitely until EOF is entered to the prompt.
        context -- optional initial context. Set to {} if omitted
        max_steps -- max number of steps for run_actions.
        """
        if not self.actions:
            self.throw_must_have_actions()
        if max_steps <= 0:
            raise WitError('max iterations reached')
        if context is None:
            context = {}
        # input/raw_input are not interchangeable between python 2 and 3
        try:
            input_function = raw_input
        except NameError:
            input_function = input
        session_id = uuid.uuid1()
        while True:
            try:
                message = input_function(INTERACTIVE_PROMPT).rstrip()
            except (KeyboardInterrupt, EOFError):
                return
            context = self.run_actions(session_id, message, context, max_steps)
    def throw_if_action_missing(self, action_name):
        """Raises WitError when `action_name` is not a registered action."""
        if action_name not in self.actions:
            raise WitError('unknown action: ' + action_name)
    def throw_must_have_actions(self):
        """Raises WitError: actions are required for run_actions/interactive."""
        raise WitError('You must provide the `actions` parameter to be able to use runActions. ' + LEARN_MORE)
| StarcoderdataPython |
6449264 | from . import oldShorthands
def run(doc):
    """Apply every legacy shorthand transform to *doc*, in order."""
    transforms = (
        oldShorthands.transformShorthandElements,
        oldShorthands.transformProductionPlaceholders,
        oldShorthands.transformMaybePlaceholders,
        oldShorthands.transformAutolinkShortcuts,
        oldShorthands.transformProductionGrammars,
    )
    for transform in transforms:
        transform(doc)
| StarcoderdataPython |
4860919 | from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from utilities import clean_text
import pandas as pd
class LemmaTokenizer(object):
    """Callable that word-tokenizes a document and lemmatizes each token."""
    def __init__(self):
        # WordNet-based lemmatizer shared by all calls.
        self.wnl = WordNetLemmatizer()
    def __call__(self, doc):
        lemmatize = self.wnl.lemmatize
        return [lemmatize(token) for token in word_tokenize(doc)]
# Parse text and return the model's class-probability prediction.
def classify_text(text, mdl):
    """Clean *text* and return mdl.predict_proba over the single cleaned document."""
    return mdl.predict_proba([clean_text(text)])
| StarcoderdataPython |
11220450 | import i3ipc
from .find import Finder
class Executor():
    """Runs commands on dedicated i3 workspaces and dispatches a callback
    when the command's window appears.

    Usage: Executor(prefix).onNewWindow(cb).onDone(done).exec(cmd).loop()
    """
    def __init__(self, prefix):
        self.count = 0
        self.prefix = prefix
        # Default no-op callbacks whose arity matches the call sites in
        # on_new_window below (the original 2-arg default would raise).
        self.callback = lambda executor, i3, window: None
        self.on_done = lambda executor, i3: None
        self.callbacks = {}
    def onNewWindow(self, on_new_window):
        """Set the callback fired for the next exec'd window; returns self."""
        self.callback = on_new_window
        return self
    def onDone(self, on_done):
        """Set the callback fired once all pending callbacks ran; returns self."""
        self.on_done = on_done
        return self
    def exec(self, cmd):
        """Execute *cmd* on a fresh uniquely-named workspace, then restore focus."""
        self.i3 = i3ipc.Connection()
        workspace = self.i3.get_tree().find_focused().workspace().name
        ident = '{prefix}_{count}'.format(prefix=self.prefix, count=self.count)
        self.callbacks[ident] = self.callback
        self.count += 1
        self.i3.command('workspace '+ident)
        self.i3.command('exec '+cmd)
        self.i3.on('window::new', lambda i3, e: self.on_new_window(i3, e))
        self.i3.command('workspace '+workspace)
        return self
    def loop(self):
        """Enter the i3 event loop (blocks until it exits); returns self."""
        self.i3.main()
        return self
    def on_new_window(self, i3, e):
        """Internal window::new handler: dispatch the callback registered
        for the workspace the new window appeared on."""
        windows = Finder(i3.get_tree()).find(id=e.container.id)
        window = windows[0] if windows else None
        if window is None:
            # Window not found in the tree; nothing to dispatch.
            return
        callback = self.callbacks.pop(window.workspace().name, None)
        if callback is not None:
            callback(self, i3, window)
        if not self.callbacks:
            self.on_done(self, i3)
| StarcoderdataPython |
6670788 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def CheckChangeOnUpload(input_api, output_api):
  """Warn when changing md_history without vulcanizing."""
  os_path = input_api.os_path

  def _is_source(affected_file):
    # History sources are .js/.html files, excluding generated artifacts.
    path = affected_file.LocalPath()
    if path.endswith(('externs.js', 'crisper.js', 'vulcanized.html')):
      return False
    return path.endswith('.js') or path.endswith('.html')

  vulcanize_outputs = ('app.vulcanized.html',
                       'app.crisper.js',
                       'lazy_load.vulcanized.html',
                       'lazy_load.crisper.js')
  earliest_vulcanize_change = min(
      os_path.getmtime(f) for f in vulcanize_outputs)

  source_files = input_api.AffectedFiles(file_filter=_is_source)
  latest_history_change = 0
  if source_files:
    latest_history_change = max(
        os_path.getmtime(os_path.basename(f.LocalPath()))
        for f in source_files)

  if latest_history_change > earliest_vulcanize_change:
    return [output_api.PresubmitPromptWarning(
        'Vulcanize must be run when changing files in md_history. See '
        'docs/vulcanize.md.')]
  return []
| StarcoderdataPython |
11318930 | # -*- coding: utf-8 -*-
# -------------------------------------------#
# author: <NAME> #
# email: <EMAIL> #
# -------------------------------------------#
""" XMNLP - Lexical
Model Tree:
lexical
├── label2id.json
├── lexical.onnx
├── trans.npy
└── vocab.txt
"""
import os
import json
from typing import List, Tuple
import numpy as np
from tokenizers import BertWordPieceTokenizer
from xmnlp.base_model import BaseModel
from xmnlp.utils import rematch
MAX_LEN = 512
class LexicalModel(BaseModel):
    """ONNX-backed lexical tagger; session setup comes from BaseModel."""
    def predict(self, token_ids: np.ndarray, segment_ids: np.ndarray) -> np.ndarray:
        """Run the CRF output node over batched token/segment id arrays."""
        feeds = {
            'Input-Token:0': token_ids.astype('float32'),
            'Input-Segment:0': segment_ids.astype('float32'),
        }
        return self.sess.run(['crf/sub_1:0'], feeds)
class LexicalDecoder:
    """CRF decoder around the ONNX lexical model: tokenize, score labels,
    Viterbi-decode, and map label spans back to substrings of the text.
    """
    def __init__(self, model_dir, starts=None, ends=None):
        # trans: CRF label-transition scores (num_labels inferred from its length).
        self.trans = np.load(os.path.join(model_dir, 'trans.npy'))
        self.tokenizer = BertWordPieceTokenizer(os.path.join(model_dir, 'vocab.txt'))
        self.lexical_model = LexicalModel(os.path.join(model_dir, 'lexical.onnx'))
        with open(os.path.join(model_dir, 'label2id.json'), encoding='utf-8') as reader:
            label2id = json.load(reader)
        self.id2label = {int(v): k for k, v in label2id.items()}
        self.num_labels = len(self.trans)
        # Labels NOT allowed to start/end a decoded path (complements of
        # the optional `starts` / `ends` allow-sets).
        self.non_starts = []
        self.non_ends = []
        if starts is not None:
            for i in range(self.num_labels):
                if i not in starts:
                    self.non_starts.append(i)
        if ends is not None:
            for i in range(self.num_labels):
                if i not in ends:
                    self.non_ends.append(i)
    def decode(self, nodes):
        """An elegant viterbi decode implementation
        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/snippets.py#L404
        nodes -- per-token label scores, shape (seq_len, num_labels).
        Returns the highest-scoring label path as a 1-D index array.
        NOTE(review): mutates `nodes` in place when start/end constraints exist.
        """
        # preprocessing: forbid disallowed start/end labels
        nodes[0, self.non_starts] -= np.inf
        nodes[-1, self.non_ends] -= np.inf
        # dynamic programming over transition + emission scores
        labels = np.arange(self.num_labels).reshape((1, -1))
        scores = nodes[0].reshape((-1, 1))
        paths = labels
        for i in range(1, len(nodes)):
            M = scores + self.trans + nodes[i].reshape((1, -1))
            idxs = M.argmax(0)
            scores = M.max(0).reshape((-1, 1))
            paths = np.concatenate([paths[:, idxs], labels], 0)
        # best path: the column with the maximal final score
        return paths[:, scores[:, 0].argmax()]
    def predict(self, text: str) -> List[Tuple[str, str]]:
        """Tag `text`; returns a list of (substring, label) tuples.
        Raises ValueError when the tokenized input exceeds MAX_LEN tokens.
        """
        tokenized = self.tokenizer.encode(text)
        if len(tokenized.tokens) > MAX_LEN:
            raise ValueError('The text is too long (>512) to process')
        token_ids = tokenized.ids
        segment_ids = tokenized.type_ids
        # mapping: token index -> character offsets in the original text
        mapping = rematch(tokenized.offsets)
        token_ids, segment_ids = np.array([token_ids]), np.array([segment_ids])
        nodes = self.lexical_model.predict(token_ids, segment_ids)[0][0]
        labels = self.decode(nodes)
        # Label ids appear to follow a BIO-like scheme: odd ids open an
        # entity, the following even id continues it, 0 is "outside".
        entities, starting = [], False
        for i, label in enumerate(labels):
            if label > 0:
                if label % 2 == 1:
                    starting = True
                    entities.append([[i], self.id2label[(label - 1) // 2]])
                elif starting:
                    entities[-1][0].append(i)
                else:
                    starting = False
            else:
                starting = False
        return [(text[mapping[w[0]][0]:mapping[w[-1]][-1] + 1], l)
                for w, l in entities]
| StarcoderdataPython |
4838645 | import FWCore.ParameterSet.Config as cms
# CMSSW configuration for the heavy-ion track correction analyzer.
# Defines input collections, binning (pt/eta/occupancy) and the track
# selection cuts applied by HITrackCorrectionAnalyzer.
HITrackCorrections = cms.EDAnalyzer('HITrackCorrectionAnalyzer',
    trackSrc = cms.InputTag("hiGeneralTracks"),
    vertexSrc = cms.InputTag("hiSelectedVertex"),
    pfCandSrc = cms.InputTag("particleFlowTmp"),
    jetSrc = cms.InputTag("akPu4CaloJets"),
    tpEffSrc = cms.InputTag('mix','MergedTrackTruth'),
    tpFakSrc = cms.InputTag('mix','MergedTrackTruth'),
    associatorMap = cms.InputTag('tpRecoAssocGeneralTracks'),
    ptBins = cms.vdouble(
        0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45,
        0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95,
        1.0, 1.05, 1.1, 1.15, 1.2,
        1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
        2.5, 3.0, 4.0, 5.0, 7.5, 10.0, 12.0, 15.0,
        20.0, 25.0, 30.0, 45.0, 60.0, 90.0, 120.0,
        180.0, 300.0, 500.0
        ),
    etaBins = cms.vdouble(
        -2.4, -2.0, -1.6, -1.2, -0.8, -0.4, 0.0,
        0.4, 0.8, 1.2, 1.6, 2.0, 2.4
        ),
    occBins = cms.vdouble(0.0, 40.0, 80.0, 120.0, 160.0, 200.0),
    vtxWeightParameters = cms.vdouble( 4.49636e-02, 1.36629e-01, 5.30010e+00,
                                       2.50170e-02, 4.59123e-01, 9.64888e+00 ),
    algoParameters = cms.vint32(4,5,6,7),
    doCaloMatched = cms.bool(True),
    reso = cms.double(2.0),
    crossSection = cms.double(1.0),
    doVtxReweighting = cms.bool(False),
    applyVertexZCut = cms.bool(False),
    vertexZMax = cms.double(15.),
    applyTrackCuts = cms.bool(True),
    qualityString = cms.string("highPurity"),
    dxyErrMax = cms.double(3.0),
    dzErrMax = cms.double(3.0),
    ptErrMax = cms.double(0.1),
    nhitsMin = cms.int32(11),
    partileCharge = cms.int32(2),
    chi2nMax = cms.double(0.15),
    doMomRes = cms.bool(False),
    fillNTuples = cms.bool(False),
    useCentrality = cms.bool(False),
    centralitySrc = cms.InputTag("centralityBin","HFTowers")
)
| StarcoderdataPython |
4863732 | <reponame>NunoEdgarGFlowHub/pymia
"""The evaluator module provides classes to evaluate the metrics on predictions.
All evaluators inherit the :class:`pymia.evaluation.evaluator.Evaluator`, which contains a list of results after
calling :meth:`pymia.evaluation.evaluator.Evaluator.evaluate`. The results can be passed to a writer of the
:mod:`pymia.evaluation.writer` module.
"""
import abc
import typing
import numpy as np
import SimpleITK as sitk
import pymia.evaluation.metric as pymia_metric
class Result:
    def __init__(self, id_: str, label: str, metric: str, value):
        """Represents a result.
        Args:
            id_ (str): The identification of the result (e.g., the subject's name).
            label (str): The label of the result (e.g., the foreground).
            metric (str): The metric.
            value (int, float): The value of the metric.
        """
        self.id_ = id_
        self.label = label
        self.metric = metric
        self.value = value
    def __repr__(self) -> str:
        """Unambiguous representation for logging and debugging."""
        return '{}(id_={!r}, label={!r}, metric={!r}, value={!r})'.format(
            type(self).__name__, self.id_, self.label, self.metric, self.value)
class Evaluator(abc.ABC):
    """Abstract evaluator: holds a list of metrics and accumulates results."""
    def __init__(self, metrics: typing.List[pymia_metric.Metric]):
        """Initializes the evaluator.
        Args:
            metrics (list of pymia_metric.Metric): A list of metrics.
        """
        self.metrics = metrics
        self.results = []
    @abc.abstractmethod
    def evaluate(self,
                 prediction: typing.Union[sitk.Image, np.ndarray],
                 reference: typing.Union[sitk.Image, np.ndarray],
                 id_: str, **kwargs):
        """Evaluates the metrics on a prediction/reference pair.
        Args:
            prediction (typing.Union[sitk.Image, np.ndarray]): The prediction.
            reference (typing.Union[sitk.Image, np.ndarray]): The reference.
            id_ (str): The identification of the case to evaluate.
        """
        raise NotImplementedError
    def clear(self):
        """Discards all accumulated results."""
        self.results = []
class SegmentationEvaluator(Evaluator):
    def __init__(self, metrics: typing.List[pymia_metric.Metric], labels: dict):
        """Represents a segmentation evaluator, evaluating metrics on predictions against references.
        Args:
            metrics (list of pymia_metric.Metric): A list of metrics.
            labels (dict): A dictionary with labels (key of type int) and label descriptions (value of type string).
        """
        super().__init__(metrics)
        self.labels = labels
    def add_label(self, label: typing.Union[tuple, int], description: str):
        """Adds a label with its description to the evaluation.
        Args:
            label (Union[tuple, int]): The label or a tuple of labels that should be merged.
            description (str): The label's description.
        """
        self.labels[label] = description
    def evaluate(self,
                 prediction: typing.Union[sitk.Image, np.ndarray],
                 reference: typing.Union[sitk.Image, np.ndarray],
                 id_: str, **kwargs):
        """Evaluates the metrics on the provided prediction and reference image.
        Appends one Result per (label, metric) pair to self.results.
        Args:
            prediction (typing.Union[sitk.Image, np.ndarray]): The predicted image.
            reference (typing.Union[sitk.Image, np.ndarray]): The reference image.
            id_ (str): The identification of the case to evaluate.
        Raises:
            ValueError: If no labels are defined (see add_label).
        """
        if not self.labels:
            raise ValueError('No labels to evaluate defined')
        # Multi-component (vector) images are not supported.
        if isinstance(prediction, sitk.Image) and prediction.GetNumberOfComponentsPerPixel() > 1:
            raise ValueError('Image has more than one component per pixel')
        if isinstance(reference, sitk.Image) and reference.GetNumberOfComponentsPerPixel() > 1:
            raise ValueError('Image has more than one component per pixel')
        prediction_array = sitk.GetArrayFromImage(prediction) if isinstance(prediction, sitk.Image) else prediction
        reference_array = sitk.GetArrayFromImage(reference) if isinstance(reference, sitk.Image) else reference
        for label, label_str in self.labels.items():
            # get only current label (binary masks; np.in1d also handles merged label tuples)
            prediction_of_label = np.in1d(prediction_array.ravel(), label, True).reshape(prediction_array.shape).astype(np.uint8)
            reference_of_label = np.in1d(reference_array.ravel(), label, True).reshape(reference_array.shape).astype(np.uint8)
            # calculate the confusion matrix for ConfusionMatrixMetric
            confusion_matrix = pymia_metric.ConfusionMatrix(prediction_of_label, reference_of_label)
            # for distance metrics
            distances = None
            # spacing depends on SimpleITK image properties or an isotropic spacing as fallback
            def get_spacing():
                if isinstance(prediction, sitk.Image):
                    # SimpleITK spacing is (x, y, z); reverse to match the numpy (z, y, x) axis order.
                    return prediction.GetSpacing()[::-1]
                else:
                    return (1.0,) * reference_of_label.ndim # use isotropic spacing of 1 mm
            # calculate the metrics
            for param_index, metric in enumerate(self.metrics):
                if isinstance(metric, pymia_metric.ConfusionMatrixMetric):
                    metric.confusion_matrix = confusion_matrix
                # ensure this is checked before NumpyArrayMetric as SpacingMetric is itself a NumpyArrayMetric
                elif isinstance(metric, pymia_metric.SpacingMetric):
                    metric.reference = reference_of_label
                    metric.prediction = prediction_of_label
                    metric.spacing = get_spacing()
                elif isinstance(metric, pymia_metric.NumpyArrayMetric):
                    metric.reference = reference_of_label
                    metric.prediction = prediction_of_label
                elif isinstance(metric, pymia_metric.DistanceMetric):
                    if distances is None:
                        # calculate distances only once
                        distances = pymia_metric.Distances(prediction_of_label, reference_of_label, get_spacing())
                    metric.distances = distances
                self.results.append(Result(id_, label_str, metric.metric, metric.calculate()))
| StarcoderdataPython |
# Amazon DynamoDB service endpoint for the us-east-1 region.
endpoint="http://dynamodb.us-east-1.amazonaws.com"
| StarcoderdataPython |
222497 | import os
import theano
import theano.tensor as T
from neupy import layers, storage, architectures
from imagenet_tools import (CURRENT_DIR, FILES_DIR, load_image,
print_top_n, download_file)
# Theano must compute in float32 to match the stored network weights.
theano.config.floatX = 'float32'
SQUEEZENET_WEIGHTS_FILE = os.path.join(FILES_DIR, 'squeezenet.pickle')
# Networks weight ~4.8 Mb
squeezenet = architectures.squeezenet()
# Download the pretrained weights on first run, then load them into the net.
if not os.path.exists(SQUEEZENET_WEIGHTS_FILE):
    download_file(
        url="http://neupy.s3.amazonaws.com/imagenet-models/squeezenet.pickle",
        filepath=SQUEEZENET_WEIGHTS_FILE,
        description='Downloading weights')
storage.load(squeezenet, SQUEEZENET_WEIGHTS_FILE)
# Prepare the sample image as a 227x227 BGR input (see load_image args).
monkey_image = load_image(
    os.path.join(CURRENT_DIR, 'images', 'titi-monkey.jpg'),
    image_size=(227, 227),
    crop_size=(227, 227),
    use_bgr=True)
predict = squeezenet.compile()
output = predict(monkey_image)
# Print the five most probable classes.
print_top_n(output, n=5)
| StarcoderdataPython |
5182344 | <filename>lecture_11_advanced_topics/hw/hw2.py
"""
You are given the following code:
class Order:
morning_discount = 0.25
def __init__(self, price):
self.price = price
def final_price(self):
return self.price - self.price * self.morning_discount
Make it possible to use different discount programs.
Hint: use strategy behavioural OOP pattern.
https://refactoring.guru/design-patterns/strategy
Example of the result call:
def morning_discount(order):
...
def elder_discount(order):
...
order_1 = Order(100, morning_discount)
assert order_1.final_price() == 75
order_2 = Order(100, elder_discount)
assert order_2.final_price() == 10
"""
| StarcoderdataPython |
3525946 | <gh_stars>10-100
'''
Authors: <NAME>, <NAME>
Emails: <EMAIL>, <EMAIL>
Date: 03/17/2019
Overview: Entry point of autostack.
'''
from autostack.cli import cli
def main():
    """Console entry point of autostack: hand control to the CLI."""
    cli()
| StarcoderdataPython |
11233175 | # Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from python.constructs.loggable import Loggable
import tensorflow as tf
class MarginPlusMeanSquaredError(Loggable):
    """Margin loss on model_output[0] plus a down-weighted mean-squared
    reconstruction term on model_output[1]."""
    def __init__(self):
        Loggable.__init__(self)
    def __call__(self, features, labels, model_output, batch_size):
        """Return (margin loss + 0.0005 * reconstruction SSE) / batch_size."""
        margin = 0.4
        downweight = 0.5
        logits = model_output[0] - 0.5
        # Penalize present classes whose logit falls below +margin ...
        below_margin = tf.cast(tf.less(logits, margin), tf.float32)
        positive_cost = labels * below_margin * tf.pow(logits - margin, 2)
        # ... and absent classes whose logit rises above -margin.
        above_margin = tf.cast(tf.greater(logits, -margin), tf.float32)
        negative_cost = (1 - labels) * above_margin * tf.pow(logits + margin, 2)
        margin_loss = 0.5 * positive_cost + downweight * 0.5 * negative_cost
        recon_loss = tf.square(tf.subtract(features, model_output[1]))
        total = tf.reduce_sum(margin_loss) + tf.reduce_sum(recon_loss * 0.0005)
        return total / batch_size
class CategoricalCrossEntropyPlusMeanSquaredError(Loggable):
    """Categorical cross-entropy on model_output[0] plus a down-weighted
    mean-squared reconstruction term on model_output[1]."""
    def __init__(self, from_logits=True):
        Loggable.__init__(self)
        self._from_logits = from_logits
    def __call__(self, features, labels, model_output, batch_size):
        """Return (sum CE + 0.0005 * sum squared reconstruction error) / batch_size."""
        ce = tf.reduce_sum(tf.keras.losses.categorical_crossentropy(
            labels, model_output[0], from_logits=self._from_logits))
        squared_error = tf.square(tf.subtract(features, model_output[1]))
        reconstruction = tf.reduce_sum(squared_error * 0.0005)
        return (ce + reconstruction) / batch_size
class CategoricalCrossEntropy(Loggable):
    """Summed categorical cross-entropy on model_output[0], normalized by
    batch size; `features` is unused."""
    def __init__(self, from_logits=True):
        Loggable.__init__(self)
        self._from_logits = from_logits
    def __call__(self, features, labels, model_output, batch_size):
        per_example = tf.keras.losses.categorical_crossentropy(
            labels, model_output[0], from_logits=self._from_logits)
        return tf.reduce_sum(per_example) / batch_size
class MeanSquaredError(Loggable):
    """Summed squared error between `features` and model_output[1],
    normalized by batch size.
    Note: `from_logits` is stored for interface symmetry with the other
    losses but never used; `labels` and model_output[0] are ignored.
    """
    def __init__(self, from_logits=True):
        Loggable.__init__(self)
        self._from_logits = from_logits
    def __call__(self, features, labels, model_output, batch_size):
        squared_error = tf.square(tf.subtract(features, model_output[1]))
        return tf.reduce_sum(squared_error) / batch_size
11246194 | <reponame>Oyekunle-Mark/eat-that-cake
def find_repeat(numbers):
    """Return a value that appears more than once in *numbers*.

    Scans a sorted copy (the caller's list is NOT mutated, unlike the
    previous in-place sort) and returns the first repeated value found,
    or None when every value is unique.

    Bug fixed: the previous sentinel was 0, so a list merely *starting*
    with a single 0 was wrongly reported as containing a repeat.
    """
    # A fresh object() compares unequal to any ordinary value, so it is a
    # safe sentinel even when the list legitimately contains 0 or None.
    previous_number = object()
    for number in sorted(numbers):
        # after sorting, duplicates are adjacent
        if number == previous_number:
            return number
        previous_number = number
    return None
| StarcoderdataPython |
1810047 | ''' show_arp.py
IOSXR parsers for the following show commands:
* show arp detail
* show arp vrf <WORD> detail
* show arp traffic detail
'''
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
# parser utils
from genie.libs.parser.utils.common import Common
# =======================================
# Schema for 'show arp detail'
# 'show arp vrf <WORD> detail'
# =======================================
class ShowArpDetailSchema(MetaParser):
    """Schema for
        show arp detail
        show arp vrf <WORD> detail
    """
    # Structure: interfaces -> <interface> -> ipv4 -> neighbors -> <ip> -> fields.
    schema = {
        'interfaces': {
            Any(): {
                'ipv4': {
                    'neighbors': {
                        Any(): {
                            'ip': str,
                            'link_layer_address': str,
                            'origin': str,
                            'age': str,
                            'type': str,
                        },
                    }
                }
            }
        },
    }
# =======================================
# Parser for 'show arp detail'
# 'show arp vrf <WORD> detail'
# =======================================
class ShowArpDetail(ShowArpDetailSchema):
    """Parser for:
        show arp detail
        show arp vrf <WORD> detail
    parser class - implements detail parsing mechanisms for cli,xml and yang output.
    """
    cli_command = ['show arp vrf {vrf} detail','show arp detail']
    def cli(self, vrf=None,output=None):
        # Execute on the device unless pre-captured output is supplied.
        if output is None:
            if vrf:
                cmd = self.cli_command[0].format(vrf=vrf)
            else:
                cmd = self.cli_command[1]
            out = self.device.execute(cmd)
        else:
            out = output
        # 10.1.2.1 02:55:43 fa16.3e4c.b963 Dynamic Dynamic ARPA GigabitEthernet0/0/0/0
        # 10.1.2.2 - fa16.3ee4.1462 Interface Unknown ARPA GigabitEthernet0/0/0/0
        p1 = re.compile(r'^(?P<ip_address>[\w\.]+) +(?P<age>[\w\:\-]+)'
            ' +(?P<mac_address>[\w\.]+) +(?P<state>\w+) +(?P<flag>\w+)'
            ' +(?P<type>[\w\.]+) +(?P<interface>[\w\.\/]+)$')
        # initial variables
        ret_dict = {}
        for line in out.splitlines():
            line = line.strip()
            if not line:
                continue
            m = p1.match(line)
            if m:
                group = m.groupdict()
                address = group['ip_address']
                interface = group['interface']
                final_dict = ret_dict.setdefault('interfaces', {}).setdefault(
                    interface, {}).setdefault('ipv4', {}).setdefault(
                    'neighbors', {}).setdefault(address, {})
                final_dict['ip'] = address
                final_dict['link_layer_address'] = group['mac_address']
                final_dict['age'] = group['age']
                # An age of '-' marks an interface-owned entry -> static origin.
                if group['age'] == '-':
                    final_dict['origin'] = 'static'
                else:
                    final_dict['origin'] = 'dynamic'
                final_dict['type'] = group['type']
                continue
        return ret_dict
# =======================================
# Schema for 'show arp traffic detail'
# =======================================
class ShowArpTrafficDetailSchema(MetaParser):
    """ Schema for show arp traffic detail """
    # Top-level keys are rack/slot/module identifiers (e.g. '0/0/CPU0'),
    # each holding packet statistics and ARP cache counters.
    schema = {
        Any():
            {'statistics':
                {'in_requests_pkts': int,
                'in_replies_pkts': int,
                'out_requests_pkts': int,
                'out_replies_pkts': int,
                'out_gratuitous_pkts': int,
                'out_proxy': int,
                'out_local_proxy': int,
                'subscriber_intf_requests': int,
                'subscriber_intf_replies': int,
                'subscriber_intf_gratuitous': int,
                'resolve_rcvd_requests': int,
                'resolve_dropped_requests': int,
                'out_of_memory_errors': int,
                'no_buffers_errors': int,
                'out_of_subnet_errors': int,
            },
            'cache':
                {'total_arp_entries': int,
                'dynamic': int,
                'interface': int,
                'standby': int,
                'alias': int,
                'static': int,
                'dhcp': int,
                'dhcp': int,
                'ip_packet_drop_count': int,
                'total_arp_idb': int,
            }
        }
    }
# =======================================
# Parser for 'show arp traffic detail'
# =======================================
class ShowArpTrafficDetail(ShowArpTrafficDetailSchema):
    """ Parser for show arp traffic detail """
    cli_command = 'show arp traffic detail'
    def cli(self,output=None):
        # Execute on the device unless pre-captured output is supplied.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output
        # 0/0/CPU0
        p1 = re.compile(r'^(?P<rack_slot_module>[\w\/]+)$')
        # ARP statistics:
        p2 = re.compile(r'^ARP +statistics:$')
        # Recv: 108 requests, 8 replies
        p3 = re.compile(r'^Recv: +(?P<in_requests_pkts>\w+) +requests,'
            ' +(?P<in_replies_pkts>[\w]+) +replies$')
        # Sent: 8 requests, 108 replies (0 proxy, 0 local proxy, 2 gratuitous)
        p4 = re.compile(r'^Sent: +(?P<out_requests_pkts>\w+) +requests,'
            ' +(?P<out_replies_pkts>\w+) +replies +\((?P<out_proxy>\w+)'
            ' +proxy, +(?P<out_local_proxy>\w+) +local +proxy,'
            ' +(?P<out_gratuitous_pkts>\w+) +gratuitous\)$')
        # 0 requests recv, 0 replies sent, 0 gratuitous replies sent
        p5 = re.compile(r'^(?P<subscriber_intf_requests>\w+) +requests +recv,'
            ' +(?P<subscriber_intf_replies>\w+) +replies +sent,'
            ' +(?P<subscriber_intf_gratuitous>\w+) +gratuitous +replies +sent$')
        # Resolve requests rcvd: 0
        p6 = re.compile(r'^Resolve +requests +rcvd:'
            ' +(?P<resolve_rcvd_requests>\w+)$')
        # Resolve requests dropped: 0
        p7 = re.compile(r'^Resolve +requests +dropped:'
            ' +(?P<resolve_dropped_requests>\w+)$')
        # Errors: 0 out of memory, 0 no buffers, 0 out of subnet
        # NOTE(review): '(subnet|sunbet)' also accepts a typo variant; this
        # looks deliberate (matching device output) -- keep as-is.
        p8 = re.compile(r'^Errors:'
            ' +(?P<out_of_memory_errors>\w+) +out +of +memory,'
            ' +(?P<no_buffers_errors>\w+) +no +buffers,'
            ' +(?P<out_of_subnet_errors>\w+) +out +of +(subnet|sunbet)$')
        # ARP cache:
        p9 = re.compile(r'^ARP +cache:$')
        # Total ARP entries in cache: 4
        p10 = re.compile(r'^Total +ARP +entries +in +cache:'
            ' +(?P<total_arp_entries>\w+)$')
        # Dynamic: 2, Interface: 2, Standby: 0
        p11 = re.compile(r'^Dynamic: +(?P<dynamic>\w+),'
            ' +Interface: +(?P<interface>\w+),'
            ' +Standby: +(?P<standby>\w+)$')
        # Alias: 0, Static: 0, DHCP: 0
        p12 = re.compile(r'^Alias: +(?P<alias>\w+),'
            ' +Static: +(?P<static>\w+),'
            ' +DHCP: +(?P<dhcp>\w+)$')
        # IP Packet drop count for node 0/0/CPU0: 0
        p13 = re.compile(r'^IP +Packet +drop +count +for +node'
            ' +(?P<ip_packet_rack_slot_module>[\w\/]+):'
            ' +(?P<ip_packet_drop_count>\w+)$')
        # Total ARP-IDB:2
        p14 = re.compile(r'^Total +ARP-IDB:(?P<total_arp_idb>\w+)$')
        # initial variables
        ret_dict = {}
        rack_slot_module = ''
        for line in out.splitlines():
            line = line.strip()
            if not line:
                continue
            # Node header: subsequent sections belong to this rack/slot/module.
            m = p1.match(line)
            if m:
                groups = m.groupdict()
                rack_slot_module = groups['rack_slot_module']
                continue
            # Section headers (p2/p9) rebind final_dict, which the counter
            # patterns below fill until the next section starts.
            m = p2.match(line)
            if m:
                final_dict = ret_dict.setdefault(
                    rack_slot_module, {}).setdefault('statistics', {})
                continue
            m = p3.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p4.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p5.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p6.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p7.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p8.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p9.match(line)
            if m:
                final_dict = ret_dict[rack_slot_module].setdefault('cache', {})
                continue
            m = p10.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p11.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p12.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
            m = p13.match(line)
            if m:
                groups = m.groupdict()
                final_dict['ip_packet_drop_count'] = \
                    int(groups['ip_packet_drop_count'])
                continue
            m = p14.match(line)
            if m:
                groups = m.groupdict()
                final_dict.update({k: \
                    int(v) for k, v in groups.items()})
                continue
return ret_dict | StarcoderdataPython |
1983469 | <gh_stars>0
from abc import ABC, abstractmethod
class ApiBase(ABC):
    """Abstract contract that every API implementation must satisfy."""
    @abstractmethod
    def get_url(self) -> str:
        """Return the URL this API targets."""
    @abstractmethod
    def process_import(self) -> int:
        """Run the import and return its integer result."""
| StarcoderdataPython |
9663173 | """Backups and commit the configuration, and handles commit confirm."""
import logging
from napalm.base.exceptions import CommitError
from napalm_arubaoss.helper.utils import backup_config, commit_candidate
from napalm_arubaoss.helper.get_config import get_config
from napalm_arubaoss.helper.load_replace_candidate import load_replace_candidate
from napalm_arubaoss.helper.has_pending_commit import has_pending_commit
logger = logging.getLogger("arubaoss.helper.commit_config")
def commit_config(self, message="", revert_in=None):
    """
    Backups and commit the configuration, and handles commit confirm.
    :param self: object from class
    :param message: Optional - configuration session commit message
    :type message: str
    :param revert_in: Optional - number of seconds before the configuration
        will be rolled back using a commit confirm mechanism.
    :type revert_in: int|None
    """
    if message:
        # Commit messages are not supported by this platform's API.
        msg = "\"message\" support has not been added " \
              "for this getter on this platform."
        raise NotImplementedError(msg)
    if not revert_in:
        revert_in = 0
    # Exact type check (identity form of the previous `== int` test);
    # note this also rejects bool, as before.
    if type(revert_in) is not int:
        # Bug fix: error message previously read "confirn".
        err_msg = "Invalid \"revert_in\" value, " \
                  "commit confirm cannot be carried out"
        logger.error(err_msg)
        raise TypeError(err_msg)
    if has_pending_commit(self=self):
        raise CommitError("Pending commit confirm already in process!")
    # Snapshot the running config so a rollback target exists.
    backup_config(self=self)
    # Lazy %-style logging avoids formatting when debug is disabled.
    logger.debug('Confirm rollback time is %s', revert_in)
    if revert_in > 0:
        # Append a delayed ROLLBACK job to the candidate so the device
        # restores the backup unless the commit is confirmed in time.
        candidate = get_config(
            self=self,
            retrieve='candidate'
        )['candidate']
        candidate = candidate[:-2]  # remove unneeded chars
        candidate_confirm = (
            f"{candidate}"
            f"job ROLLBACK delay {str(revert_in)} "
            "\"cfg-restore flash backup_running\"\n"
        )
        load_replace_candidate(self=self, config=candidate_confirm)
    commit_candidate(self=self, config="REST_Payload_Backup")
| StarcoderdataPython |
115119 | <filename>test/test_topology_handler.py<gh_stars>0
import unittest
import sdxdatamodel.parsing
from sdxdatamodel.parsing.topologyhandler import TopologyHandler
from sdxdatamodel.parsing.exceptions import DataModelException
TOPOLOGY_AMLIGHT = './test/data/amlight.json'
class TestTopologyHandler(unittest.TestCase):
    """Tests for TopologyHandler topology import and parsed content.

    The original test methods returned True/False; unittest ignores
    return values, so those tests could never fail. They now use real
    assertions.
    """

    def setUp(self):
        self.handler = TopologyHandler(TOPOLOGY_AMLIGHT)  # noqa: E501
        self.handler.import_topology()

    def tearDown(self):
        pass

    def testImportTopology(self):
        # A DataModelException during access must fail the test explicitly.
        try:
            print("Test Topology")
            print(self.handler.topology)
        except DataModelException as e:
            self.fail(f"Topology import raised DataModelException: {e}")

    def testImportTopologyNodes(self):
        print("Test Nodes: at least one:")
        nodes = self.handler.topology.nodes
        self.assertIsNotNone(nodes, "Nodes are empty")
        self.assertGreater(len(nodes), 0, "Nodes are empty")
        print(nodes[0])

    def testImportTopologyLinks(self):
        print("Test Links: at least one")
        links = self.handler.topology.links
        self.assertIsNotNone(links, "Links are empty")
        self.assertGreater(len(links), 0, "Links are empty")
        print(links[0])
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
260005 | #!/usr/bin/env python
"""
Running tests for djangosaml2_spid application.
"""
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
if __name__ == "__main__":
    # Point Django at the test settings before initialising the framework.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
    django.setup()

    verbose = '-v' in sys.argv
    fail_fast = '-f' in sys.argv
    runner_cls = get_runner(settings)
    runner = runner_cls(verbosity=2 if verbose else 1, failfast=fail_fast)

    # Forward only explicit test labels; anything else is a CLI option.
    labels = []
    for arg in sys.argv[1:]:
        if arg.startswith('tests') or arg.startswith('djangosaml2_spid'):
            labels.append(arg)

    failures = runner.run_tests(labels or ['tests', 'src'])
    sys.exit(bool(failures))
| StarcoderdataPython |
6641578 | import nuke
def duplicate_node(node, to_file=None):
    """Duplicate a node by reusing the regular copy-and-paste machinery.

    Going through the clipboard (instead of copying knob values with
    toScript/fromScript) also handles subtleties such as custom knobs.

    :param node: the node to duplicate.
    :param to_file: optional path; when given, the node is written to that
        file instead of being pasted into the tree, and None is returned.
        Otherwise the newly pasted node is returned.
    """
    # Remember the current selection so it can be restored afterwards.
    orig_selection = nuke.selectedNodes()

    # Select only the target node.
    for n in nuke.selectedNodes():
        n.setSelected(False)
    node.setSelected(True)

    if to_file is not None:
        # Write the node to the file, then restore the original selection.
        # (The original code deselected orig_selection instead of
        # re-selecting it, leaving `node` selected — fixed here.)
        nuke.nodeCopy(to_file)
        node.setSelected(False)
        for n in orig_selection:
            n.setSelected(True)
        return None

    # Copy the selected node to the clipboard and paste it; the pasted
    # node becomes the current selection.
    nuke.nodeCopy("%clipboard%")
    node.setSelected(False)
    nuke.nodePaste("%clipboard%")
    new_node = nuke.selectedNode()

    # Restore the original selection.
    for n in nuke.selectedNodes():
        n.setSelected(False)
    for n in orig_selection:
        n.setSelected(True)
    return new_node
| StarcoderdataPython |
5149341 | """
Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding:utf8 -*-
import json
from urllib import parse
from paddleflow.common.exception.paddleflow_sdk_exception import PaddleFlowSDKException
from paddleflow.common import api
from paddleflow.job import JobServiceApi
from paddleflow.log import LogServiceApi
from paddleflow.user import UserServiceApi
from paddleflow.queue import QueueServiceApi
from paddleflow.fs import FSServiceApi
from paddleflow.run import RunServiceApi
from paddleflow.pipeline import PipelineServiceApi
from paddleflow.utils import api_client
from paddleflow.cluster import ClusterServiceApi
from paddleflow.flavour import FlavouriceApi
class Client(object):
"""Client class """
def __init__(self, paddleflow_server, username, password, paddleflow_port=8080):
    """Initialize the client with the server address and user credentials.

    :param paddleflow_server: host name or IP of the paddleflow server
    :type paddleflow_server: str
    :param username: user name used for later login calls
    :type username: str
    :param password: password used for later login calls
    :type password: str
    :param paddleflow_port: TCP port of the paddleflow server (default 8080)
    :type paddleflow_port: int
    :raises PaddleFlowSDKException: if paddleflow_server is none or empty
    """
    self.paddleflow_server = None
    self.token = None  # NOTE(review): never assigned in this class — confirm still needed
    self.user_id = username
    self.header = None  # populated with the auth header by login()
    self.password = password
    if paddleflow_server is None or paddleflow_server.strip() == "":
        raise PaddleFlowSDKException("InvalidServer", "paddleflow server should not be none or empty")
    self.paddleflow_server = "http://%s:%s" % (paddleflow_server, paddleflow_port)
def login(self, user_name, password):
    """Log in to the paddleflow server and cache the authorization header.

    :param user_name: user name to authenticate with
    :type user_name: str
    :param password: password to authenticate with
    :type password: str
    :return: (True, None) on success, (False, error message) on failure
    :raises PaddleFlowSDKException: on invalid arguments or HTTP errors
    """
    if self.paddleflow_server is None:
        raise PaddleFlowSDKException("InvalidClient", "client should be initialized")
    if user_name is None or user_name.strip() == "":
        raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty")
    if password is None or password.strip() == "":
        raise PaddleFlowSDKException("InvalidPassWord", "password should not be none or empty")
    body = {
        "username": user_name,
        "password": password
    }
    response = api_client.call_api(method="POST", url=parse.urljoin(self.paddleflow_server, api.PADDLE_FLOW_LOGIN),
                                   json=body)
    if not response:
        raise PaddleFlowSDKException("Connection Error", "login failed due to HTTPError")
    data = json.loads(response.text)
    # A 'message' key in the response body signals a login failure.
    if 'message' in data:
        return False, data['message']
    # Cache credentials and the authorization header for later API calls.
    self.user_id = user_name
    self.password = password
    self.header = {
        "x-pf-authorization": data['authorization']
    }
    return True, None
def pre_check(self):
"""
precheck to check header
"""
if not self.user_id or not self.header:
raise PaddleFlowSDKException("InvalidOperator", "should login first")
def add_user(self, user_name, password):
"""
:param user_name:
:type user_name: str
:param passWord
:type password: str
:return
true, None if success
false, message if failed
"""
self.pre_check()
if user_name is None or user_name.strip() == "":
raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty")
if password is None or password.strip() == "":
raise PaddleFlowSDKException("InvalidPassWord", "password should not be none or empty")
return UserServiceApi.add_user(self.paddleflow_server, user_name, password, self.header)
def del_user(self, user_name):
"""
:param user_name:
:type user_name: str
:param passWord
:type password: str
:return
:true,None if success
:false, message if failed
"""
self.pre_check()
if user_name is None or user_name.strip() == "":
raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty")
return UserServiceApi.del_user(self.paddleflow_server, user_name, self.header)
def list_user(self, maxsize=100):
"""list user info"""
self.pre_check()
return UserServiceApi.list_user(self.paddleflow_server, self.header, maxsize)
def update_password(self, name, password):
"""update name's password"""
self.pre_check()
if name is None or name.strip() == "":
raise PaddleFlowSDKException("InvalidUser", "user_name should not be none or empty")
if password is None or password.strip() == "":
raise PaddleFlowSDKException("InvalidPassWord", "password should not be none or empty")
return UserServiceApi.update_password(self.paddleflow_server, name, password, self.header)
def add_queue(self, name, namespace, clusterName, maxResources, minResources=None,
              schedulingPolicy=None, location=None, quotaType=None):
    """Create a queue.

    :param name: queue name
    :param namespace: kubernetes namespace the queue belongs to
    :param clusterName: name of the cluster hosting the queue
    :param maxResources: dict that must contain non-None 'cpu' and 'mem'
    :param minResources: optional minimum resources dict
    :param schedulingPolicy: optional scheduling policy
    :param location: optional location info
    :param quotaType: optional quota type
    :raises PaddleFlowSDKException: on missing or empty required arguments
    """
    self.pre_check()
    if namespace is None or namespace.strip() == "":
        raise PaddleFlowSDKException("InvalidNameSpace", "namespace should not be none or empty")
    if name is None or name.strip() == "":
        raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty")
    if clusterName is None or clusterName.strip() == "":
        raise PaddleFlowSDKException("InvalidQueueClusterName", "clustername should not be none or empty")
    # Use .get() so missing 'cpu'/'mem' keys raise the SDK exception
    # instead of an unexpected KeyError.
    if maxResources is None or maxResources.get('cpu') is None or maxResources.get('mem') is None:
        raise PaddleFlowSDKException("InvalidQueueMaxResources", "queue maxResources cpu or mem should not be none or empty")
    return QueueServiceApi.add_queue(self.paddleflow_server, name, namespace, clusterName, maxResources,
                                     minResources, schedulingPolicy, location, quotaType, self.header)
def grant_queue(self, username, queuename):
""" grant queue"""
self.pre_check()
if username is None or username.strip() == "":
raise PaddleFlowSDKException("InvalidName", "name should not be none or empty")
if queuename is None or queuename.strip() == "":
raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty")
return QueueServiceApi.grant_queue(self.paddleflow_server, username, queuename, self.header)
def ungrant_queue(self, username, queuename):
""" grant queue"""
self.pre_check()
if username is None or username.strip() == "":
raise PaddleFlowSDKException("InvalidName", "name should not be none or empty")
if queuename is None or queuename.strip() == "":
raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty")
return QueueServiceApi.ungrant_queue(self.paddleflow_server, username, queuename, self.header)
def show_queue_grant(self, username=None, maxsize=100):
"""show queue grant info """
self.pre_check()
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidName", "name should not be none or empty")
return QueueServiceApi.show_grant(self.paddleflow_server, username, self.header, maxsize)
def del_queue(self, queuename):
""" delete queue"""
self.pre_check()
if queuename is None or queuename.strip() == "":
raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty")
return QueueServiceApi.del_queue(self.paddleflow_server, queuename, self.header)
def list_queue(self, maxsize=100, marker=None):
"""
list queue
"""
self.pre_check()
return QueueServiceApi.list_queue(self.paddleflow_server, self.header, maxsize, marker)
def show_queue(self, queuename):
"""
show queue info
"""
self.pre_check()
if queuename is None or queuename.strip() == "":
raise PaddleFlowSDKException("InvalidQueueName", "queuename should not be none or empty")
return QueueServiceApi.show_queue(self.paddleflow_server, queuename, self.header)
def list_flavour(self, maxsize=100, marker=None, clustername="", key=""):
"""
list flavour
"""
self.pre_check()
return FlavouriceApi.list_flavour(host=self.paddleflow_server, header=self.header,
maxsize=maxsize, marker=marker, clustername=clustername, key=key)
def show_flavour(self, name):
"""
show flavour
"""
self.pre_check()
if name is None or name.strip() == "":
raise PaddleFlowSDKException("InvalidFlavourName", "name should not be none or empty")
return FlavouriceApi.show_flavour(self.paddleflow_server, name, self.header)
def add_flavour(self, name, cpu, memory, scalar_resources=None, cluster_name=None):
""" add flavour"""
self.pre_check()
if name is None or name.strip() == "":
raise PaddleFlowSDKException("InvalidFlavourName", "name should not be none or empty")
if cpu is None or cpu.strip() == "":
raise PaddleFlowSDKException("InvalidFlavourName", "cpu should not be none or empty")
if memory is None or memory.strip() == "":
raise PaddleFlowSDKException("InvalidFlavourName", "memory should not be none or empty")
return FlavouriceApi.add_flavour(self.paddleflow_server, name, cpu=cpu, mem=memory, scalar_resources=scalar_resources,
cluster_name=cluster_name, header=self.header)
def del_flavour(self, flavourname):
""" delete flavour"""
self.pre_check()
if flavourname is None or flavourname.strip() == "":
raise PaddleFlowSDKException("InvalidFlavourName", "flavourname should not be none or empty")
return FlavouriceApi.del_flavour(self.paddleflow_server, flavourname, self.header)
def update_flavour(self, name, cpu=None, memory=None, scalar_resources=None, cluster_name=None):
"""
update cluster
"""
self.pre_check()
if name is None or name.strip() == "":
raise PaddleFlowSDKException("InvalidFlavourName", "name should not be none or empty")
return FlavouriceApi.update_flavour(self.paddleflow_server, name, cpu=cpu, mem=memory, scalar_resources=scalar_resources,
cluster_name=cluster_name, header=self.header)
def add_fs(self, fsname, url, username=None, properties=None):
"""
add fs
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if url is None or url.strip() == "":
raise PaddleFlowSDKException("InvalidURL", "url should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.add_fs(self.paddleflow_server, fsname, url, self.user_id, properties, userinfo)
def show_fs(self, fsname, username=None):
"""
show fs
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.show_fs(self.paddleflow_server, fsname, self.user_id, userinfo)
def delete_fs(self, fsname, username=None):
"""
delete fs
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.delete_fs(self.paddleflow_server, fsname, self.user_id, userinfo)
def list_fs(self, username=None, maxsize=100):
"""
list fs
"""
self.pre_check()
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.list_fs(self.paddleflow_server, self.user_id, userinfo, maxsize)
def mount(self, fsname, path, mountOptions, username=None):
    """Mount a file system.

    :param fsname: name of the fs to mount
    :param path: local mount point
    :param mountOptions: mount options passed through to the fs service
    :param username: optional user to act on behalf of
    :raises PaddleFlowSDKException: on missing or empty required arguments
    """
    self.pre_check()
    if username and username.strip() == "":
        raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
    # Match the validation used by the sibling fs helpers: reject None and
    # whitespace-only values, not just the empty string (a None fsname
    # previously slipped past `fsname == ""` and crashed downstream).
    if fsname is None or fsname.strip() == "":
        raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
    if path is None or path.strip() == "":
        raise PaddleFlowSDKException("InvalidPath", "path should not be none or empty")
    userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
    return FSServiceApi.mount(self.paddleflow_server, fsname, path,
                              self.user_id, self.password, mountOptions, userinfo)
def create_cache(self, fsname, options, username=None):
"""
create cache config for fs
"""
self.pre_check()
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
if fsname == "":
raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.create_cache(self.paddleflow_server, fsname, options, userinfo)
def update_fs_cache(self, fsname, params, username=None):
"""
update cache config for fs
"""
self.pre_check()
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
if fsname == "":
raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.update_cache(self.paddleflow_server, fsname, params, userinfo)
def get_fs_cache(self, fsname, username=None):
"""
get cache config for fs
"""
self.pre_check()
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
if fsname == "":
raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
userinfo = {'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.get_cache(self.paddleflow_server, fsname, userinfo)
def delete_fs_cache(self, fsname, username=None):
"""
delete fs cache config
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.delete_cache(self.paddleflow_server, fsname, userinfo)
def add_link(self, fsname, fspath, url, username=None, properties=None):
"""
add link
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if url is None or url.strip() == "":
raise PaddleFlowSDKException("InvalidURL", "url should not be none or empty")
if fspath is None or fspath.strip() == "":
raise PaddleFlowSDKException("InvalidFSPath", "fspath should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.add_link(self.paddleflow_server, fsname, fspath, url, self.user_id, properties, userinfo)
def delete_link(self, fsname, fspath, username=None):
"""
delete fs
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
if fspath is None or fspath.strip() == "":
raise PaddleFlowSDKException("InvalidFSPath", "fspath should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.delete_link(self.paddleflow_server, fsname, fspath, self.user_id, userinfo)
def list_link(self, fsname, username=None, maxsize=100):
"""
list fs
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.list_link(self.paddleflow_server, fsname, self.user_id, userinfo, maxsize)
def show_link(self, fsname, fspath, username=None):
"""
show fs
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if fspath is None or fspath.strip() == "":
raise PaddleFlowSDKException("InvalidFSPath", "fspath should not be none or empty")
userinfo={'header': self.header, 'name': username, 'host': self.paddleflow_server}
return FSServiceApi.show_link(self.paddleflow_server, fsname, fspath, self.user_id, userinfo)
def create_run(self, fsname, username=None, runname=None, desc=None,
runyamlpath=None, runyamlraw=None, pipelineid=None, param=None, disabled=None, dockerenv=None):
"""
create run
"""
self.pre_check()
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
if runname and runname.strip() == "":
raise PaddleFlowSDKException("InvalidRunName", "runname should not be none or empty")
return RunServiceApi.add_run(self.paddleflow_server, fsname, runname, desc,
param, username, runyamlpath, runyamlraw, pipelineid, self.header, disabled, dockerenv)
def list_run(self, fsname=None, username=None, runid=None, runname=None, maxsize=100, marker=None):
"""
list run
"""
self.pre_check()
if fsname and fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFSName", "fsname should not be none or empty")
if username and username.strip() == "":
raise PaddleFlowSDKException("InvalidUserName", "username should not be none or empty")
if runid and runid.strip() == "":
raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
return RunServiceApi.list_run(self.paddleflow_server, fsname,
username, runid, runname, self.header, maxsize, marker)
def status_run(self, runid):
"""
status run
"""
self.pre_check()
if runid is None or runid.strip() == "":
raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
return RunServiceApi.status_run(self.paddleflow_server, runid, self.header)
def stop_run(self, runid, force=False):
"""
stop run
"""
self.pre_check()
if runid is None or runid.strip() == "":
raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
if not isinstance(force, bool):
raise PaddleFlowSDKException("InvalidParam", "the Parameter [force] should be an instance of bool")
return RunServiceApi.stop_run(self.paddleflow_server, runid, self.header, force)
def create_cluster(self, clustername, endpoint, clustertype, credential=None,
description=None, source=None, setting=None, status=None, namespacelist=None, version=None):
"""
create cluster
"""
self.pre_check()
if clustername is None or clustername.strip() == "":
raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty")
if endpoint is None or endpoint.strip() == "":
raise PaddleFlowSDKException("InvalidEndpoint", "endpoint should not be none or empty")
if clustertype is None or clustertype.strip() == "":
raise PaddleFlowSDKException("InvalidClusterType", "clustertype should not be none or empty")
return ClusterServiceApi.create_cluster(self.paddleflow_server, clustername, endpoint, clustertype,
credential, description, source, setting, status, namespacelist, version, self.header)
def list_cluster(self, maxkeys=100, marker=None, clustername=None, clusterstatus=None):
"""
list cluster
"""
self.pre_check()
return ClusterServiceApi.list_cluster(self.paddleflow_server, maxkeys, marker,
clustername, clusterstatus, self.header)
def show_cluster(self, clustername):
"""
status cluster
"""
self.pre_check()
if clustername is None or clustername == "":
raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty")
return ClusterServiceApi.show_cluster(self.paddleflow_server, clustername, self.header)
def delete_cluster(self, clustername):
"""
delete cluster
"""
self.pre_check()
if clustername is None or clustername == "":
raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty")
return ClusterServiceApi.delete_cluster(self.paddleflow_server, clustername, self.header)
def update_cluster(self, clustername, endpoint=None, credential=None, clustertype=None,
description=None, source=None, setting=None, status=None, namespacelist=None, version=None):
"""
update cluster
"""
self.pre_check()
if clustername is None or clustername == "":
raise PaddleFlowSDKException("InvalidClusterName", "clustername should not be none or empty")
return ClusterServiceApi.update_cluster(self.paddleflow_server, clustername, endpoint, credential,
clustertype, description, source, setting, status, namespacelist, version, self.header)
def list_cluster_resource(self, clustername=None):
"""
list cluster resource
"""
self.pre_check()
return ClusterServiceApi.list_cluster_resource(self.paddleflow_server, clustername, self.header)
def create_pipeline(self, fsname, yamlpath, name=None, username=None):
"""
create pipeline
"""
self.pre_check()
if fsname is None or fsname.strip() == "":
raise PaddleFlowSDKException("InvalidFsName", "fsname should not be none or empty")
if yamlpath is None or yamlpath.strip() == "":
raise PaddleFlowSDKException("InvalidYamlPath", "yamlpath should not be none or empty")
return PipelineServiceApi.create_pipeline(self.paddleflow_server, fsname, yamlpath, name,
username, self.header)
def list_pipeline(self, userfilter=None, fsfilter=None, namefilter=None, maxkeys=None, marker=None):
"""
list pipeline
"""
self.pre_check()
return PipelineServiceApi.list_pipeline(self.paddleflow_server, userfilter, fsfilter,
namefilter, maxkeys, marker, self.header)
def show_pipeline(self, pipelineid):
"""
status pipeline
"""
self.pre_check()
if pipelineid is None or pipelineid == "":
raise PaddleFlowSDKException("InvalidPipelineID", "pipelineid should not be none or empty")
return PipelineServiceApi.show_pipeline(self.paddleflow_server, pipelineid, self.header)
def delete_pipeline(self, pipelineid):
"""
delete pipeline
"""
self.pre_check()
if pipelineid is None or pipelineid == "":
raise PaddleFlowSDKException("InvalidPipelineID", "pipelineid should not be none or empty")
return PipelineServiceApi.delete_pipeline(self.paddleflow_server, pipelineid, self.header)
def retry_run(self, runid):
"""
retry run
"""
self.pre_check()
if runid is None or runid == "":
raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
return RunServiceApi.retry_run(self.paddleflow_server, runid, self.header)
def delete_run(self, runid):
"""
status run
"""
self.pre_check()
if runid is None or runid == "":
raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
return RunServiceApi.delete_run(self.paddleflow_server, runid, self.header)
def artifact(self, userfilter=None, fsfilter=None, runfilter=None, typefilter=None, pathfilter=None,
maxkeys=None, marker=None):
"""
artifact
"""
self.pre_check()
return RunServiceApi.artifact(self.paddleflow_server, userfilter, fsfilter,
runfilter, typefilter, pathfilter, maxkeys, marker, self.header)
def list_cache(self, userfilter=None, fsfilter=None, runfilter=None,
maxkeys=None, marker=None):
"""
list run cache
"""
self.pre_check()
return RunServiceApi.list_runcache(self.paddleflow_server, userfilter, fsfilter,
runfilter, maxkeys, marker, self.header)
def show_cache(self, cacheid):
"""
status pipeline
"""
self.pre_check()
if cacheid is None or cacheid == "":
raise PaddleFlowSDKException("InvalidCacheID", "cacheid should not be none or empty")
return RunServiceApi.show_runcache(self.paddleflow_server, cacheid, self.header)
def delete_cache(self, cacheid):
"""
status pipeline
"""
self.pre_check()
if cacheid is None or cacheid == "":
raise PaddleFlowSDKException("InvalidCacheID", "cacheid should not be none or empty")
return RunServiceApi.delete_runcache(self.paddleflow_server, cacheid, self.header)
def show_log(self, runid, jobid=None, pagesize=None, pageno=None, logfileposition=None):
"""
show run log
"""
self.pre_check()
if runid is None or runid == "":
raise PaddleFlowSDKException("InvalidRunID", "runid should not be none or empty")
return LogServiceApi.get_log_info(self.paddleflow_server, runid, jobid, pagesize, pageno, logfileposition,
self.header)
def create_job(self, job_type, job_request):
"""
create_job
"""
self.pre_check()
if job_type is None or (job_type != 'single' and job_type != 'distributed' and job_type != 'workflow'):
raise PaddleFlowSDKException("InvalidJobType", "job_type should not be none and should be single, distributed or workflow")
if job_request.queue is None or job_request.queue == '':
raise PaddleFlowSDKException("InvalidJobRequest", "job_request queue should not be none or empty")
return JobServiceApi.create_job(self.paddleflow_server, job_type, job_request, self.header)
def show_job(self, jobid):
"""
show_job
"""
self.pre_check()
if jobid is None or jobid == "":
raise PaddleFlowSDKException("InvalidJobID", "jobid should not be none or empty")
return JobServiceApi.show_job(self.paddleflow_server, jobid, self.header)
def list_job(self, status=None, timestamp=None, start_time=None, queue=None, labels=None, maxkeys=None, marker=None):
"""
list_job
"""
self.pre_check()
return JobServiceApi.list_job(self.paddleflow_server, status, timestamp, start_time, queue, labels, maxkeys, marker, self.header)
def stop_job(self, jobid):
"""
stop_job
"""
self.pre_check()
if jobid is None or jobid == "":
raise PaddleFlowSDKException("InvalidJobID", "jobid should not be none or empty")
return JobServiceApi.stop_job(self.paddleflow_server, jobid, self.header)
def delete_job(self, jobid):
"""
delete_job
"""
self.pre_check()
if jobid is None or jobid == "":
raise PaddleFlowSDKException("InvalidJobID", "jobid should not be none or empty")
return JobServiceApi.delete_job(self.paddleflow_server, jobid, self.header)
| StarcoderdataPython |
11274149 | <gh_stars>1-10
"""
Base GraphQL utilities
isort:skip_file
"""
# The GraphQL query recommended for a full schema introspection.
from .introspection_query import introspection_query
# Gets the target Operation from a Document
from .get_operation_ast import get_operation_ast
# Build a GraphQLSchema from an introspection result.
from .build_client_schema import build_client_schema
# Build a GraphQLSchema from a parsed GraphQL Schema language AST.
from .build_ast_schema import build_ast_schema
# Extends an existing GraphQLSchema from a parsed GraphQL Schema language AST.
from .extend_schema import extend_schema
# Print a GraphQLSchema to GraphQL Schema language.
from .schema_printer import print_schema, print_introspection_schema
# Create a GraphQLType from a GraphQL language AST.
from .type_from_ast import type_from_ast
# Create a JavaScript value from a GraphQL language AST.
from .value_from_ast import value_from_ast
# Create a GraphQL language AST from a JavaScript value.
from .ast_from_value import ast_from_value
# A helper to use within recursive-descent visitors which need to be aware of
# the GraphQL type system.
from .type_info import TypeInfo
# Determine if JavaScript values adhere to a GraphQL type.
from .is_valid_value import is_valid_value
# Determine if AST values adhere to a GraphQL type.
from .is_valid_literal_value import is_valid_literal_value
# Concatenates multiple AST together.
from .concat_ast import concat_ast
# Comparators for types
from .type_comparators import (
is_equal_type,
is_type_sub_type_of,
do_types_overlap
)
# Asserts that a string is a valid GraphQL name
from .assert_valid_name import assert_valid_name
# Undefined const
from .undefined import Undefined
__all__ = [
'introspection_query',
'get_operation_ast',
'build_client_schema',
'build_ast_schema',
'extend_schema',
'print_introspection_schema',
'print_schema',
'type_from_ast',
'value_from_ast',
'ast_from_value',
'TypeInfo',
'is_valid_value',
'is_valid_literal_value',
'concat_ast',
'do_types_overlap',
'is_equal_type',
'is_type_sub_type_of',
'assert_valid_name',
'Undefined',
]
| StarcoderdataPython |
5128943 | <reponame>mkoivi-ms/InnerEye-DeepLearning<gh_stars>1-10
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
import numpy as np
import param
import torch.cuda
import torch.utils.data
from torch import Tensor
from torch.cuda import amp
from torch.cuda.amp import GradScaler
from torch.nn import MSELoss
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from InnerEye.Common import common_util
from InnerEye.Common.common_util import MetricsDataframeLoggers
from InnerEye.Common.metrics_dict import MetricType, MetricsDict, create_metrics_dict_from_config
from InnerEye.Common.type_annotations import T
from InnerEye.ML import metrics
from InnerEye.ML.common import ModelExecutionMode
from InnerEye.ML.config import BACKGROUND_CLASS_NAME, SegmentationLoss, SegmentationModelBase
from InnerEye.ML.dataset.sample import CroppedSample
from InnerEye.ML.dataset.scalar_sample import ScalarItem
from InnerEye.ML.dataset.sequence_sample import ClassificationItemSequence
from InnerEye.ML.deep_learning_config import DeepLearningConfig
from InnerEye.ML.metrics import AzureAndTensorboardLogger, AzureMLLogger, compute_scalar_metrics
from InnerEye.ML.models.architectures.base_model import DeviceAwareModule
from InnerEye.ML.models.losses.cross_entropy import CrossEntropyLoss
from InnerEye.ML.models.losses.ece import ECELoss
from InnerEye.ML.models.losses.mixture import MixtureLoss
from InnerEye.ML.models.losses.soft_dice import SoftDiceLoss
from InnerEye.ML.models.parallel.data_parallel import DataParallelCriterion, DataParallelModel, \
execute_within_autocast_if_needed
from InnerEye.ML.pipelines.forward_pass import SegmentationForwardPass, single_optimizer_step
from InnerEye.ML.scalar_config import ScalarLoss, ScalarModelBase
from InnerEye.ML.sequence_config import SequenceModelBase
from InnerEye.ML.utils import dataset_util, metrics_util
from InnerEye.ML.utils.dataset_util import DatasetExample
from InnerEye.ML.utils.image_util import NumpyOrTorch
from InnerEye.ML.utils.metrics_util import SummaryWriters
from InnerEye.ML.utils.sequence_utils import get_masked_model_outputs_and_labels
from InnerEye.ML.utils.supervised_criterion import BinaryCrossEntropyWithLogitsLoss, SupervisedLearningCriterion
from InnerEye.ML.utils.temperature_scaling import ModelWithTemperature
from InnerEye.ML.utils.training_util import ModelForwardAndBackwardsOutputs, gather_tensor
from InnerEye.ML.visualizers.grad_cam_hooks import VisualizationMaps
from InnerEye.ML.visualizers.regression_visualization import plot_variation_error_prediction
# Type variable for the model configuration, bounded by DeepLearningConfig.
C = TypeVar('C', bound=DeepLearningConfig)
# Type variable for the model itself, bounded by DeviceAwareModule.
M = TypeVar('M', bound=DeviceAwareModule)
# Type variable for the data item reconstructed from a data loader sample: either a list of
# classification item sequences (sequence models) or a single ScalarItem (scalar models).
E = TypeVar('E', List[ClassificationItemSequence[ScalarItem]], ScalarItem)
class TrainValidateParameters(param.Parameterized, Generic[M]):
    """
    Bundles parameters needed for training and validation.
    model: the model to be used
    data_loader: data loader for a cropped sample
    epoch: current epoch number
    optimizer: optimizer
    """
    # The (student) model that is trained or evaluated.
    model: M = param.ClassSelector(class_=DeviceAwareModule, instantiate=False)
    # Optional exponential-moving-average copy of the model (mean teacher); may be None.
    mean_teacher_model: M = param.ClassSelector(class_=DeviceAwareModule, instantiate=False, allow_None=True)
    # The data loader that supplies the minibatches for the present epoch.
    data_loader: DataLoader = param.ClassSelector(class_=DataLoader, instantiate=False)
    # The number of the current epoch (non-negative).
    epoch: int = param.Integer(None, bounds=(0, None))
    # The optimizer used for the backward pass during training.
    optimizer: Optimizer = param.ClassSelector(class_=Optimizer, instantiate=False)
    # The learning rate(s) in effect during this epoch; must contain at least one entry.
    epoch_learning_rate: List[float] = param.List(None, class_=float, bounds=(1, None), instantiate=False)
    # Tensorboard summary writers for the training and validation stages.
    summary_writers: SummaryWriters = param.ClassSelector(class_=SummaryWriters, instantiate=False)
    # If True, run with backpropagation (training); if False, only evaluate (validation).
    in_training_mode: bool = param.Boolean(default=True)
    # Loggers that persist per-epoch and per-subject metrics to dataframes.
    dataframe_loggers: MetricsDataframeLoggers = param.ClassSelector(class_=MetricsDataframeLoggers, instantiate=False)
    # If True, metrics are computed and stored for each minibatch.
    save_metrics: bool = param.Boolean(default=True)
    # Gradient scaler used for mixed precision training; may be unset otherwise.
    gradient_scaler: GradScaler = param.ClassSelector(class_=GradScaler, instantiate=False)
class ModelTrainingStepsBase(Generic[C, M], ABC):
    """
    A base class that contains methods that each type of model (segmentation, classification) must implement,
    so that it can fit into the generic training routine. An implementation of this base class must have a means of
    keeping track of the training results on individual minibatches, and return them at the end of an epoch.
    """
    def __init__(self, model_config: C, train_val_params: TrainValidateParameters[M]):
        """
        Creates a new instance, building the loss criterion and wiring up metric logging for either the
        training or the validation stage, depending on train_val_params.in_training_mode.
        :param model_config: The configuration of the model being trained.
        :param train_val_params: Bundle of model, optimizer, data loader and logging objects for this epoch.
        """
        self.model_config = model_config
        self.train_val_params = train_val_params
        # The criterion relies on create_loss_function, which derived classes implement.
        self.criterion = self.create_criterion()
        # Select the loggers and the AzureML metric prefix that match the present execution mode.
        if self.in_training_mode:
            self.df_logger = self.train_val_params.dataframe_loggers.train_epoch_metrics
            tensorboard_logger = self.train_val_params.summary_writers.train
            azureml_logging_prefix = f"{ModelExecutionMode.TRAIN.value}_"
        else:
            self.df_logger = self.train_val_params.dataframe_loggers.val_epoch_metrics
            tensorboard_logger = self.train_val_params.summary_writers.val
            azureml_logging_prefix = f"{ModelExecutionMode.VAL.value}_"
        azureml_logger = AzureMLLogger(logging_prefix=azureml_logging_prefix,
                                       log_to_parent_run=model_config.log_to_parent_run,
                                       cross_validation_split_index=model_config.cross_validation_split_index)
        self.azure_and_tensorboard_logger = AzureAndTensorboardLogger(azureml_logger=azureml_logger,
                                                                      tensorboard_logger=tensorboard_logger,
                                                                      epoch=self.train_val_params.epoch)
    @property
    def in_training_mode(self) -> bool:
        """
        Returns True if the parameters indicate that the model should run in training mode (backpropagating the
        loss and adjusting weights). Returns False if the model should make predictions on the validation set.
        """
        return self.train_val_params.in_training_mode
    @abstractmethod
    def forward_and_backward_minibatch(self, sample: Dict[str, Any],
                                       batch_index: int, epoch: int) -> ModelForwardAndBackwardsOutputs:
        """
        Runs training for a single minibatch of training data, and returns the loss.
        :param sample: The batched sample on which the model should be trained.
        :param batch_index: The index of the present batch (supplied only for diagnostics).
        :param epoch: The number of the present epoch.
        """
        raise NotImplementedError("forward_minibatch must be implemented by derived class.")
    @abstractmethod
    def get_epoch_results_and_store(self, epoch_time_seconds: float) -> MetricsDict:
        """
        This method should assemble all training results that were achieved over all minibatches, store
        or log them in a suitable way, and then return them.
        :param epoch_time_seconds: For diagnostics, this is the total time in seconds for training the present epoch.
        :return: An object that holds an aggregate of the training results over the epoch.
        """
        raise NotImplementedError("get_epoch_results_and_store must be implemented by children")
    @abstractmethod
    def create_loss_function(self) -> torch.nn.Module:
        """
        Returns a torch module that computes a loss function.
        """
        raise NotImplementedError("create_loss_function must be implemented by children")
    def create_criterion(self) -> torch.nn.Module:
        """
        Returns a torch module that creates a criterion module which can be a DataParallelCriterion
        if use_data_parallel is enabled or the loss function module otherwise.
        """
        loss_function = self.create_loss_function()
        if self.model_config.use_data_parallel:
            return DataParallelCriterion(module=loss_function,
                                         device_ids=self.model_config.get_cuda_devices(), # type:ignore
                                         use_mixed_precision=self.model_config.use_mixed_precision)
        else:
            return loss_function
    def compute_loss(self, model_output: torch.Tensor, labels: NumpyOrTorch) -> torch.Tensor:
        """
        Provided model outputs (logits) applies the criterion function and returns the loss tensor.
        If data parallel is used, then the independent loss values are aggregated by averaging.
        :param model_output: Model output logits (unnormalised)
        :param labels: A tensor or numpy array of labels.
        """
        # ensure that the labels are loaded into the GPU
        labels = self.model_config.get_gpu_tensor_if_possible(labels)
        loss = self.forward_criterion_with_autocast(model_output, labels)
        if self.model_config.use_data_parallel:
            # Aggregate the loss values for each parallelized batch element.
            loss = torch.mean(loss)
        return loss
    def forward_criterion(self, model_output: Union[torch.Tensor, List[torch.Tensor]],
                          labels: NumpyOrTorch) -> torch.Tensor:
        """
        Handles the forward pass for the loss function.
        :param model_output: A single Tensor, or a list if using DataParallelCriterion
        :param labels: Labels to compute loss against.
        :return: loss tensor.
        """
        return self.criterion(model_output, labels)
    def forward_criterion_with_autocast(self,
                                        model_output: Union[torch.Tensor, List[torch.Tensor]],
                                        labels: NumpyOrTorch) -> torch.Tensor:
        """
        Handles the forward pass for the loss function, possibly taking mixed precision into account.
        :param model_output: A single Tensor, or a list if using DataParallelCriterion
        :param labels: Labels to compute loss against.
        :return: loss tensor. This can be a float16 or float32 tensor, which should be cast to float32 before further
        use.
        """
        if self.model_config.use_mixed_precision:
            # Under autocast, the criterion may execute in reduced (float16) precision on the GPU.
            with amp.autocast():
                return self.forward_criterion(model_output, labels)
        else:
            return self.forward_criterion(model_output, labels)
@dataclass
class ScalarModelInputsAndLabels(Generic[E, T]):
    """
    Holds the results of calling get_scalar_model_inputs_and_labels: For a given sample returned by the data loader,
    create the model inputs, the labels, the list of subjects (data loader sample can be batched),
    and the reconstructed data item.
    """
    # The tensors that are fed into the model's forward pass.
    model_inputs: List[torch.Tensor]
    # The label tensor (or other label structure) for the sample.
    labels: T
    # String IDs of the subjects contained in the (possibly batched) sample.
    subject_ids: List[str]
    # The data item (ScalarItem or list of sequences) reconstructed from the data loader output.
    data_item: E
    def __post_init__(self) -> None:
        # Fail early if any of the fields above was left as None.
        common_util.check_properties_are_not_none(self)
def get_scalar_model_inputs_and_labels(model_config: ScalarModelBase,
                                       model: torch.nn.Module,
                                       sample: Dict[str, Any]) -> ScalarModelInputsAndLabels:
    """
    Builds everything needed to run a scalar model on one minibatch: the model input tensors,
    the label tensor, the IDs of the subjects in the batch, and the data item reconstructed from
    the data loader output.
    :param model_config: The configuration object for the model.
    :param model: The instantiated PyTorch model.
    :param sample: A training sample, as returned by a PyTorch data loader (dictionary mapping from field name to value)
    :return: An instance of ScalarModelInputsAndLabels for the given minibatch.
    """
    if isinstance(model, DataParallelModel):
        # Work with the wrapped module, so that get_input_tensors is available.
        model = model.get_module()
    if isinstance(model_config, SequenceModelBase):
        seq_model: DeviceAwareModule[List[ClassificationItemSequence], torch.Tensor] = model  # type: ignore
        minibatch_sequences = ClassificationItemSequence.from_minibatch(sample)
        ids = [item.id for item in minibatch_sequences]
        label_tensor = ClassificationItemSequence.create_labels_tensor_for_minibatch(
            sequences=minibatch_sequences,
            target_indices=model_config.get_target_indices()
        )
        input_tensors = seq_model.get_input_tensors(minibatch_sequences)
        return ScalarModelInputsAndLabels[List[ClassificationItemSequence], torch.Tensor](
            model_inputs=input_tensors,
            labels=label_tensor,
            subject_ids=ids,
            data_item=minibatch_sequences
        )
    non_sequence_model: DeviceAwareModule[ScalarItem, torch.Tensor] = model  # type: ignore
    item = ScalarItem.from_dict(sample)
    ids = [str(meta.id) for meta in item.metadata]  # type: ignore
    input_tensors = non_sequence_model.get_input_tensors(item)
    return ScalarModelInputsAndLabels[ScalarItem, torch.Tensor](
        model_inputs=input_tensors,
        labels=item.label,
        subject_ids=ids,
        data_item=item
    )
F = TypeVar("F", bound=ScalarModelBase)
class ModelTrainingStepsForScalarModel(ModelTrainingStepsBase[F, DeviceAwareModule]):
    """
    This class implements all steps necessary for training an image classification model during a single epoch.
    """
    def __init__(self, config: F, train_val_params: TrainValidateParameters[DeviceAwareModule]):
        """
        Creates a new instance of the class.
        :param config: The configuration of a classification model.
        :param train_val_params: The parameters for training the model, including the optimizer and the data loaders.
        """
        # This field needs to be defined in the constructor to keep pycharm happy, but before the call to the
        # base class because the base class constructor create_loss_function
        self.label_tensor_dtype = torch.float32
        super().__init__(config, train_val_params)
        self.metrics = create_metrics_dict_from_config(config)
        self.compute_mean_teacher_model = self.model_config.compute_mean_teacher_model
        if self.model_config.compute_grad_cam:
            # GradCAM visualizations are created from the mean teacher model when one is trained,
            # otherwise from the student model.
            model_to_evaluate = self.train_val_params.mean_teacher_model if \
                self.model_config.compute_mean_teacher_model else self.train_val_params.model
            self.guided_grad_cam = VisualizationMaps(model_to_evaluate, self.model_config)
            self.model_config.visualization_folder.mkdir(exist_ok=True)
    def create_loss_function(self) -> torch.nn.Module:
        """
        Returns a torch module that computes a loss function.
        Depending on the chosen loss function, the required data type for the labels tensor is stored in
        self.label_tensor_dtype.
        """
        if self.model_config.loss_type == ScalarLoss.BinaryCrossEntropyWithLogits:
            return BinaryCrossEntropyWithLogitsLoss(smoothing_eps=self.model_config.label_smoothing_eps)
        if self.model_config.loss_type == ScalarLoss.WeightedCrossEntropyWithLogits:
            return BinaryCrossEntropyWithLogitsLoss(
                smoothing_eps=self.model_config.label_smoothing_eps,
                class_counts=self.model_config.get_training_class_counts())
        elif self.model_config.loss_type == ScalarLoss.MeanSquaredError:
            self.label_tensor_dtype = torch.float32
            return MSELoss()
        else:
            raise NotImplementedError("Loss type {} is not implemented".format(self.model_config.loss_type))
    def get_label_tensor(self, labels: torch.Tensor) -> torch.Tensor:
        """
        Converts the given tensor to the right data format, depending on the chosen loss function.
        :param labels: The label tensor that should be converted.
        """
        try:
            labels = labels.to(dtype=self.label_tensor_dtype)
        except ValueError as ex:
            # Chain the original exception so the underlying conversion failure is not lost.
            raise ValueError(
                f"Unable to convert tensor {labels} to data type {self.label_tensor_dtype}: {str(ex)}") from ex
        return self.model_config.get_gpu_tensor_if_possible(labels)
    def get_logits_and_posteriors(self, *model_inputs: torch.Tensor, use_mean_teacher_model: bool = False) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Returns a Tuple containing the logits and the final model output. Note that the logits might be
        distributed over multiple GPU if the model is an instance of DataParallel. In this case,
        the posteriors will be gathered to GPU_0.
        :param model_inputs: input to evaluate the model on
        :param use_mean_teacher_model: If True, logits and posteriors are produced for the mean teacher model. Else
        logits and posteriors are produced for the standard (student) model.
        :return: Tuple (logits, posteriors).
        """
        if use_mean_teacher_model:
            logits = self.train_val_params.mean_teacher_model(*model_inputs)
        else:
            logits = self.train_val_params.model(*model_inputs)
        posteriors = self.model_config.get_post_loss_logits_normalization_function()(gather_tensor(logits))
        return logits, posteriors
    def _compute_model_output_and_loss(self, model_inputs_and_labels: ScalarModelInputsAndLabels) -> \
            Tuple[Tensor, Tensor, Tensor]:
        """
        Computes the output of the model for a given set of inputs and labels.
        Returns a tuple of (logits, posteriors, loss). For multi-GPU computation, the logits are returned
        as a list.
        """
        model = self.train_val_params.model
        label_gpu = self.get_label_tensor(model_inputs_and_labels.labels)
        if self.model_config.use_mixed_precision and self.model_config.use_gpu:
            label_gpu = label_gpu.to(dtype=torch.float16)
        def compute() -> Tuple[Tensor, Tensor, Tensor]:
            if self.in_training_mode:
                model.train()
                logits, posteriors = self.get_logits_and_posteriors(*model_inputs_and_labels.model_inputs)
            else:
                model.eval()
                with torch.no_grad():
                    logits, posteriors = self.get_logits_and_posteriors(*model_inputs_and_labels.model_inputs)
                model.train()
            loss = self.compute_loss(logits, label_gpu)
            return logits, posteriors, loss
        return execute_within_autocast_if_needed(func=compute, use_autocast=self.model_config.use_mixed_precision)
    def forward_and_backward_minibatch(self, sample: Dict[str, Any],
                                       batch_index: int, epoch: int) -> ModelForwardAndBackwardsOutputs:
        """
        Runs training for a single minibatch of training data, and computes all metrics.
        :param sample: The batched sample on which the model should be trained.
        :param batch_index: The index of the present batch (supplied only for diagnostics).
        :param epoch: The number of the present epoch.
        """
        start_time = time.time()
        model = self.train_val_params.model
        mean_teacher_model = self.train_val_params.mean_teacher_model
        model_inputs_and_labels = get_scalar_model_inputs_and_labels(self.model_config, model, sample)
        label_gpu = self.get_label_tensor(model_inputs_and_labels.labels)
        logits, posteriors, loss = self._compute_model_output_and_loss(model_inputs_and_labels)
        gathered_logits = gather_tensor(logits)
        if self.in_training_mode:
            single_optimizer_step(loss,
                                  self.train_val_params.optimizer,
                                  self.train_val_params.gradient_scaler)
            if self.model_config.compute_mean_teacher_model:
                self.update_mean_teacher_parameters()
        if self.compute_mean_teacher_model:
            # If the mean teacher model is computed, use the output of the mean teacher for the metrics report
            # instead of the output of the student model.
            mean_teacher_model.eval()
            with torch.no_grad():
                logits, posteriors = self.get_logits_and_posteriors(
                    *model_inputs_and_labels.model_inputs,
                    use_mean_teacher_model=True)
                gathered_logits = gather_tensor(logits)
        # Autocast may have returned float16 tensors. Documentation suggests to simply cast back to float32.
        # If tensor was already float32, no overhead is incurred.
        posteriors = posteriors.detach().float()
        gathered_logits = gathered_logits.detach().float().cpu()
        loss_scalar = loss.float().item()
        if self.train_val_params.save_metrics:
            if self._should_save_grad_cam_output(epoch=epoch, batch_index=batch_index):
                self.save_grad_cam(epoch, model_inputs_and_labels.subject_ids,
                                   model_inputs_and_labels.data_item,
                                   model_inputs_and_labels.model_inputs,
                                   label_gpu)
            self.metrics.add_metric(MetricType.LOSS, loss_scalar)
            self.update_metrics(model_inputs_and_labels.subject_ids, posteriors, label_gpu)
            logging.debug(f"Batch {batch_index}: {self.metrics.to_string()}")
            minibatch_time = time.time() - start_time
            self.metrics.add_metric(MetricType.SECONDS_PER_BATCH, minibatch_time)
        return ModelForwardAndBackwardsOutputs(
            loss=loss_scalar,
            logits=gathered_logits,
            labels=model_inputs_and_labels.labels
        )
    def get_epoch_results_and_store(self, epoch_time_seconds: float) -> MetricsDict:
        """
        Assembles all training results that were achieved over all minibatches, returns them as a dictionary
        mapping from metric name to metric value.
        :param epoch_time_seconds: For diagnostics, this is the total time in seconds for training the present epoch.
        :return: A dictionary that holds all metrics averaged over the epoch.
        """
        self.metrics.add_metric(MetricType.SECONDS_PER_EPOCH, epoch_time_seconds)
        assert len(self.train_val_params.epoch_learning_rate) == 1, "Expected a single entry for learning rate."
        self.metrics.add_metric(MetricType.LEARNING_RATE, self.train_val_params.epoch_learning_rate[0])
        averaged_across_hues = self.metrics.average(across_hues=False)
        mode = ModelExecutionMode.TRAIN if self.in_training_mode else ModelExecutionMode.VAL
        diagnostics_lines = averaged_across_hues.to_string()
        logging.info(f"Results for epoch {self.train_val_params.epoch:3d} {mode.value}\n{diagnostics_lines}")
        # Store subject level metrics
        subject_logger = self.train_val_params.dataframe_loggers.train_subject_metrics if \
            self.train_val_params.in_training_mode \
            else self.train_val_params.dataframe_loggers.val_subject_metrics
        self.metrics.store_metrics_per_subject(
            epoch=self.train_val_params.epoch,
            df_logger=subject_logger,
            mode=mode,
            cross_validation_split_index=self.model_config.cross_validation_split_index)
        if self._should_save_regression_error_plot(self.train_val_params.epoch):
            error_plot_name = f"error_plot_{self.train_val_params.epoch}"
            path = str(self.model_config.outputs_folder / f"{error_plot_name}.png")
            plot_variation_error_prediction(self.metrics.get_labels(), self.metrics.get_predictions(), path)
            self.azure_and_tensorboard_logger.log_image(error_plot_name, path)
        # Write metrics to Azure and TensorBoard
        metrics.store_epoch_metrics(self.azure_and_tensorboard_logger,
                                    self.df_logger,
                                    self.train_val_params.epoch,
                                    averaged_across_hues,
                                    self.train_val_params.epoch_learning_rate,
                                    self.model_config)
        return self.metrics.average(across_hues=True)
    def update_metrics(self, subject_ids: List[str], model_output: torch.Tensor, labels: torch.Tensor) -> None:
        """
        Handle metrics updates based on the provided model outputs and labels.
        """
        compute_scalar_metrics(self.metrics, subject_ids, model_output, labels, self.model_config.loss_type)
    def save_grad_cam(self,
                      epoch: int,
                      subject_ids: List,
                      classification_item: Union[List[ClassificationItemSequence[ScalarItem]], ScalarItem],
                      model_inputs: List[torch.Tensor],
                      labels: torch.Tensor) -> None:
        """
        Writes GradCAM visualizations for the given subjects to the model's visualization folder.
        """
        # Renamed loop variable to avoid shadowing the builtin `id`.
        filenames = [f"{epoch}_viz_{subject_id}" for subject_id in subject_ids]
        self.guided_grad_cam.save_visualizations_in_notebook(
            classification_item,  # type: ignore
            model_inputs,
            filenames,
            ground_truth_labels=labels.cpu().numpy(),
            gradcam_dir=self.model_config.visualization_folder
        )
    def update_mean_teacher_parameters(self) -> None:
        """
        Updates the mean teacher model parameters as per the update formula
        mean_teacher_model_weight = alpha * (mean_teacher_model_weight) + (1-alpha) * (student_model_weight)
        see https://arxiv.org/abs/1703.01780
        """
        mean_teacher_model = self.train_val_params.mean_teacher_model
        model = self.train_val_params.model
        if isinstance(mean_teacher_model, DataParallelModel):
            mean_teacher_model = mean_teacher_model.module  # type: ignore
            model = model.module  # type: ignore
        for mean_teacher_param, student_param in zip(mean_teacher_model.parameters(), model.parameters()):
            mean_teacher_param.data = self.model_config.mean_teacher_alpha * mean_teacher_param.data \
                                      + (1 - self.model_config.mean_teacher_alpha) * student_param.data
    def _should_save_grad_cam_output(self, epoch: int, batch_index: int) -> bool:
        # GradCAM output is only saved during validation of classification models, for a limited
        # number of batches per saved epoch.
        return self.model_config.is_classification_model \
               and (not self.in_training_mode) \
               and self.model_config.should_save_epoch(epoch) \
               and (batch_index < self.model_config.max_batch_grad_cam)
    def _should_save_regression_error_plot(self, epoch: int) -> bool:
        # Error plots are only produced during validation of regression models, for saved epochs.
        return self.model_config.is_regression_model \
               and (not self.in_training_mode) \
               and self.model_config.should_save_epoch(epoch)
class ModelTrainingStepsForSequenceModel(ModelTrainingStepsForScalarModel[SequenceModelBase]):
    """
    This class implements all steps necessary for training a sequence model during a single epoch.
    """
    def forward_criterion(self, model_output: Union[torch.Tensor, List[torch.Tensor]],
                          labels: NumpyOrTorch) -> torch.Tensor:
        """
        Computes the loss for sequence models: gathers multi-GPU outputs, masks out padded
        positions in the sequences, and applies the (non-parallel) criterion to the masked values.
        :param model_output: A single Tensor, or a list of tensors when using DataParallelCriterion.
        :param labels: Labels to compute loss against.
        :return: loss tensor.
        """
        _model_output: torch.Tensor
        # we need to gather the model outputs before masking them for the criterion.
        if isinstance(model_output, list):
            # When using multiple GPUs, model_output is a list of tensors. Gather will concatenate them
            # across the first dimension, and move them to GPU0.
            _model_output = torch.nn.parallel.gather(model_output, target_device=0)
        else:
            _model_output = model_output
        # create masked sequences based on the labels
        masked_model_outputs_and_labels = get_masked_model_outputs_and_labels(_model_output, labels)
        if masked_model_outputs_and_labels is None:
            raise ValueError("Invalid model_output and labels found")
        # do not use a data parallel criterion as we have gathered the model outputs
        if isinstance(self.criterion, DataParallelCriterion):
            criterion = self.criterion.module  # type: ignore
        else:
            criterion = self.criterion
        return criterion(masked_model_outputs_and_labels.model_outputs, masked_model_outputs_and_labels.labels)
    def learn_temperature_scale_parameter(self, logits: torch.Tensor, labels: torch.Tensor) -> float:
        """
        Uses the provided logits and labels to learn a temperature scale parameter.
        :param logits: Logits to use in order to learn a temperature scale parameter
        :param labels: Labels to use in order to learn a temperature scale parameter
        :return Optimal temperature value
        """
        _model: Union[DeviceAwareModule, DataParallelModel, ModelWithTemperature] = self.train_val_params.model
        assert self.model_config.temperature_scaling_config is not None
        # The Expected Calibration Error criterion evaluates how well calibrated the posteriors are.
        ece_criterion: ECELoss = ECELoss(activation=self.model_config.get_post_loss_logits_normalization_function(),
                                         n_bins=self.model_config.temperature_scaling_config.ece_num_bins)
        if self.model_config.use_gpu:
            ece_criterion = ece_criterion.cuda()
        if isinstance(_model, DataParallelModel):
            _model = _model.get_module()
        def _forward_criterion(_logits: torch.Tensor, _labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
            # Returns both the training loss and the ECE for the given (logits, labels) pair;
            # the masked values are batched (unsqueeze) before being passed to the ECE criterion.
            loss = self.forward_criterion_with_autocast(_logits, _labels).to(torch.float32)
            masked_model_outputs_and_labels = get_masked_model_outputs_and_labels(_logits, _labels)
            assert masked_model_outputs_and_labels is not None
            ece = ece_criterion(masked_model_outputs_and_labels.model_outputs.data.unsqueeze(dim=0),
                                masked_model_outputs_and_labels.labels.data.unsqueeze(dim=0))
            return loss, ece
        assert isinstance(_model, ModelWithTemperature)
        return _model.set_temperature(
            logits=logits,
            labels=labels,
            criterion_fn=_forward_criterion,
            use_gpu=self.model_config.use_gpu,
            logger=self.azure_and_tensorboard_logger
        )
class ModelTrainingStepsForSegmentation(ModelTrainingStepsBase[SegmentationModelBase, DeviceAwareModule]):
    """
    This class implements all steps necessary for training an image segmentation model during a single epoch.
    """
    def __init__(self, model_config: SegmentationModelBase,
                 train_val_params: TrainValidateParameters[DeviceAwareModule]):
        """
        Creates a new instance of the class.
        :param model_config: The configuration of a segmentation model.
        :param train_val_params: The parameters for training the model, including the optimizer and the data loaders.
        """
        super().__init__(model_config, train_val_params)
        # Index of the randomly chosen batch whose sample will be stored for visualization.
        self.example_to_save = np.random.randint(0, len(train_val_params.data_loader))
        self.pipeline = SegmentationForwardPass(model=self.train_val_params.model,
                                                model_config=self.model_config,
                                                batch_size=self.model_config.train_batch_size,
                                                optimizer=self.train_val_params.optimizer,
                                                in_training_mode=self.train_val_params.in_training_mode,
                                                criterion=self.compute_loss,
                                                gradient_scaler=train_val_params.gradient_scaler)
        # Metrics are tracked per structure ("hue"), with background as the first hue.
        self.metrics = MetricsDict(hues=[BACKGROUND_CLASS_NAME] + model_config.ground_truth_ids)
    def create_loss_function(self) -> torch.nn.Module:
        """
        Returns a torch module that computes a loss function.
        """
        return self.construct_loss_function(self.model_config)
    @classmethod
    def construct_loss_function(cls, model_config: SegmentationModelBase) -> SupervisedLearningCriterion:
        """
        Returns a loss function from the model config; mixture losses are constructed as weighted combinations of
        other loss functions.
        """
        if model_config.loss_type == SegmentationLoss.Mixture:
            components = model_config.mixture_loss_components
            assert components is not None
            # Normalize the component weights so that they sum to 1.
            sum_weights = sum(component.weight for component in components)
            weights_and_losses = []
            for component in components:
                normalized_weight = component.weight / sum_weights
                loss_function = cls.construct_non_mixture_loss_function(model_config, component.loss_type,
                                                                        component.class_weight_power)
                weights_and_losses.append((normalized_weight, loss_function))
            return MixtureLoss(weights_and_losses)
        return cls.construct_non_mixture_loss_function(model_config, model_config.loss_type,
                                                       model_config.loss_class_weight_power)
    @classmethod
    def construct_non_mixture_loss_function(cls,
                                            model_config: SegmentationModelBase,
                                            loss_type: SegmentationLoss,
                                            power: Optional[float]) -> SupervisedLearningCriterion:
        """
        :param model_config: model configuration to get some parameters from
        :param loss_type: type of loss function
        :param power: value for class_weight_power for the loss function
        :return: instance of loss function
        """
        if loss_type == SegmentationLoss.SoftDice:
            return SoftDiceLoss(class_weight_power=power)
        elif loss_type == SegmentationLoss.CrossEntropy:
            return CrossEntropyLoss(class_weight_power=power,
                                    smoothing_eps=model_config.label_smoothing_eps,
                                    focal_loss_gamma=None)
        elif loss_type == SegmentationLoss.Focal:
            # Focal loss is cross entropy with an additional focusing term.
            return CrossEntropyLoss(class_weight_power=power,
                                    smoothing_eps=model_config.label_smoothing_eps,
                                    focal_loss_gamma=model_config.focal_loss_gamma)
        else:
            raise NotImplementedError("Loss type {} is not implemented".format(loss_type))
    def forward_and_backward_minibatch(self, sample: Dict[str, Any],
                                       batch_index: int, epoch: int) -> ModelForwardAndBackwardsOutputs:
        """
        Runs training for a single minibatch of training data, and computes all metrics.
        :param sample: The batched sample on which the model should be trained.
        :param batch_index: The index of the present batch (supplied only for diagnostics).
        :param epoch: The number of the present epoch.
        """
        cropped_sample: CroppedSample = CroppedSample.from_dict(sample=sample)
        labels = self.model_config.get_gpu_tensor_if_possible(cropped_sample.labels_center_crop)
        # The mask is only applied during validation; during training all voxels contribute.
        mask = None if self.train_val_params.in_training_mode else cropped_sample.mask_center_crop
        forward_pass_result = self.pipeline.forward_pass_patches(patches=cropped_sample.image,
                                                                 labels=labels,
                                                                 mask=mask)
        # Clear the GPU cache between forward and backward passes to avoid possible out-of-memory
        torch.cuda.empty_cache()
        dice_for_all_classes = metrics.compute_dice_across_patches(
            segmentation=torch.tensor(forward_pass_result.segmentations).long(),
            ground_truth=labels,
            use_cuda=self.model_config.use_gpu,
            allow_multiple_classes_for_each_pixel=True).cpu().numpy()
        foreground_voxels = metrics_util.get_number_of_voxels_per_class(cropped_sample.labels)
        # loss is a scalar, also when running the forward pass over multiple crops.
        # dice_for_all_structures has one row per crop.
        if forward_pass_result.loss is None:
            raise ValueError("During training, the loss should always be computed, but the value is None.")
        loss = forward_pass_result.loss
        # store metrics per batch
        self.metrics.add_metric(MetricType.LOSS, loss)
        for i, ground_truth_id in enumerate(self.metrics.get_hue_names(include_default=False)):
            for b in range(dice_for_all_classes.shape[0]):
                self.metrics.add_metric(MetricType.DICE, dice_for_all_classes[b, i].item(),
                                        hue=ground_truth_id, skip_nan_when_averaging=True)
            self.metrics.add_metric(MetricType.VOXEL_COUNT, foreground_voxels[i], hue=ground_truth_id)
        # store diagnostics per batch
        center_indices = cropped_sample.center_indices
        if isinstance(center_indices, torch.Tensor):
            center_indices = center_indices.cpu().numpy()
        self.metrics.add_diagnostics(MetricType.PATCH_CENTER.value, np.copy(center_indices))
        if self.train_val_params.in_training_mode:
            # store the sample train patch from this epoch for visualization
            if batch_index == self.example_to_save and self.model_config.store_dataset_sample:
                _store_dataset_sample(self.model_config, self.train_val_params.epoch, forward_pass_result,
                                      cropped_sample)
        return ModelForwardAndBackwardsOutputs(
            loss=loss,
            logits=forward_pass_result.posteriors,
            labels=forward_pass_result.segmentations
        )
    def get_epoch_results_and_store(self, epoch_time_seconds: float) -> MetricsDict:
        """
        Assembles all training results that were achieved over all minibatches, writes them to Tensorboard and
        AzureML, and returns them as a MetricsDict object.
        :param epoch_time_seconds: For diagnostics, this is the total time in seconds for training the present epoch.
        :return: A dictionary that holds all metrics averaged over the epoch.
        """
        self.metrics.add_metric(MetricType.SECONDS_PER_EPOCH, epoch_time_seconds)
        assert len(self.train_val_params.epoch_learning_rate) == 1, "Expected a single entry for learning rate."
        self.metrics.add_metric(MetricType.LEARNING_RATE, self.train_val_params.epoch_learning_rate[0])
        result = metrics.aggregate_segmentation_metrics(self.metrics)
        metrics.store_epoch_metrics(self.azure_and_tensorboard_logger,
                                    self.df_logger,
                                    self.train_val_params.epoch,
                                    result,
                                    self.train_val_params.epoch_learning_rate,
                                    self.model_config)
        return result
# noinspection PyUnresolvedReferences
def _store_dataset_sample(config: SegmentationModelBase,
                          epoch: int,
                          forward_pass_result: SegmentationForwardPass.Result,
                          sample: CroppedSample) -> None:
    """
    Writes the first item of a training batch, together with its results from the model forward pass,
    to the file system as Nifti.
    :param config: Training configurations.
    :param epoch: The epoch to which this sample belongs to.
    :param forward_pass_result: The result of a model forward pass.
    :param sample: The original crop sample used for training, as returned by the data loader
    """
    # Only the first item of the batch is stored as the example.
    first_metadata = sample.metadata[0]  # type: ignore
    dataset_example = DatasetExample(
        epoch=epoch,
        patient_id=first_metadata.patient_id,
        image=sample.image[0][0].numpy(),
        labels=sample.labels[0].numpy(),
        prediction=forward_pass_result.segmentations[0],
        header=first_metadata.image_header)
    dataset_util.store_and_upload_example(dataset_example=dataset_example, args=config)
| StarcoderdataPython |
6543269 | <gh_stars>10-100
import pytest
import numpy as np
from geneal.genetic_algorithms.genetic_algorithm_base import GenAlgSolver
from geneal.utils.exceptions import NoFitnessFunction, InvalidInput
from geneal.utils.exceptions_messages import exception_messages
from geneal.genetic_algorithms.genetic_algorithm_base import (
allowed_selection_strategies,
)
class TestGenAlgSolver:
    """Unit tests for GenAlgSolver: mutation, crossover points, parent
    selection and input validation.

    NOTE: all expected index arrays below are tied to random_state=42 (and
    np.random.seed(42)); changing the seed or the solver's RNG call order
    invalidates them.
    """
    @pytest.mark.parametrize(
        "excluded_genes, expected_mutation_rows, expected_mutation_cols",
        [
            pytest.param(
                None,
                np.array([7, 4, 8, 5, 7, 3, 7, 8, 5, 4]),
                np.array([7, 7, 2, 5, 4, 1, 7, 5, 1, 4]),
                id="no_excluded_genes",
            ),
            pytest.param(
                np.array([1, 2, 3, 5, 6, 7]),
                np.array([7, 4, 8, 5, 7, 3, 7, 8, 5, 4]),
                np.array([9, 9, 8, 4, 0, 4, 9, 9, 4, 4]),
                id="excluded_genes",
            ),
        ],
    )
    def test_mutate_population(
        self, excluded_genes, expected_mutation_rows, expected_mutation_cols
    ):
        """Mutation picks the seeded row/column indices and never an excluded gene."""
        gen_alg = GenAlgSolver(
            fitness_function=lambda x: x.sum(),
            n_genes=10,
            pop_size=10,
            random_state=42,
            excluded_genes=excluded_genes,
        )
        mutation_rows, mutation_cols = gen_alg.mutate_population(None, 10)
        assert np.equal(mutation_rows, expected_mutation_rows).all()
        assert np.equal(mutation_cols, expected_mutation_cols).all()
        # Excluded genes must never be chosen as mutation targets.
        if excluded_genes is not None:
            for index in excluded_genes:
                assert index not in mutation_cols
    @pytest.mark.parametrize(
        "n_crossover_points, expected_result",
        [
            pytest.param(1, np.array([5]), id="n_crossover_points=1"),
            pytest.param(2, np.array([0, 5]), id="n_crossover_points=2"),
            pytest.param(3, np.array([[0, 5, 9]]), id="n_crossover_points=3"),
        ],
    )
    def test_get_crossover_points(self, n_crossover_points, expected_result):
        """Crossover point positions are deterministic under random_state=42."""
        gen_alg = GenAlgSolver(
            fitness_function=lambda x: x.sum(),
            n_genes=10,
            n_crossover_points=n_crossover_points,
            random_state=42,
        )
        crossover_points = gen_alg.get_crossover_points()
        assert np.equal(crossover_points, expected_result).all()
    def test_no_fitness_function_error(self):
        """Constructing the solver without a fitness function raises NoFitnessFunction."""
        with pytest.raises(Exception) as excinfo:
            GenAlgSolver(n_genes=10, random_state=42)
        assert excinfo.type == NoFitnessFunction
        assert (
            str(excinfo.value)
            == "A fitness function must be defined or provided as an argument"
        )
    @pytest.mark.parametrize(
        "pop_size, selection_strategy, expected_ma, expected_pa",
        [
            pytest.param(
                10,
                "roulette_wheel",
                np.array([0, 4]),
                np.array([3, 0]),
                id="roulette_wheel-pop_size=10",
            ),
            pytest.param(
                11,
                "roulette_wheel",
                np.array([4, 3, 0]),
                np.array([0, 0, 0]),
                id="roulette_wheel-pop_size=11",
            ),
            pytest.param(
                10,
                "two_by_two",
                np.array([0, 2]),
                np.array([1, 3]),
                id="two_by_two-pop_size=10",
            ),
            pytest.param(
                11,
                "two_by_two",
                np.array([0, 2, 4]),
                np.array([1, 3, 5]),
                id="two_by_two-pop_size=11",
            ),
            pytest.param(
                10,
                "random",
                np.array([0, 4]),
                np.array([[4, 1]]),
                id="random-pop_size=10",
            ),
            pytest.param(
                11,
                "random",
                np.array([4, 4, 1]),
                np.array([[0, 0, 1]]),
                id="random-pop_size=11",
            ),
            pytest.param(
                10,
                "tournament",
                np.array([1, 1]),
                np.array([[1, 3]]),
                id="tournament-pop_size=10",
            ),
            pytest.param(
                11,
                "tournament",
                np.array([1, 1, 3]),
                np.array([[1, 3, 1]]),
                id="tournament-pop_size=11",
            ),
        ],
    )
    def test_make_selection(
        self, pop_size, selection_strategy, expected_ma, expected_pa
    ):
        """Each selection strategy picks the seeded mother/father indices
        (both even and odd population sizes)."""
        np.random.seed(42)
        n_genes = 10
        gen_alg = GenAlgSolver(
            fitness_function=lambda x: x.sum(),
            pop_size=pop_size,
            selection_strategy=selection_strategy,
            n_genes=n_genes,
            random_state=42,
        )
        fitness = np.random.rand(pop_size, 1)
        ma, pa = gen_alg.select_parents(fitness)
        assert np.allclose(ma, expected_ma)
        assert np.allclose(pa, expected_pa)
    @pytest.mark.parametrize(
        "algorithm_input, expected_exception_message",
        [
            pytest.param(
                {"pop_size": 1},
                exception_messages["InvalidPopulationSize"],
                id="InvalidPopulationSize",
            ),
            pytest.param(
                {"selection_strategy": "invalid_strategy"},
                exception_messages["InvalidSelectionStrategy"](
                    "invalid_strategy", allowed_selection_strategies
                ),
                id="InvalidSelectionStrategy",
            ),
            pytest.param(
                {"excluded_genes": "invalid_excluded_genes"},
                exception_messages["InvalidExcludedGenes"]("invalid_excluded_genes"),
                id="InvalidExcludedGenes",
            ),
        ],
    )
    def test_exceptions(self, algorithm_input, expected_exception_message):
        """Invalid constructor arguments raise InvalidInput with the canonical message."""
        with pytest.raises(Exception) as excinfo:
            # Assigned but unused: the constructor is expected to raise.
            gen_alg = GenAlgSolver(
                fitness_function=lambda x: x.sum(),
                n_genes=1,
                random_state=42,
                **algorithm_input
            )
        print(excinfo)
        assert excinfo.type == InvalidInput
        assert str(excinfo.value) == expected_exception_message
| StarcoderdataPython |
3480683 | <filename>MonteCarloMarginalizeCode/data/BNS_2015_MDC/DerivedZeroSpinTests/get_coinc.py<gh_stars>1-10
#!/usr/bin/env python
import sys
import math
import sqlite3
# Stolen from gstlal_inspiral_plotsummary
sim_coinc_map = """
CREATE TEMPORARY TABLE
sim_coinc_map
AS
SELECT
sim_inspiral.simulation_id AS simulation_id,
(
SELECT
coinc_inspiral.coinc_event_id
FROM
coinc_event_map AS a
JOIN coinc_event_map AS b ON (
b.coinc_event_id == a.coinc_event_id
)
JOIN coinc_inspiral ON (
b.table_name == 'coinc_event'
AND b.event_id == coinc_inspiral.coinc_event_id
)
WHERE
a.table_name == 'sim_inspiral'
AND a.event_id == sim_inspiral.simulation_id
ORDER BY
coinc_inspiral.false_alarm_rate
LIMIT 1
) AS coinc_event_id,
(
SELECT
coinc_inspiral.end_time
FROM
coinc_event_map AS a
JOIN coinc_event_map AS b ON (
b.coinc_event_id == a.coinc_event_id
)
JOIN coinc_inspiral ON (
b.table_name == 'coinc_event'
AND b.event_id == coinc_inspiral.coinc_event_id
)
WHERE
a.table_name == 'sim_inspiral'
AND a.event_id == sim_inspiral.simulation_id
ORDER BY
coinc_inspiral.false_alarm_rate
LIMIT 1
) AS coinc_end_time,
(
SELECT
coinc_inspiral.end_time_ns
FROM
coinc_event_map AS a
JOIN coinc_event_map AS b ON (
b.coinc_event_id == a.coinc_event_id
)
JOIN coinc_inspiral ON (
b.table_name == 'coinc_event'
AND b.event_id == coinc_inspiral.coinc_event_id
)
WHERE
a.table_name == 'sim_inspiral'
AND a.event_id == sim_inspiral.simulation_id
ORDER BY
coinc_inspiral.false_alarm_rate
LIMIT 1
) AS coinc_end_time_ns
FROM
sim_inspiral
WHERE
coinc_event_id IS NOT NULL
"""
select_coincs = """
SELECT
-- sim_inspiral.*,
-- sngl_inspiral.*
sim_coinc_map.coinc_event_id, sngl_inspiral.ifo, sngl_inspiral.mass1, sngl_inspiral.mass2, sngl_inspiral.snr, sim_coinc_map.coinc_end_time, sim_coinc_map.coinc_end_time_ns
FROM
sim_inspiral
JOIN sim_coinc_map ON (
sim_coinc_map.simulation_id == sim_inspiral.simulation_id
)
JOIN coinc_event_map ON (
coinc_event_map.coinc_event_id == sim_coinc_map.coinc_event_id
)
JOIN sngl_inspiral ON (
coinc_event_map.table_name == 'sngl_inspiral'
AND coinc_event_map.event_id == sngl_inspiral.event_id
)
WHERE sngl_inspiral.snr > 4.0 and sim_inspiral.simulation_id == "sim_inspiral:simulation_id:%d"
"""
count_coincs = """
SELECT COUNT(*) FROM sim_coinc_map
"""
def count_sim_coinc(db):
    """Return the number of injections present in the sim_coinc_map table."""
    cursor = db.execute(count_coincs)
    return int(cursor.fetchone()[0])
def get_coinc(db, sim_id):
    """Return (coinc_id, mass1, mass2, end_time, end_time_ns, network SNR)
    for the injection numbered *sim_id*.

    Rows come back ordered by false-alarm rate; the network SNR is the
    quadrature sum over (at most) the first two single-detector triggers.
    """
    rows = list(db.execute(select_coincs % sim_id))
    snr_squared = 0
    for coinc_id, ifo, m1, m2, snr, end_time, end_time_ns in rows[:2]:
        snr_squared += snr ** 2
    return coinc_id, m1, m2, end_time, end_time_ns, math.sqrt(snr_squared)
def add_tmp_table(db):
    """Create the temporary sim_coinc_map table used by the other queries."""
    db.execute(sim_coinc_map)
if __name__ == "__main__":
    # Bug fix: use the standard __name__ guard. The previous
    # `__file__ == sys.argv[0]` comparison fails whenever the script is
    # invoked through a different path than the one stored in __file__.
    connection = sqlite3.connect(sys.argv[1])
    add_tmp_table(connection)
    coinc_id, m1, m2, etime, etime_ns, net_snr = get_coinc(connection, int(sys.argv[2]))
    connection.close()
    # Python 2/3 compatible replacement for the old `print a, b, ...`
    # statement; output format (space-separated fields) is unchanged.
    print("%s %s %s %d.%d %s" % (coinc_id, m1, m2, etime, etime_ns, net_snr))
| StarcoderdataPython |
#Problema1
def numeros_divisibles_multiplos(limite_inferior, limite_superior):
    """Return all numbers in [limite_inferior, limite_superior] divisible by
    both 7 and 5.

    :raises ValueError: when the lower limit is not strictly below the upper limit.
    """
    if limite_superior <= limite_inferior:
        raise ValueError('El limite inferior debe ser menor al limite superior.')
    # Divisible by both 7 and 5 <=> divisible by 35.
    return [n for n in range(limite_inferior, limite_superior + 1) if n % 35 == 0]

numeros = numeros_divisibles_multiplos(1500, 2700)
print(numeros)
#Problema2
# Print a diamond of asterisks: line widths grow 1..n, then shrink n-1..1.
n = 5
for ancho in list(range(1, n + 1)) + list(range(n - 1, 0, -1)):
    print('* ' * ancho)
#Problema3
def contar_pares_impares(lista):
    """Return (pares, impares): the counts of even and odd numbers in *lista*."""
    pares = sum(1 for numero in lista if numero % 2 == 0)
    return pares, len(lista) - pares

numeros = [1,2,3,4,5,6,7,8,9]
resultado = contar_pares_impares(numeros)
print('La cantidad de pares es: %i' % resultado[0])
# Bug fix: the original message said "pares" for the odd count as well.
print('La cantidad de impares es: %i' % resultado[1])
#Problema5
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative implementation: the original naive double recursion ran in
    O(2**n) time; this runs in O(n) with the same results.
    """
    if n < 2:
        return n
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b

for x in range(20):
    print(fib(x))
#Problema6
def contar_digitos_letras(cadena):
    """Return (letras, digitos): counts of alphabetic and digit characters
    in *cadena*.

    Any other character (spaces, punctuation, ...) is ignored; the original
    dead `else: pass` branch is removed.
    """
    digitos = sum(1 for c in cadena if c.isdigit())
    letras = sum(1 for c in cadena if c.isalpha())
    return letras, digitos
# Interactive driver: read a line from the user and report how many letters
# and digits it contains (resultado is the (letras, digitos) tuple).
texto = input ('Digite un texto: ')
resultado = contar_digitos_letras(texto)
print ('Cantidad de letras: %i' % resultado[0])
print ('Cantidad de digitos: %i' % resultado[1])
#Problema7
def multiplicar(numeros):
    """Devuelve el producto de todos los elementos de *numeros* (1 si esta vacia)."""
    acumulado = 1
    for factor in numeros:
        acumulado *= factor
    return acumulado

lista_numeros = [8, 2, 3, -1, 7]
print(multiplicar(lista_numeros))
#Problema8
# Reverse a string using extended slicing with a step of -1.
cadena = '1234abcd'
print (cadena[::-1])
#Problema9
def valores_unicos(lista):
    """Return the unique values of *lista*, preserving first-seen order.

    Improvement: the original `list(set(lista))` produced an arbitrary,
    hash-dependent ordering; dict.fromkeys keeps insertion order (Python 3.7+)
    so the output is deterministic.
    """
    return list(dict.fromkeys(lista))

numeros = [2,3,3,5,7,0,0,1,11,13,13,13]
resultado = valores_unicos(numeros)
print(numeros)
print(resultado)
#Problema10
def es_primo(numero):
    """Return True when *numero* is a prime number.

    Bug fix: the original returned True for 0 and negative numbers (the trial
    division loop was empty for them). Also trial-divides only up to
    sqrt(numero) instead of numero - 1, which is O(sqrt(n)) instead of O(n).
    """
    if numero < 2:
        return False
    for i in range(2, int(numero ** 0.5) + 1):
        if numero % i == 0:
            return False
    return True

for i in range(1, 100):
    print(i, es_primo(i))
#Problema11
def pares(numeros):
    """Devuelve una nueva lista con los elementos pares de *numeros*."""
    return [numero for numero in numeros if numero % 2 == 0]

numeros = [1,2,3,4,5,6,7,8,9]
resultado = pares(numeros)
print(numeros)
print(resultado)
#Problema12
def factorial(n):
    """Return n! for a non-negative integer n.

    Iterative version: avoids recursion-depth limits for large n and raises
    ValueError for negative input (the original recursed without bound until
    RecursionError).
    """
    if n < 0:
        raise ValueError('n debe ser un entero no negativo')
    resultado = 1
    for i in range(2, n + 1):
        resultado *= i
    return resultado

print(factorial(5))
| StarcoderdataPython |
4959760 | <gh_stars>0
# Copyright (c) 2010-2019 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def ExtendedProperties():
    # Fixture returning the ExtendedProperties *class* under test (not an
    # instance); imported lazily from the package under test.
    from ..extended import ExtendedProperties
    return ExtendedProperties
class TestExtendedProperties:
    """Round-trip tests for the app-properties (docProps/app.xml) part."""
    def test_ctor(self, ExtendedProperties):
        # A default instance serialises to Application + AppVersion only.
        from ..extended import get_version
        props = ExtendedProperties()
        xml = tostring(props.to_tree())
        expected = """
        <Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties">
          <Application>Microsoft Excel</Application>
          <AppVersion>{0}</AppVersion>
        </Properties>
        """.format(get_version())
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, ExtendedProperties):
        # Parse a representative app.xml produced by Excel for Mac.
        src = """
        <Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
        <Application>Microsoft Macintosh Excel</Application>
        <DocSecurity>0</DocSecurity>
        <ScaleCrop>false</ScaleCrop>
        <HeadingPairs>
          <vt:vector size="2" baseType="variant">
            <vt:variant>
              <vt:lpstr>Worksheets</vt:lpstr>
            </vt:variant>
            <vt:variant>
              <vt:i4>1</vt:i4>
            </vt:variant>
          </vt:vector>
        </HeadingPairs>
        <TitlesOfParts>
          <vt:vector size="1" baseType="lpstr">
            <vt:lpstr>Sheet</vt:lpstr>
          </vt:vector>
        </TitlesOfParts>
        <Company/>
        <LinksUpToDate>false</LinksUpToDate>
        <SharedDoc>false</SharedDoc>
        <HyperlinksChanged>false</HyperlinksChanged>
        <AppVersion>14.0300</AppVersion>
        </Properties>
        """
        node = fromstring(src)
        props = ExtendedProperties.from_tree(node)
        # NOTE(review): the XML carries "false" for ScaleCrop/LinksUpToDate/
        # SharedDoc/HyperlinksChanged yet the expected object uses True —
        # presumably the descriptor treats the parsed string as truthy here;
        # confirm against the ExtendedProperties type before relying on it.
        assert props == ExtendedProperties(
            Application="Microsoft Macintosh Excel",
            DocSecurity=0,
            ScaleCrop=True,
            LinksUpToDate=True,
            SharedDoc=True,
            HyperlinksChanged=True,
            AppVersion='14.0300'
        )
| StarcoderdataPython |
3494381 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
    """User profile (one per User) with avatar, bio and contact details."""
    avatar = models.ImageField(upload_to='media/', null=True)
    # NOTE(review): field name is capitalised unlike the other fields;
    # renaming it would require a schema migration, so it is left as-is.
    Bio = models.CharField(max_length=2000)
    # deletion of profile and user when deleted
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="profile")
    location = models.CharField(max_length=200)
    phone_number = models.IntegerField(default=0)
    email = models.CharField(max_length=500)
    def __str__(self):
        # Display the profile by its owner's username.
        return self.user.username
# sender is the model class whose post_save signal we listen to
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Signal handler: create a Profile whenever a brand-new User is saved."""
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Signal handler: persist the related Profile every time the User is saved."""
    instance.profile.save()
# adding the project profile
class Project(models.Model):
    """A showcased project: up to three images, a name, caption and
    description, owned by a Profile/User."""
    image = models.ImageField(upload_to='media', blank=True)
    image2 = models.ImageField(upload_to='media', blank=True)
    image3 = models.ImageField(upload_to='media', blank=True)
    project_name = models.CharField(max_length=200)
    caption = models.CharField(max_length=1000)
    description = models.CharField(max_length=2000)
    profile = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
    # NOTE(review): `default=True` on a ForeignKey looks wrong (defaults are
    # normally a pk or callable) — confirm the intended default.
    user = models.ForeignKey(User , on_delete=models.CASCADE, default=True)
    def save_project(self):
        """
        Persist this project instance.
        """
        self.save()
    @classmethod
    def get_project(cls, project_id):
        """
        Return a queryset of projects filtered by id.
        """
        projects = cls.objects.filter(id=project_id)
        return projects
    @classmethod
    def show_projects(cls):
        """
        Return all projects.
        """
        projects = cls.objects.all()
        return projects
    @classmethod
    def single_project(cls, project_id):
        """
        Return a queryset of projects filtered by id.
        NOTE(review): duplicate of get_project — consider consolidating.
        """
        projects = cls.objects.filter(id=project_id)
        return projects
    @classmethod
    def search_by_project_name(cls, search_term):
        """
        Return projects whose name matches *search_term* exactly.
        """
        project = cls.objects.filter(project_name=search_term)
        return project
    def delete_project(self):
        """
        Delete this project instance.
        """
        self.delete()
# models for voting criterion
# class Votes(models.Model):
# """
# model for design measure criterion
# """
# design = models.IntegerField()
# usability= models.IntegerField()
# creativity = models.IntegerField()
# content = models.IntegerField()
# project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='votes', null=True)
# user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
class Design(models.Model):
    """
    Per-user design score (vote) attached to a project.
    """
    design_score = models.IntegerField()
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='design', null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
class Usability(models.Model):
    """
    Per-user usability score (vote) attached to a project.
    """
    usability_score = models.IntegerField()
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='usability', null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
class Creativity(models.Model):
    """
    Per-user creativity score (vote) attached to a project.
    """
    creativity_score = models.IntegerField()
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='creativity', null=True)
    user = models.ForeignKey(User,on_delete=models.CASCADE, null=True)
class Content(models.Model):
    """
    Per-user content score (vote) attached to a project.
    """
    content_score =models.IntegerField()
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='content', null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
3349345 | <gh_stars>1-10
# NOTE(review): these read like unit multipliers — kRa = 1000 * Ra suggests
# "kilo", but then mRa = 1_000_000 would be "mega" (usually written MRa) and
# TAS = 1e9 is unexplained; confirm the intended naming/values upstream.
Ra = 1
kRa = 1000
mRa = 1000000
TAS = 1000000000
8021451 | from subprocess import run, PIPE, STDOUT
from btr.loader import Loader
from btr.utilities import get_outdir_path
import pandas as pd
# Run-settings files exercised by every test below (one per synthetic problem type).
settings_files = [
    'test_data/run_settings/settings_synthetic_Ordinal.json',
    'test_data/run_settings/settings_synthetic_Multiclass_Linear.json',
    'test_data/run_settings/settings_synthetic_Multiclass_Nonlinear.json']
def test_predict_synthetic():
    """btr-predict must exit cleanly for every synthetic settings file."""
    for settings_file in settings_files:
        completed = run(['btr-predict',
                         settings_file,
                         '-g', 'test_data/hypotheses/synthetic/',
                         '-o'],
                        stdout=PIPE,
                        stderr=STDOUT)
        assert completed.returncode == 0
def test_score_synthetic():
    """btr-score must exit cleanly for every synthetic settings file.

    Bug fix: the loop previously ignored its loop variable and hard-coded the
    Ordinal settings file, so the same configuration was scored three times
    and the other two were never exercised.
    """
    for settings_file in settings_files:
        completed = run(['btr-score',
                         settings_file,
                         '-g', 'test_data/hypotheses/synthetic/'],
                        stdout=PIPE,
                        stderr=STDOUT)
        assert completed.returncode == 0
def test_score_values_synthetic():
    """For each settings file, the written AUC csv must show the informative
    column sets near-perfect (> 0.95) and the noise set (NS) clearly worse."""
    for s in settings_files:
        loader = Loader(settings_path=s,
                        syn_settings_overwrite=False,
                        use_synapse=False)
        folder = get_outdir_path(loader.s)
        score_csv = folder + 'score/synthetic_auc.csv'
        df = pd.read_csv(score_csv)
        # Single-row csv: take the first record as a column -> AUC mapping.
        df_dict = df.to_dict('records')[0]
        # Informative feature sets: valid AUC in [0, 1] and near-perfect.
        assert df_dict['ABCeasy_cols.txt'] >= 0
        assert df_dict['ABCeasy_cols.txt'] <= 1
        assert df_dict['ABCeasy_cols.txt'] > 0.95
        assert df_dict['ABChard_cols.txt'] >= 0
        assert df_dict['ABChard_cols.txt'] <= 1
        assert df_dict['ABChard_cols.txt'] > 0.95
        assert df_dict['BACeasy_cols.txt'] >= 0
        assert df_dict['BACeasy_cols.txt'] <= 1
        assert df_dict['BACeasy_cols.txt'] > 0.95
        # Noise columns (NS) must score strictly below every informative set.
        assert df_dict['NS_cols.txt'] >= 0
        assert df_dict['NS_cols.txt'] <= 1
        assert df_dict['NS_cols.txt'] < df_dict['ABCeasy_cols.txt']
        assert df_dict['NS_cols.txt'] < df_dict['ABChard_cols.txt']
        assert df_dict['NS_cols.txt'] < df_dict['BACeasy_cols.txt']
        assert df_dict['NS_cols.txt'] < 0.6
1828524 | """authentik policy signals"""
from django.core.cache import cache
from django.db.models.signals import post_save
from django.dispatch import receiver
from structlog.stdlib import get_logger
from authentik.core.api.applications import user_app_cache_key
from authentik.policies.engine import GAUGE_POLICIES_CACHED
from authentik.root.monitoring import monitoring_set
LOGGER = get_logger()
@receiver(monitoring_set)
# pylint: disable=unused-argument
def monitoring_set_policies(sender, **kwargs):
    """set policy gauges"""
    # Gauge = number of cached policy results; `or []` guards against an
    # empty/None key listing from the cache backend.
    GAUGE_POLICIES_CACHED.set(len(cache.keys("policy_*") or []))
@receiver(post_save)
# pylint: disable=unused-argument
def invalidate_policy_cache(sender, instance, **_):
    """Invalidate Policy cache when policy is updated"""
    # Imported locally to avoid an import cycle with the models module.
    from authentik.policies.models import Policy, PolicyBinding
    if isinstance(instance, Policy):
        total = 0
        # Drop every cached result of each binding that uses this policy.
        for binding in PolicyBinding.objects.filter(policy=instance):
            prefix = f"policy_{binding.policy_binding_uuid.hex}_{binding.policy.pk.hex}*"
            keys = cache.keys(prefix)
            total += len(keys)
            cache.delete_many(keys)
        LOGGER.debug("Invalidating policy cache", policy=instance, keys=total)
        # Also delete user application cache
        keys = cache.keys(user_app_cache_key("*")) or []
        cache.delete_many(keys)
103569 | <filename>default_tts.py
"""
Assumes a working docker instance of the parente/espeakbox image.
This should be the case with the docker-compose method of installing Eva.
"""
import sys
import requests
import gossip
from eva import log
from eva import conf
def get_settings_string():
    """Build the espeakbox query-string suffix from the plugin configuration.

    Mirrors the service's API:
    /speech?text=<utterance>
        [&pitch=<0,99; default 50>]
        [&rate=<80,450; default 175 wpm>]
        [&voice=<name; default en>]
        [&encoding=<mp3|opus; default mp3>]
    """
    cfg = conf['plugins']['default_tts']['config']
    return '&pitch=%s&rate=%s&voice=%s&encoding=%s' % (
        cfg['pitch'], cfg['rate'], cfg['voice'], cfg['encoding'])
def tts(text):
    """Request speech audio for *text* from the espeakbox service.

    Returns {'audio': bytes, 'content-type': str} on success, or
    {'audio': None} when the service cannot be reached.
    """
    cfg = conf['plugins']['default_tts']['config']
    url = 'http://%s:%s/speech?text=%s%s' % (
        cfg['tts_host'], cfg['tts_port'], text, get_settings_string())
    try:
        response = requests.get(url)
        return {'audio': response.content,
                'content-type': response.headers['content-type']}
    except Exception:
        log.error('Could not connect to default text-to-speech service: %s' % sys.exc_info()[1])
        return {'audio': None}
@gossip.register('eva.text_to_speech', provides=['default_tts'])
def text_to_speech(context):
    """Gossip hook: convert the interaction's output text to speech audio.

    Bug fixes: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt — narrowed to ``except Exception``; a failed tts() call
    (audio is None) is now detected explicitly instead of relying on the
    incidental KeyError raised by the missing 'content-type' key.
    """
    try:
        response = tts(context.get_output_text())
        if response.get('audio') is None:
            log.error('Could not convert text to speech with default_tts')
            return
        context.set_output_audio(response['audio'], response['content-type'])
    except Exception:
        log.error('Could not convert text to speech with default_tts')
340419 | """
my NN library
(based on Yoav's)
"""
import _dynet as dynet
import numpy as np
import array
from bilstm_aux.lib.constants import START_TAG, END_TAG
from scipy import linalg
def init_dynet(seed):
    """initialize DyNet with a fixed random seed and return the DynetParams."""
    dyparams = dynet.DynetParams()
    # Fetch the command line arguments (optional)
    dyparams.from_args()
    # Set some parameters manually (see the command line arguments documentation)
    dyparams.set_random_seed(seed)
    # Initialize with the given parameters
    dyparams.init()
    return dyparams
def pick_neg_log(pred, gold):
    # Negative log-likelihood of the gold index under the predicted distribution.
    return -dynet.log(dynet.pick(pred, gold))
def is_in_dict(word, dictionary):
    """Look up *word* in *dictionary* — exact form first, then lowercased.

    Returns the stored value on a hit, False when neither form is present.
    """
    for candidate in (word, word.lower()):
        if candidate in dictionary:
            return dictionary[candidate]
    return False
## NN classes
class SequencePredictor:
    """Abstract base: maps a sequence of input expressions to outputs."""
    def __init__(self):
        pass
    def predict_sequence(self, inputs):
        raise NotImplementedError("SequencePredictor predict_sequence: Not Implemented")
class OutputSequencePredictor:
    """Abstract base for output layers that tag a sequence (seq) given inputs."""
    def __init__(self):
        pass
    def predict_sequence(self, seq, inputs):
        raise NotImplementedError("SequencePredictor predict_sequence: Not Implemented")
class FFSequencePredictor(OutputSequencePredictor):
    """
    Local output predictor (softmax per tag)
    """
    def __init__(self, tag2index, network_builder):
        self.network_builder = network_builder
        self.tag2index = tag2index
        # Inverse mapping used to decode predicted indices back to tag strings.
        self.index2tag = {self.tag2index[t]: t for t in self.tag2index.keys()}
    def prune_softmax(self, softmax_distr, word, dictionary):
        """Zero out (in place) the probabilities of tags the dictionary does
        not license for *word*; pruning only happens when more than one known
        tag is licensed. Returns the (possibly modified) distribution."""
        ## implement type-constraint decoding
        if is_in_dict(word, dictionary):
            allowed_tag_indices = [self.tag2index[tag] for tag in is_in_dict(word, dictionary) if tag in self.tag2index]
            if len(allowed_tag_indices) > 1:
                for tag_idx in self.index2tag.keys():
                    if tag_idx not in allowed_tag_indices:
                        softmax_distr[tag_idx] = 0
        # print(len([x for x in softmax_distr if x ==0]))
        return softmax_distr
    def predict_sequence(self, seq, inputs, train=False, output_confidences=False, unk_tag=None, dictionary=None, type_constraint=False, **kwargs):
        """Score *inputs*; at test time write predicted tags (and optionally
        confidences) onto *seq*, at train time return the summed per-tag
        negative log-likelihood loss."""
        output = [self.network_builder(x, **kwargs) for x in inputs]
        if not train:
            if dictionary and type_constraint: # to type constraint decoding only during testing
                pred_tags = []
                for i, o in enumerate(output):
                    softmax_distr = o.npvalue()
                    word = seq.words[i]
                    softmax_distr = self.prune_softmax(softmax_distr, word, dictionary)
                    tag_best = self.index2tag[np.argmax(softmax_distr)]
                    pred_tags.append(tag_best)
                seq.pred_tags = pred_tags
            else:
                seq.pred_tags = [self.index2tag[np.argmax(o.npvalue())] for o in output] # logprobs to indices
            if output_confidences:
                seq.tag_confidences = array.array('f', [np.max(o.npvalue()) for o in output])
        if train:
            # return loss per tag
            gold_tag_indices = array.array('I',[self.tag2index[t] for t in seq.tags])
            return dynet.esum([pick_neg_log(pred,gold) for pred, gold in zip(output, gold_tag_indices)])
    def save_parameters(self, out_filename):
        # The local softmax predictor has no extra parameters of its own to store.
        pass
class CRFSequencePredictor(OutputSequencePredictor):
    """
    Global output predictor
    """
    def __init__(self, model, num_tags, tag2index, network_builder, viterbi_loss=False):
        self.network_builder = network_builder # the per-class layers
        self.tag2index = tag2index
        # Inverse mapping used to decode predicted indices back to tag strings.
        self.index2tag = {self.tag2index[t]: t for t in self.tag2index.keys()}
        self.viterbi_loss=viterbi_loss
        self.num_tags = num_tags
        # Transition matrix for tagging layer, transitioning *to* i *from* j.
        self.trans_mat = model.add_lookup_parameters((num_tags, num_tags)) # tags x tags
    def save_parameters(self, out_filename):
        """Dump the transition matrix as text: '<from> <to> <weights...>' per line."""
        # save transition matrix
        OUT = open(out_filename + ".trans.mat", "w")
        for tag in self.index2tag.keys():
            for tag_prev in self.index2tag.keys():
                tag2tag_expression = self.trans_mat[tag_prev][tag]
                tag_prev_name = self.index2tag[tag_prev]
                tag_i_name = self.index2tag[tag]
                OUT.write("{} {} {}\n".format(tag_prev_name, tag_i_name, " ".join([str(x) for x in tag2tag_expression.npvalue()])))
        OUT.close()
        #np.savetxt(out_filename+'.matrix.out', self.trans_mat.npvalue(), delimiter=',')
        print("done.")
    def predict_sequence(self, seq, inputs, train=False, output_confidences=False, unk_tag=None, dictionary=None, type_constraint=False, **kwargs):
        """At test time: Viterbi-decode and write tags onto *seq*.
        At train time: return the CRF loss (normalizer or Viterbi score minus
        the gold path score)."""
        score_vecs = [self.network_builder(x, **kwargs) for x in inputs]
        if not train:
            #pred_tag_indices = self.viterbi(start_b, T, end_b, score_vecs)
            pred_tag_indices, tag_scores = self.viterbi(score_vecs, unk_tag=unk_tag, dictionary=dictionary)
            seq.pred_tags = [self.index2tag[t] for t in pred_tag_indices]
            if output_confidences:
                # Confidence output is not supported for the CRF decoder.
                print("not implemented")
                return
        else:
            if self.viterbi_loss:
                pred_tag_indices, path_score = self.viterbi(score_vecs)
                instance_score = path_score #viterbi score
            else:
                forward_score = self.forward(score_vecs)
                instance_score = forward_score
            # return loss
            gold_tag_indices = array.array('I',[self.tag2index[t] for t in seq.tags])
            # decode CRF
            gold_score = self.score_sentence(score_vecs, gold_tag_indices)
            return instance_score - gold_score
        # return normalizer - gold_score
    # code adapted from K.Stratos' code basis
    def score_sentence(self, score_vecs, tags):
        """Score the gold tag path: sum of transition + emission terms.
        NOTE(review): mutates the caller's *tags* array by inserting START_TAG
        at position 0."""
        assert(len(score_vecs)==len(tags))
        tags.insert(0, START_TAG) # add start
        total = dynet.scalarInput(.0)
        for i, obs in enumerate(score_vecs):
            # transition to next from i and emission
            next_tag = tags[i + 1]
            total += dynet.pick(self.trans_mat[next_tag],tags[i]) + dynet.pick(obs,next_tag)
        total += dynet.pick(self.trans_mat[END_TAG],tags[-1])
        return total
    # code based on https://github.com/rguthrie3/BiLSTM-CRF
    def viterbi(self, observations, unk_tag=None, dictionary=None):
        """Viterbi decoding: return (best tag-index path, its score expression).
        When *unk_tag* is given, the UNK tag is skipped in favour of the
        second-best candidate at every step."""
        #if dictionary:
        #    raise NotImplementedError("type constraints not yet implemented for CRF")
        backpointers = []
        init_vvars = [-1e10] * self.num_tags
        init_vvars[START_TAG] = 0 # <Start> has all the probability
        for_expr = dynet.inputVector(init_vvars)
        trans_exprs = [self.trans_mat[idx] for idx in range(self.num_tags)]
        for obs in observations:
            bptrs_t = []
            vvars_t = []
            for next_tag in range(self.num_tags):
                next_tag_expr = for_expr + trans_exprs[next_tag]
                next_tag_arr = next_tag_expr.npvalue()
                best_tag_id = np.argmax(next_tag_arr)
                if unk_tag:
                    best_tag = self.index2tag[best_tag_id]
                    if best_tag == unk_tag:
                        next_tag_arr[np.argmax(next_tag_arr)] = 0 # set to 0
                        best_tag_id = np.argmax(next_tag_arr) # get second best
                bptrs_t.append(best_tag_id)
                vvars_t.append(dynet.pick(next_tag_expr, best_tag_id))
            for_expr = dynet.concatenate(vvars_t) + obs
            backpointers.append(bptrs_t)
        # Perform final transition to terminal
        terminal_expr = for_expr + trans_exprs[END_TAG]
        terminal_arr = terminal_expr.npvalue()
        best_tag_id = np.argmax(terminal_arr)
        path_score = dynet.pick(terminal_expr, best_tag_id)
        # Reverse over the backpointers to get the best path
        best_path = [best_tag_id] # Start with the tag that was best for terminal
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        start = best_path.pop() # Remove the start symbol
        best_path.reverse()
        assert start == START_TAG
        # Return best path and best path's score
        return best_path, path_score
    def forward(self, observations):
        """Forward algorithm: log-sum-exp recursion over all tag paths,
        returning the (log) partition score of the sequence."""
        # calculate forward pass
        def log_sum_exp(scores):
            # Numerically stable log-sum-exp over the tag dimension.
            npval = scores.npvalue()
            argmax_score = np.argmax(npval)
            max_score_expr = dynet.pick(scores, argmax_score)
            max_score_expr_broadcast = dynet.concatenate([max_score_expr] * self.num_tags)
            return max_score_expr + dynet.logsumexp_dim((scores - max_score_expr_broadcast),0)
        init_alphas = [-1e10] * self.num_tags
        init_alphas[START_TAG] = 0
        for_expr = dynet.inputVector(init_alphas)
        for obs in observations:
            alphas_t = []
            for next_tag in range(self.num_tags):
                obs_broadcast = dynet.concatenate([dynet.pick(obs, next_tag)] * self.num_tags)
                next_tag_expr = for_expr + self.trans_mat[next_tag] + obs_broadcast
                alphas_t.append(log_sum_exp(next_tag_expr))
            for_expr = dynet.concatenate(alphas_t)
        terminal_expr = for_expr + self.trans_mat[END_TAG]
        alpha = log_sum_exp(terminal_expr)
        return alpha
class RNNSequencePredictor(SequencePredictor):
    """Unidirectional RNN wrapper: transduces inputs from a fresh initial state."""
    def __init__(self, rnn_builder):
        """
        rnn_builder: a LSTMBuilder/SimpleRNNBuilder or GRU builder object
        """
        self.builder = rnn_builder
    def predict_sequence(self, inputs):
        return self.builder.initial_state().transduce(inputs)
class BiRNNSequencePredictor(SequencePredictor):
    """a bidirectional RNN (LSTM/GRU): returns the forward transduction and
    the transduction of the reversed backward inputs"""
    def __init__(self, f_builder, b_builder):
        self.f_builder = f_builder
        self.b_builder = b_builder
    def predict_sequence(self, f_inputs, b_inputs):
        forward_states = self.f_builder.initial_state().transduce(f_inputs)
        backward_states = self.b_builder.initial_state().transduce(reversed(b_inputs))
        return forward_states, backward_states
class Layer:
    """ Class for affine layer transformation or two-layer MLP """
    def __init__(self, model, in_dim, output_dim, activation=dynet.tanh, mlp=0, mlp_activation=dynet.rectify):
        # if mlp > 0, add a hidden layer of that dimension
        self.act = activation
        self.mlp = mlp
        if mlp:
            print('>>> use mlp with dim {} ({})<<<'.format(mlp, mlp_activation))
            mlp_dim = mlp
            self.mlp_activation = mlp_activation
            self.W_mlp = model.add_parameters((mlp_dim, in_dim))
            self.b_mlp = model.add_parameters((mlp_dim))
        else:
            mlp_dim = in_dim
        # Output affine transform: (output_dim x mlp_dim) weights plus bias.
        self.W = model.add_parameters((output_dim, mlp_dim))
        self.b = model.add_parameters((output_dim))
    def __call__(self, x, soft_labels=False, temperature=None, train=False):
        """Apply the (optional MLP +) affine transform to expression *x*.
        With soft_labels and temperature set, return the temperature-smoothed
        softmax instead of the activated logits.
        NOTE(review): the `train` flag is accepted but never used here —
        confirm whether callers rely on it."""
        if self.mlp:
            W_mlp = dynet.parameter(self.W_mlp)
            b_mlp = dynet.parameter(self.b_mlp)
            act = self.mlp_activation
            x_in = act(W_mlp * x + b_mlp)
        else:
            x_in = x
        # from params to expressions
        W = dynet.parameter(self.W)
        b = dynet.parameter(self.b)
        logits = W*x_in + b
        if soft_labels and temperature:
            # calculate the soft labels smoothed with the temperature
            # see Distilling the Knowledge in a Neural Network
            elems = dynet.exp(logits / temperature)
            return dynet.cdiv(elems, dynet.sum_elems(elems))
        if self.act:
            return self.act(logits)
        return logits
| StarcoderdataPython |
1959923 | <reponame>lmadhusudhanan/contrail-test
import argparse
import ConfigParser
import sys
import string
import json
import os
import re
import platform
from fabric.api import env, run, local, lcd, get
from fabric.context_managers import settings, hide
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from common import log_orig as contrail_logging
from fabric.contrib.files import exists
from cfgm_common import utils
from tcutils.util import istrue, get_build_sku
is_container_env = False
def detect_ostype():
    """Return the lowercased OS/distribution name (e.g. 'ubuntu', 'centos').

    Robustness fix: platform.dist() was deprecated since Python 3.5 and
    removed in 3.8; fall back to the lowercased platform.system() name
    (e.g. 'linux') when it is unavailable.
    """
    try:
        return platform.dist()[0].lower()
    except AttributeError:
        return platform.system().lower()
def get_address_family():
    """Return the address family for the test run: 'v4' when a guest-VM image
    is configured, otherwise the AF environment variable ('dual' by default)."""
    # ToDo: CI to execute 'v4' testcases alone for now
    if os.getenv('GUESTVM_IMAGE', None):
        return 'v4'
    return os.getenv('AF', 'dual')
def get_container_name(containers, host, role):
    """Return the container name registered for (host, role), or None when
    *containers* is empty/None or has no entry for *host* or *role*."""
    if not containers or host not in containers:
        return None
    return containers[host].get(role)
def get_value_of_key(dct, key, default=None):
    """Return dct[key], or *default* when *dct*/*key* is falsy or the key is absent."""
    if not dct or not key:
        return default
    return dct.get(key, default)
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'):
    """
    Configure test environment by creating sanity_params.ini and sanity_testbed.json files

    Reads the fab testbed definition (``fabfile.config.testbed`` plus the
    fabric ``env``), logs in to the cluster nodes to discover hostnames, and
    then writes:
      * <test_dir>/sanity_params.ini   (rendered from sanity_params.ini.sample)
      * <test_dir>/sanity_testbed.json (host/role topology)
      * /etc/contrail/openstackrc and /etc/contrail/vnc_api_lib.ini

    NOTE(review): several literals below (``<PASSWORD>``, ``<EMAIL>``) look
    like redacted placeholders; they are not valid Python as-is and presumably
    replaced real identifiers/strings -- confirm against the original source.
    """
    print "Configuring test environment"
    sys.path.insert(0, contrail_fab_path)
    from fabfile.config import testbed
    from fabfile.utils.host import get_openstack_internal_vip, \
        get_control_host_string, get_authserver_ip, get_admin_tenant_name, \
        get_authserver_port, get_env_passwords, get_authserver_credentials, \
        get_vcenter_ip, get_vcenter_port, get_vcenter_username, \
        get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \
        get_authserver_protocol, get_region_name, get_contrail_internal_vip, \
        get_openstack_external_vip, get_contrail_external_vip, \
        get_apiserver_protocol, get_apiserver_certfile, get_apiserver_keyfile, \
        get_apiserver_cafile, get_keystone_insecure_flag, \
        get_apiserver_insecure_flag, get_keystone_certfile, get_keystone_keyfile, \
        get_keystone_cafile, get_keystone_version
    from fabfile.utils.multitenancy import get_mt_enable
    from fabfile.utils.interface import get_data_ip
    from fabfile.tasks.install import update_config_option, update_js_config
    from fabfile.utils.fabos import get_as_sudo
    logger = contrail_logging.getLogger(__name__)

    def validate_and_copy_file(filename, source_host):
        """Fetch *filename* from *source_host* to the same local path.

        Returns the path when the remote file exists, else "".
        """
        with settings(host_string='%s' %(source_host),
                warn_only=True, abort_on_prompts=False):
            if exists(filename):
                filedir = os.path.dirname(filename)
                if not os.path.exists(filedir):
                    os.makedirs(filedir)
                get_as_sudo(filename, filename)
                return filename
            return ""

    cfgm_host = env.roledefs['cfgm'][0]

    # Keystone / auth-server coordinates.
    auth_protocol = get_authserver_protocol()
    try:
        auth_server_ip = get_authserver_ip()
    except Exception:
        auth_server_ip = None
    auth_server_port = get_authserver_port()

    # API-server TLS material: copy certs locally only when https is in use.
    api_auth_protocol = get_apiserver_protocol()
    if api_auth_protocol == 'https':
        api_certfile = validate_and_copy_file(get_apiserver_certfile(), cfgm_host)
        api_keyfile = validate_and_copy_file(get_apiserver_keyfile(), cfgm_host)
        api_cafile = validate_and_copy_file(get_apiserver_cafile(), cfgm_host)
        api_insecure_flag = get_apiserver_insecure_flag()
    else:
        api_certfile = ""
        api_keyfile = ""
        api_cafile = ""
        api_insecure_flag = True

    cert_dir = os.path.dirname(api_certfile)
    # Keystone TLS material is expected alongside the API-server certs.
    if auth_protocol == 'https':
        keystone_cafile = validate_and_copy_file(cert_dir + '/' +\
            os.path.basename(get_keystone_cafile()), cfgm_host)
        keystone_certfile = validate_and_copy_file(cert_dir + '/' +\
            os.path.basename(get_keystone_certfile()), cfgm_host)
        keystone_keyfile = keystone_certfile
        keystone_insecure_flag = istrue(os.getenv('OS_INSECURE', \
            get_keystone_insecure_flag()))
    else:
        keystone_certfile = ""
        keystone_keyfile = ""
        keystone_cafile = ""
        keystone_insecure_flag = True

    # Record the git revision (or packaged VERSION) of fab utils and the tests.
    with settings(warn_only=True), hide('everything'):
        with lcd(contrail_fab_path):
            if local('git branch').succeeded:
                fab_revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    fab_revision = run('cat /opt/contrail/contrail_packages/VERSION')
        with lcd(test_dir):
            if local('git branch').succeeded:
                revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    revision = run('cat /opt/contrail/contrail_packages/VERSION')

    # Skeleton of the topology written out as sanity_testbed.json.
    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'esxi_vms':[],
        'vcenter_servers':[],
        'hosts_ipmi': [],
        'tor':[],
        'sriov':[],
        'dpdk':[],
        'ns_agilio_vrouter':[],
    }

    sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
        contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    if not getattr(env, 'test', None):
        env.test={}
    containers = env.test.get('containers')

    # Optional traffic-generator (ixia / spirent) endpoints.
    traffic_data = env.test.get('traffic_data')
    ixia_linux_host_ip = get_value_of_key(traffic_data, 'ixia_linux_host_ip')
    ixia_host_ip = get_value_of_key(traffic_data, 'ixia_host_ip')
    spirent_linux_host_ip = get_value_of_key(traffic_data, 'spirent_linux_host_ip')
    ixia_linux_username = get_value_of_key(traffic_data, 'ixia_linux_username')
    ixia_linux_password = get_value_of_key(traffic_data, 'ixia_linux_password')
    spirent_linux_username = get_value_of_key(traffic_data, 'spirent_linux_username')
    spirent_linux_password = get_value_of_key(traffic_data, 'spirent_linux_password')

    # Discover hostnames of the openstack / cfgm / control / database nodes
    # by logging in and running `hostname`.
    if env.get('orchestrator', 'openstack') == 'openstack':
        with settings(host_string = env.roledefs['openstack'][0]), hide('everything'):
            openstack_host_name = run("hostname")

    with settings(host_string = env.roledefs['cfgm'][0]), hide('everything'):
        cfgm_host_name = run("hostname")

    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string = control_host), hide('everything'):
            host_name = run("hostname")
            control_host_names.append(host_name)

    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string = cassandra_host), hide('everything'):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)

    keystone_version = get_keystone_version()
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    contrail_internal_vip = get_contrail_internal_vip()
    contrail_external_vip = get_contrail_external_vip()
    multi_role_test = False

    # Build one host entry per node, with a role descriptor for every
    # contrail role the node carries.
    for host_string in env.roledefs['all']:
        if host_string in env.roledefs.get('test',[]):
            # A dedicated 'test' node is skipped unless it also carries
            # some other (non-'test'/'all') role.
            for role in env.roledefs.iterkeys():
                if role in ['test','all']:
                    continue
                if host_string in env.roledefs.get(role,[]):
                    multi_role_test=True
                    break
            if not multi_role_test:
                continue
        host_ip = host_string.split('@')[1]
        with settings(host_string = host_string), hide('everything'):
            try:
                host_name = run("hostname")
                host_fqname = run("hostname -f")
            except:
                # Unreachable node: warn and leave it out of the testbed.
                logger.warn('Unable to login to %s'%host_ip)
                continue
        host_dict = {}
        host_dict['ip'] = host_ip
        host_dict['data-ip']= get_data_ip(host_string)[0]
        # NOTE(review): this re-assignment is a no-op (same call, same value);
        # it presumably once handled a fallback case -- confirm intent.
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip']= get_control_host_string(host_string).split('@')[1]
        host_dict['name'] = host_name
        host_dict['fqname'] = host_fqname
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] =get_env_passwords(host_string)
        host_dict['roles'] = []

        # Optional per-host QoS settings.
        if env.get('qos', {}):
            if host_string in env.qos.keys():
                role_dict = env.qos[host_string]
                host_dict['qos'] = role_dict
        if env.get('qos_niantic', {}):
            if host_string in env.qos_niantic.keys():
                role_dict = env.qos_niantic[host_string]
                host_dict['qos_niantic'] = role_dict

        if host_string in env.roledefs['openstack']:
            role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'openstack')
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}}
            role_dict['container'] = get_container_name(containers, host_string, 'controller')
            if env.get('orchestrator', 'openstack') == 'openstack':
                role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'controller')
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            role_dict['container'] = get_container_name(containers, host_string, 'analyticsdb')
            host_dict['roles'].append(role_dict)

        if not env.roledefs.get('compute'):
            env.roledefs['compute'] = []
        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'agent')
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
                # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs.get('lb',[]):
            role_dict = {'type': 'lb', 'params': {'lb': host_name}}
            role_dict['container'] = get_container_name(containers, host_string, 'lb')
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} }
            role_dict['container'] = get_container_name(containers, host_string, 'analytics')
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} }
            role_dict['container'] = get_container_name(containers, host_string, 'controller')
            host_dict['roles'].append(role_dict)

        # Kube managers
        if 'contrail-kubernetes' in env.roledefs.keys() and \
                host_string in env.roledefs['contrail-kubernetes']:
            role_dict = { 'type': 'contrail-kubernetes', 'params': {} }
            role_dict['container'] = get_container_name(containers,
                host_string, 'contrail-kube-manager')
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)

    if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw)

    #get sriov info
    if env.has_key('sriov'):
        sanity_testbed_dict['sriov'].append(env.sriov)

    #get dpdk info
    if env.has_key('dpdk'):
        sanity_testbed_dict['dpdk'].append(env.dpdk)

    #get k8s info
    sanity_testbed_dict['kubernetes'] = env.get('kubernetes', {})

    #get ns_agilio_vrouter info
    if env.has_key('ns_agilio_vrouter'):
        sanity_testbed_dict['ns_agilio_vrouter'].append(env.ns_agilio_vrouter)

    # Read ToR config
    # NOTE(review): sanity_tor_dict is never used afterwards in this function.
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent

    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts

    if env.has_key('xmpp_auth_enable'):
        sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable
    if env.has_key('xmpp_dns_auth_enable'):
        sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable
    if env.has_key('metadata_ssl_enable'):
        sanity_testbed_dict['metadata_ssl_enable'] = env.metadata_ssl_enable

    if env.has_key('dm_mx'):
        sanity_testbed_dict['dm_mx'] = env.dm_mx

    # Read any MX config (as physical_router )
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers

    # ESXi hypervisors (vcenter deployments) are added as pseudo-hosts.
    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            #Its used for vcenter only mode provosioning for contrail-vm
            #Its not needed for vcenter_gateway mode, hence might not be there in testbed.py
            if 'contrail_vm' in esxi_hosts[esxi]:
                host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host']
            host_dict['roles'] = []
            host_dict['type'] = 'esxi'
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)

    vcenter_servers = env.get('vcenter_servers')
    if vcenter_servers:
        for vcenter in vcenter_servers:
            sanity_testbed_dict['vcenter_servers'].append(vcenter_servers[vcenter])

    orch = getattr(env, 'orchestrator', 'openstack')
    deployer = getattr(env, 'deployer', 'openstack')

    #get other orchestrators (vcenter etc) info if any
    slave_orch = None
    if env.has_key('other_orchestrators'):
        sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators
        for k,v in env.other_orchestrators.items():
            if v['type'] == 'vcenter':
                slave_orch = 'vcenter'

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    # Setting slave orch to k8s when key present
    if env.has_key('kubernetes'):
        if sanity_testbed_dict['kubernetes']['mode'] == 'nested':
            slave_orch = 'kubernetes'

    # generate json file and copy to cfgm
    sanity_testbed_json = json.dumps(sanity_testbed_dict)

    # Credentials / knobs: environment variables win over env.test settings.
    stack_user = os.getenv('STACK_USER', env.get('stack_user', env.test.get('stack_user', '')))
    stack_password = os.getenv('STACK_PASSWORD',
        env.test.get('stack_password',''))
    stack_tenant = os.getenv('STACK_TENANT', env.get('stack_tenant',
        env.test.get('stack_tenant', '')))
    stack_domain = os.getenv('STACK_DOMAIN',
        env.get('stack_domain', env.test.get('stack_domain', '')))
    use_project_scoped_token = env.test.get('use_project_scoped_token', '')
    if not env.has_key('domain_isolation'):
        env.domain_isolation = False
    if not env.has_key('cloud_admin_domain'):
        env.cloud_admin_domain = 'Default'
    if not env.has_key('cloud_admin_user'):
        env.cloud_admin_user = 'admin'
    if not env.has_key('cloud_admin_password'):
        # NOTE(review): '<PASSWORD>password' is a redacted placeholder key.
        env.cloud_admin_password = env.get('<PASSWORD>password')
    domain_isolation = os.getenv('DOMAIN_ISOLATION',
        env.test.get('domain_isolation', env.domain_isolation))
    cloud_admin_domain = os.getenv('CLOUD_ADMIN_DOMAIN',
        env.test.get('cloud_admin_domain', env.cloud_admin_domain))
    cloud_admin_user = os.getenv('CLOUD_ADMIN_USER',
        env.test.get('cloud_admin_user', env.cloud_admin_user))
    cloud_admin_password = os.getenv('<PASSWORD>_ADMIN_PASSWORD',
        env.test.get('cloud_admin_password', env.cloud_admin_password))
    tenant_isolation = os.getenv('TENANT_ISOLATION',
        env.test.get('tenant_isolation', ''))

    stop_on_fail = env.get('stop_on_fail', False)
    mail_to = os.getenv('MAIL_TO', env.test.get('mail_to', ''))
    log_scenario = env.get('log_scenario', 'Sanity')
    stack_region_name = get_region_name()
    admin_user, admin_password = get_authserver_credentials()
    if orch == 'kubernetes':
        admin_tenant = 'default'
    else:
        admin_tenant = get_admin_tenant_name()

    # Few hardcoded variables for sanity environment
    # can be removed once we move to python3 and configparser
    webserver_host = os.getenv('WEBSERVER_HOST',
        env.test.get('webserver_host',''))
    webserver_user = os.getenv('WEBSERVER_USER',
        env.test.get('webserver_user', ''))
    webserver_password = os.getenv('WEBSERVER_PASSWORD',
        env.test.get('webserver_password', ''))
    webserver_log_path = os.getenv('WEBSERVER_LOG_PATH',
        env.test.get('webserver_log_path', '/var/www/contrail-test-ci/logs/'))
    webserver_report_path = os.getenv('WEBSERVER_REPORT_PATH',
        env.test.get('webserver_report_path', '/var/www/contrail-test-ci/reports/'))
    webroot = os.getenv('WEBROOT', env.test.get('webroot', 'contrail-test-ci'))
    mail_server = os.getenv('MAIL_SERVER', env.test.get('mail_server', ''))
    mail_port = os.getenv('MAIL_PORT', env.test.get('mail_port', '25'))
    fip_pool_name = os.getenv('FIP_POOL_NAME',
        env.test.get('fip_pool_name', 'floating-ip-pool'))
    public_virtual_network = os.getenv('PUBLIC_VIRTUAL_NETWORK',
        env.test.get('public_virtual_network', 'public'))
    public_tenant_name = os.getenv('PUBLIC_TENANT_NAME',
        env.test.get('public_tenant_name', 'admin'))
    fixture_cleanup = os.getenv('FIXTURE_CLEANUP',
        env.test.get('fixture_cleanup', 'yes'))
    generate_html_report = os.getenv('GENERATE_HTML_REPORT',
        env.test.get('generate_html_report', 'True'))
    keypair_name = os.getenv('KEYPAIR_NAME',
        env.test.get('keypair_name', 'contrail_key'))
    mail_sender = os.getenv('MAIL_SENDER', env.test.get('mail_sender', '<EMAIL>'))
    discovery_ip = os.getenv('DISCOVERY_IP', env.test.get('discovery_ip', ''))
    config_api_ip = os.getenv('CONFIG_API_IP', env.test.get('config_api_ip', ''))
    analytics_api_ip = os.getenv('ANALYTICS_API_IP',
        env.test.get('analytics_api_ip', ''))
    discovery_port = os.getenv('DISCOVERY_PORT',
        env.test.get('discovery_port', ''))
    config_api_port = os.getenv('CONFIG_API_PORT',
        env.test.get('config_api_port', ''))
    analytics_api_port = os.getenv('ANALYTICS_API_PORT',
        env.test.get('analytics_api_port', ''))
    control_port = os.getenv('CONTROL_PORT', env.test.get('control_port', ''))
    dns_port = os.getenv('DNS_PORT', env.test.get('dns_port', ''))
    agent_port = os.getenv('AGENT_PORT', env.test.get('agent_port', ''))
    user_isolation = os.getenv('USER_ISOLATION',
        env.test.get('user_isolation', False if stack_user else True))
    neutron_username = os.getenv('NEUTRON_USERNAME',
        env.test.get('neutron_username', None))
    availability_zone = os.getenv('AVAILABILITY_ZONE',
        env.test.get('availability_zone', None))
    ci_flavor = os.getenv('CI_FLAVOR', env.test.get('ci_flavor', None))
    kube_config_file = env.test.get('kube_config_file',
        '/etc/kubernetes/admin.conf')
    openshift_src_config_file = env.test.get('openshift_src_config_file',
        '/root/.kube/config')

    # Static attributes read straight from the testbed module.
    use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False)
    router_asn = getattr(testbed, 'router_asn', '')
    public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
    public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
    ext_routers = getattr(testbed, 'ext_routers', '')
    router_info = str(ext_routers)
    fabric_gw = getattr(testbed, 'fabric_gw', '')
    fabric_gw_info = str(fabric_gw)
    test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
    webui = getattr(testbed, 'webui', False)
    horizon = getattr(testbed, 'horizon', False)
    ui_config = getattr(testbed, 'ui_config', False)
    ui_browser = getattr(testbed, 'ui_browser', False)

    if not env.has_key('openstack'):
        env.openstack = {}
    if not env.has_key('cfgm'):
        env.cfgm = {}
    config_amqp_ip = env.openstack.get('amqp_host', '')
    if config_amqp_ip:
        config_amqp_ips = [config_amqp_ip]
    else:
        config_amqp_ips = []
    # If amqp details are in env.cfgm as well, use that
    config_amqp_port = env.cfgm.get('amqp_port', '5672')
    config_amqp_ips = env.cfgm.get('amqp_hosts', config_amqp_ips)

    key_filename = env.get('key_filename', '')
    pubkey_filename = env.get('pubkey_filename', '')

    # vcenter deployments use the last datacenter listed for any server.
    vcenter_dc = ''
    if orch == 'vcenter' or slave_orch== 'vcenter':
        public_tenant_name='vCenter'
        if env.has_key('vcenter_servers'):
            if env.vcenter_servers:
                for vc in env.vcenter_servers:
                    for dc in env.vcenter_servers[vc]['datacenters']:
                        vcenter_dc = dc

    #global controller
    gc_host_mgmt = getattr(testbed, 'gc_host_mgmt', '')
    gc_host_control_data = getattr(testbed, 'gc_host_control_data', '')
    gc_user_name = getattr(testbed, 'gc_user_name', '')
    gc_user_pwd = getattr(testbed, 'gc_user_pwd', '')
    keystone_password = getattr(testbed, 'keystone_password', '')

    # Render sanity_params.ini from the sample template.
    # NOTE(review): the bare ``<PASSWORD>`` tokens below are redaction
    # placeholders and are not valid Python as-is.
    sanity_params = sanity_ini_templ.safe_substitute(
        {'__testbed_json_file__' : 'sanity_testbed.json',
         '__keystone_version__' : keystone_version,
         '__use_project_scoped_token__': use_project_scoped_token,
         '__nova_keypair_name__' : keypair_name,
         '__orch__' : orch,
         '__deployer__' : deployer,
         '__admin_user__' : admin_user,
         '__admin_password__' : <PASSWORD>,
         '__admin_tenant__' : admin_tenant,
         '__domain_isolation__' : domain_isolation,
         '__cloud_admin_domain__' : cloud_admin_domain,
         '__cloud_admin_user__' : cloud_admin_user,
         '__cloud_admin_password__': <PASSWORD>,
         '__tenant_isolation__' : tenant_isolation,
         '__stack_user__' : stack_user,
         '__stack_password__' : stack_password,
         '__auth_ip__' : auth_server_ip,
         '__auth_port__' : auth_server_port,
         '__auth_protocol__' : auth_protocol,
         '__stack_region_name__' : stack_region_name,
         '__stack_tenant__' : stack_tenant,
         '__stack_domain__' : stack_domain,
         '__multi_tenancy__' : get_mt_enable(),
         '__address_family__' : get_address_family(),
         '__log_scenario__' : log_scenario,
         '__generate_html_report__': generate_html_report,
         '__fixture_cleanup__' : fixture_cleanup,
         '__key_filename__' : key_filename,
         '__pubkey_filename__' : pubkey_filename,
         '__webserver__' : webserver_host,
         '__webserver_user__' : webserver_user,
         '__webserver_password__' : webserver_password,
         '__webserver_log_dir__' : webserver_log_path,
         '__webserver_report_dir__': webserver_report_path,
         '__webroot__' : webroot,
         '__mail_server__' : mail_server,
         '__mail_port__' : mail_port,
         '__sender_mail_id__' : mail_sender,
         '__receiver_mail_id__' : mail_to,
         '__http_proxy__' : env.get('http_proxy', ''),
         '__ui_browser__' : ui_browser,
         '__ui_config__' : ui_config,
         '__horizon__' : horizon,
         '__webui__' : webui,
         '__devstack__' : False,
         '__public_vn_rtgt__' : public_vn_rtgt,
         '__router_asn__' : router_asn,
         '__router_name_ip_tuples__': router_info,
         '__fabric_gw_name_ip_tuple__': fabric_gw_info,
         '__public_vn_name__' : fip_pool_name,
         '__public_virtual_network__':public_virtual_network,
         '__public_tenant_name__' :public_tenant_name,
         '__public_vn_subnet__' : public_vn_subnet,
         '__test_revision__' : revision,
         '__fab_revision__' : fab_revision,
         '__test_verify_on_setup__': test_verify_on_setup,
         '__stop_on_fail__' : stop_on_fail,
         '__ha_setup__' : getattr(testbed, 'ha_setup', ''),
         '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''),
         '__ipmi_password__' : getattr(testbed, 'ipmi_password', ''),
         '__contrail_internal_vip__' : contrail_internal_vip,
         '__contrail_external_vip__' : contrail_external_vip,
         '__internal_vip__' : internal_vip,
         '__external_vip__' : external_vip,
         '__vcenter_dc__' : vcenter_dc,
         '__vcenter_server__' : get_vcenter_ip(),
         '__vcenter_port__' : get_vcenter_port(),
         '__vcenter_username__' : get_vcenter_username(),
         '__vcenter_password__' : get_vcenter_password(),
         '__vcenter_datacenter__' : get_vcenter_datacenter(),
         '__vcenter_compute__' : get_vcenter_compute(),
         '__use_devicemanager_for_md5__' : use_devicemanager_for_md5,
         '__discovery_port__' : discovery_port,
         '__config_api_port__' : config_api_port,
         '__analytics_api_port__' : analytics_api_port,
         '__control_port__' : control_port,
         '__dns_port__' : dns_port,
         '__vrouter_agent_port__' : agent_port,
         '__discovery_ip__' : discovery_ip,
         '__config_api_ip__' : config_api_ip,
         '__analytics_api_ip__' : analytics_api_ip,
         '__user_isolation__' : user_isolation,
         '__neutron_username__' : neutron_username,
         '__availability_zone__' : availability_zone,
         '__ci_flavor__' : ci_flavor,
         '__config_amqp_ips__' : ','.join(config_amqp_ips),
         '__config_amqp_port__' : config_amqp_port,
         '__api_auth_protocol__' : api_auth_protocol,
         '__api_certfile__' : api_certfile,
         '__api_keyfile__' : api_keyfile,
         '__api_cafile__' : api_cafile,
         '__api_insecure_flag__' : api_insecure_flag,
         '__keystone_certfile__' : keystone_certfile,
         '__keystone_keyfile__' : keystone_keyfile,
         '__keystone_cafile__' : keystone_cafile,
         '__keystone_insecure_flag__': keystone_insecure_flag,
         '__gc_host_mgmt__' : gc_host_mgmt,
         '__gc_host_control_data__': gc_host_control_data,
         '__gc_user_name__' : gc_user_name,
         '__gc_user_pwd__' : <PASSWORD>,
         '__keystone_password__' : <PASSWORD>,
         '__slave_orch__' : slave_orch,
         '__ixia_linux_host_ip__' : ixia_linux_host_ip,
         '__ixia_host_ip__' : ixia_host_ip,
         '__spirent_linux_host_ip__': spirent_linux_host_ip,
         '__ixia_linux_username__' : ixia_linux_username,
         '__ixia_linux_password__' : <PASSWORD>,
         '__spirent_linux_username__': spirent_linux_username,
         '__spirent_linux_password__': <PASSWORD>,
        })

    # Write the rendered ini and the topology json into the test directory.
    ini_file = test_dir + '/' + 'sanity_params.ini'
    testbed_json_file = test_dir + '/' + 'sanity_testbed.json'
    with open(ini_file, 'w') as ini:
        ini.write(sanity_params)
    with open(testbed_json_file,'w') as tb:
        tb.write(sanity_testbed_json)

    # Create /etc/contrail/openstackrc
    if not os.path.exists('/etc/contrail'):
        os.makedirs('/etc/contrail')

    # Bundle keystone CA/cert/key into one PEM for OS_CACERT when https is on.
    keycertbundle = None
    if keystone_cafile and keystone_keyfile and keystone_certfile:
        bundle = '/tmp/keystonecertbundle.pem'
        certs = [keystone_certfile, keystone_keyfile, keystone_cafile]
        keycertbundle = utils.getCertKeyCaBundle(bundle, certs)

    with open('/etc/contrail/openstackrc','w') as rc:
        rc.write("export OS_USERNAME=%s\n" % admin_user)
        # NOTE(review): the format string below is garbled by redaction
        # (presumably once "export OS_PASSWORD=%s\n").
        rc.write("export OS_PASSWORD=%<PASSWORD>" % admin_password)
        rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant)
        rc.write("export OS_REGION_NAME=%s\n" % stack_region_name)
        rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (auth_protocol,
            auth_server_ip,
            auth_server_port))
        rc.write("export OS_CACERT=%s\n" % keycertbundle)
        rc.write("export OS_CERT=%s\n" % keystone_certfile)
        rc.write("export OS_KEY=%s\n" % keystone_keyfile)
        rc.write("export OS_INSECURE=%s\n" % keystone_insecure_flag)
        rc.write("export OS_NO_CACHE=1\n")

    # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone
    config = ConfigParser.ConfigParser()
    # Preserve option-name case (ConfigParser lower-cases by default).
    config.optionxform = str
    vnc_api_ini = '/etc/contrail/vnc_api_lib.ini'
    if os.path.exists(vnc_api_ini):
        config.read(vnc_api_ini)
    if 'auth' not in config.sections():
        config.add_section('auth')
    config.set('auth','AUTHN_TYPE', 'keystone')
    config.set('auth','AUTHN_PROTOCOL', auth_protocol)
    config.set('auth','AUTHN_SERVER', auth_server_ip)
    config.set('auth','AUTHN_PORT', auth_server_port)
    if keystone_version == 'v3':
        config.set('auth','AUTHN_URL', '/v3/auth/tokens')
    else:
        config.set('auth','AUTHN_URL', '/v2.0/tokens')

    if api_auth_protocol == 'https':
        if 'global' not in config.sections():
            config.add_section('global')
        config.set('global','certfile', api_certfile)
        config.set('global','cafile', api_cafile)
        config.set('global','keyfile', api_keyfile)
        config.set('global','insecure',api_insecure_flag)

    if auth_protocol == 'https':
        if 'auth' not in config.sections():
            config.add_section('auth')
        config.set('auth','certfile', keystone_certfile)
        config.set('auth','cafile', keystone_cafile)
        config.set('auth','keyfile', keystone_keyfile)
        config.set('auth','insecure', keystone_insecure_flag)

    with open(vnc_api_ini,'w') as f:
        config.write(f)

    # Get kube config file to the testrunner node
    if orch == 'kubernetes' or slave_orch == 'kubernetes':
        if not os.path.exists(kube_config_file):
            dir_name = os.path.dirname(kube_config_file)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with settings(host_string = env.kubernetes['master']):
                if deployer == 'openshift' :
                    get(openshift_src_config_file, kube_config_file)
                else:
                    get(kube_config_file, kube_config_file)

    # If webui = True, in testbed, setup webui for sanity
    if webui:
        sku = get_build_sku(cfgm_host)
        update_config_option('openstack', '/etc/keystone/keystone.conf',
            'token', 'expiration',
            '86400','keystone', sku)
        update_js_config('webui', '/etc/contrail/config.global.js',
            'contrail-webui', container=is_container_env)
# Bracket characters recognised by the testbed-section scanner below.
open_delimiters = ['(', '[', '{']
close_delimiters = [')', ']', '}']


def getindices(string):
    """Return (position, character) pairs for every bracket in *string*,
    in order of appearance."""
    pattern = '|'.join(re.escape(d) for d in open_delimiters + close_delimiters)
    return [(m.start(), string[m.start()])
            for m in re.finditer(pattern, string, re.M | re.S)]
def get_section(string, pattern):
    """Locate the bracketed section that follows a regex *pattern*.

    Finds the first match of *pattern* in *string*, then scans the bracket
    positions produced by getindices() and returns ``(start, end)`` where
    ``start`` is the match start and ``end`` is the index just past the
    closing bracket that balances the first opening bracket after the match.
    Returns ``(None, None)`` when the pattern or a balanced section is not
    found.
    """
    # Stack of open-bracket positions seen after the match.
    block = list()
    match = re.search(pattern, string, re.M|re.S)
    if not match:
        return None, None
    match_end = match.end()
    indices = getindices(string)
    for index in range(len(indices)):
        delimiter_tuple = indices[index]
        # Ignore brackets that occur before the pattern match.
        if delimiter_tuple[0] < match_end:
            continue
        if delimiter_tuple[1] in open_delimiters:
            block.append(delimiter_tuple[0])
        else:
            # A closer that drops the depth back to zero ends the section.
            if len(block) == 1:
                return (match.start(), delimiter_tuple[0]+1)
            # NOTE(review): a stray closing bracket while the stack is empty
            # would raise IndexError here -- confirm inputs are balanced.
            block.pop()
    return (None, None)
def testbed_format_conversion(path='/opt/contrail/utils'):
    """Convert a container-style testbed.py into the classic role layout.

    Rewrites ``<path>/fabfile/testbeds/testbed.py`` -- expanding the
    'contrail-controller' role into cfgm/control/webui and renaming the
    other container role names -- into ``testbed_new.py``, and points
    ``env.mytestbed`` at it.  Sets the module-level ``is_container_env``
    flag when the container-style roledefs are detected.  Returns True
    when no conversion is needed (no roledefs section or no
    contrail-controller role).
    """
    tb_file = path + '/fabfile/testbeds/testbed.py'
    tb_file_tmp = path + '/fabfile/testbeds/testbed_new.py'
    #check if file already has both parameter version.
    with open(tb_file) as fd:
        tb = fd.read()
    # Trim the comments
    tb = re.sub('#.*\n', '\n', tb)
    tb = re.sub('^\s*\n', '', tb, flags=re.M)
    # Keep a comment-stripped copy next to the original for inspection.
    with open(tb_file+'.cmp', 'w') as fd:
        fd.write(tb)
    # Find roledefs section
    start, end = get_section(tb, 'env.roledefs')
    if not start:
        return True
    global is_container_env
    is_container_env = True
    block = tb[start:end]
    # Find contrail-controller section
    match = re.search('contrail-controller.*?].*?,', block, re.M|re.S)
    if not match:
        return True
    ctrl_start = match.start()
    ctrl_end = match.end()
    ctrl_block = block[ctrl_start-1:ctrl_end]
    # Replace role names: one controller entry becomes three classic roles.
    ctrl_block = ctrl_block.replace('contrail-controller', 'cfgm') + \
        ctrl_block.replace('contrail-controller', 'control') + \
        ctrl_block.replace('contrail-controller', 'webui')
    roledef_block = block[:ctrl_start-1] + ctrl_block + block[ctrl_end:]
    roledef_block = roledef_block.replace('contrail-analyticsdb', 'database')
    roledef_block = roledef_block.replace('contrail-analytics', 'collector')
    roledef_block = roledef_block.replace('contrail-compute', 'compute')
    roledef_block = roledef_block.replace('contrail-lb', 'lb')
    new_tb = tb[:start] + roledef_block + tb[end:]
    with open(tb_file_tmp, 'w') as fd:
        fd.write(new_tb)
    env.mytestbed = 'testbed_new'
# end testbed_format_conversion
def main(argv=sys.argv):
    """Parse CLI arguments, convert the testbed file and build the test env.

    :param list argv: full argument vector (argv[0] is the program name).

    Returns True on success so that ``sys.exit(not main(...))`` yields exit
    status 0.  Previously main returned None, which made the script exit
    with status 1 even when it succeeded; it also ignored its *argv*
    parameter and always parsed ``sys.argv``.
    """
    ap = argparse.ArgumentParser(
        description='Configure test environment')
    ap.add_argument('contrail_test_directory', type=str,
        help='contrail test directory')
    ap.add_argument('-p','--contrail-fab-path', type=str, default='/opt/contrail/utils',
        help='Contrail fab path on local machine')
    # Honour the argv parameter instead of implicitly reading sys.argv.
    args = ap.parse_args(argv[1:])
    testbed_format_conversion(args.contrail_fab_path)
    configure_test_env(args.contrail_fab_path, args.contrail_test_directory)
    return True


if __name__ == "__main__":
    sys.exit(not main(sys.argv))
| StarcoderdataPython |
357935 | """
Classes for retrieving tiles from different providers.
"""
import abc
import io
import logging
import os
import random
import numpy as np
import PIL.Image
from requests_cache import CachedSession
from . import fs_cache
# On-disk location of the HTTP tile cache.
CACHE_DIR = os.path.expanduser( '~/.vismap-tiles')
logger = logging.getLogger(__name__)
# Shared HTTP session; responses are cached on disk so repeated tile
# requests do not hit the network.
_session = CachedSession(backend=fs_cache.FSCache(CACHE_DIR))
# Registry of available providers, keyed by class name and populated by
# the @register decorator below.
providers = {}


def register(cls):
    """Class decorator: record *cls* in ``providers`` under its class name."""
    providers[cls.__name__] = cls
    return cls
class TileNotFoundError(Exception):
    """Raised when a tile server does not return a tile (non-200 response)."""
class TileProvider(abc.ABC):
    """Class which knows how to get tiles, given a z,x,y location.

    Subclasses implement :meth:`url` (and the ``attribution`` property);
    :meth:`get_tile` handles fetching, caching and decoding.
    """
    @abc.abstractmethod
    def url(self, z, x, y):
        """
        For a given zoom, x, and y index, return the URL for fetching the tile.
        """

    def get_tile(self, z, x, y):
        """Fetch the tile at (z, x, y) and return it as a vertically flipped
        RGBA image array.

        Raises TileNotFoundError when the server response is not HTTP 200.
        """
        # Wrap x so indices beyond the 2**z tile columns map back into range.
        x = x % (2 ** z)
        url = self.url(z, x, y)
        logger.debug('retrieving tile from %s', url)
        # _session caches responses on disk, so repeats avoid the network.
        resp = _session.get(url)
        if resp.status_code != 200:
            msg = 'Could not retrieve tile for z={}, x={}, y={}'
            msg = msg.format(z, x, y)
            raise TileNotFoundError(msg)
        img_bytes = resp.content
        img = PIL.Image.open(io.BytesIO(img_bytes))
        rgba = img.convert('RGBA')
        # flip so that when displaying with Vispy everything shows up
        # right-side-up.
        rgba = np.flip(rgba, 0)
        return rgba

    @property
    @abc.abstractmethod
    def attribution(self):
        """Human-readable attribution text for this tile source."""
        pass
class StamenBase(TileProvider):
    """Shared URL scheme for the basic Stamen map styles.

    Subclasses select a style by setting ``map_name``.
    """

    def url(self, z, x, y):
        """Return the Stamen tile URL for the configured style."""
        return 'http://c.tile.stamen.com/{0}/{1}/{2}/{3}.png'.format(
            self.map_name, z, x, y)

    attribution = ('Map tiles by Stamen Design, under CC BY 3.0. Data '
                   'by OpenStreetMap, under ODbL')
@register
class StamenToner(StamenBase):
    # High-contrast black & white style.
    map_name = 'toner'
@register
class StamenLite(StamenBase):
    # Lighter variant of the toner style.
    map_name = 'toner-lite'
@register
class StamenTerrain(StamenBase):
    # Hill-shaded terrain style.
    map_name = 'terrain'
@register
class StamenWatercolor(StamenBase):
    # Painterly watercolor style; its data is attributed under CC BY SA,
    # so the base-class attribution is overridden.
    map_name = 'watercolor'
    attribution = 'Map tiles by Stamen Design, under CC BY 3.0. '
    attribution += 'Data by OpenStreetMap, under CC BY SA'
class MapStackBase(TileProvider):
    """Map Stack allows lots of transformations of Stamen tiles.

    Subclasses must provide a "transform" attribute on the class.
    """

    def url(self, z, x, y):
        """Return the Map Stack URL for this class's ``transform`` recipe."""
        return 'http://d.sm.mapstack.stamen.com/{0}/{1}/{2}/{3}.png'.format(
            self.transform, z, x, y)

    attribution = ('Tiles by MapBox, Data © OpenStreetMap contributors\n'
                   'Tiles by Stamen Design, under CC-BY 3.0 Data © '
                   'OpenStreetMap contributors, under CC-BY-SA')
@register
class FadedWatercolor(MapStackBase):
    # Desaturated, orange-tinted watercolor blended with aerial imagery.
    transform = '((watercolor,$fff[hsl-saturation@50],$ff5500[hsl-color@30]),(naip,$fff[hsl-saturation@20],mapbox-water[destination-out])[overlay])'
@register
class CoolBlue(MapStackBase):
    # Toner-lite recolored to a muted blue palette with buildings cut out.
    transform = '(toner-lite,$fff[difference],$000[@40],$fff[hsl-saturation@40],$5999a6[hsl-color],buildings[destination-out])[hsl-saturation@90]'
@register
class BigMapOfBlue(MapStackBase):
    """Named by my four-year-old son."""
    # Inverted watercolor tinted light blue.
    transform = '(watercolor,$fff[difference],$81e3f7[hsl-color])'
@register
class StamenTonerInverted(MapStackBase):
    """Inverted colors, otherwise same as StamenToner"""
    transform = '(toner,$fff[difference])'
class CartodbBase(TileProvider):
    """Subclasses of CartodbBase must provide a `map_name` attribute
    on the class."""

    def url(self, z, x, y):
        """Return the CartoDB basemap tile URL for the configured style."""
        template = 'http://cartodb-basemaps-1.global.ssl.fastly.net/{}/{}/{}/{}.png'
        return template.format(self.map_name, z, x, y)

    attribution = 'Copyright OpenStreetMap; Copyright CartoDB'
@register
class CartodbDark(CartodbBase):
    # CartoDB "Dark Matter" basemap.
    map_name = 'dark_all'
@register
class Mapnik(TileProvider):
    """Default OpenStreetMap (Mapnik) rendering."""

    def url(self, z, x, y):
        return 'http://a.tile.openstreetmap.org/{0}/{1}/{2}.png'.format(z, x, y)

    attribution = 'Copyright OpenStreetMap'
@register
class OpenTopMap(TileProvider):
    """Topographic map style from opentopomap.org."""
    def url(self, z, x, y):
        url = 'http://a.tile.opentopomap.org/{}/{}/{}.png'
        return url.format(z, x, y)
    attribution = ('Map data: Copyright OpenStreetMap, SRTM \n '
                   'Map style: Copyright OpenTopoMap CC-BY-SA')
@register
class EsriWorldImagery(TileProvider):
    """Esri World Imagery (satellite) basemap."""
    def url(self, z, x, y):
        url ='http://server.arcgisonline.com/ArcGIS/rest/services' \
            '/World_Imagery/MapServer/tile/{}/{}/{}'
        # Note the argument order: the ArcGIS tile scheme is /z/y/x,
        # so y and x are deliberately swapped here.
        return url.format(z, y, x)
    attribution = \
        'Tiles copyright Esri -- Source: Esri, i-cubed, USDA, USGS, AEX, ' \
        'GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User ' \
        'Community'
def random_provider():
    """Instantiate and return a randomly chosen registered tile provider."""
    provider_classes = list(providers.values())
    return random.choice(provider_classes)()
| StarcoderdataPython |
11218710 | <filename>client/states/invited.py
from client.states import logged_in, waiting, playing
from client.board import Board, Mark
from client.states.base import State
from random import random
class InvitedState(State):
    """Client state entered when another player has invited us to a game."""

    def handle_input_command(self, command):
        """Accept the pending invite on "y"; any other input refuses it."""
        if command != "y":
            self._handle_invite_refused()
        else:
            self._handle_invite_accepted()

    def _handle_invite_accepted(self):
        """Accept the invite, flip a coin for turn order, and start the game."""
        client = self.client
        if random() > 0.5:
            # We move first: play as X and go straight to the playing state.
            client.opponent.accept_game_and_wait()
            client.mark = Mark.X
            client.change_state(playing.PlayingState(client))
        else:
            # Opponent moves first: play as O and wait for their move.
            client.opponent.accept_game_and_play()
            client.mark = Mark.O
            client.change_state(waiting.WaitingState(client))
        client.opponent.start_measure_delay()
        client.board = Board(client.mark)

    def _handle_invite_refused(self):
        """Decline the invite and return to the logged-in state."""
        self.client.opponent.refuse_game()
        self.client.change_state(logged_in.LoggedInState(self.client))
| StarcoderdataPython |
317313 | """ models module """
import numpy as np
import scipy.integrate as sci
import itertools
from core import utils
from core import fitting
class Model(object):
    """ Parent Class for the charge carrier recombination models """

    def __init__(self, ids, units, units_html, factors, fvalues, gvalues, gvalues_range, n_keys, n_init, conc_ca_ids,
                 param_filters):
        """ Object constructor
        :param list ids: parameter ids
        :param dict units: parameter units
        :param dict units_html: parameter units (html)
        :param dict factors: parameter factors
        :param dict fvalues: parameter fixed values
        :param dict gvalues: parameter guess values
        :param dict gvalues_range: parameter guess values range
        :param tuple n_keys: ordered keys of the carrier concentration variables (must match rate_equations order)
        :param callable n_init: maps an initial concentration N_0 to a dict of initial values, one per n_key
        :param tuple conc_ca_ids: subset of n_keys checked for pulse-to-pulse convergence
        :param list param_filters: string conditions used to filter guess-value combinations in grid_fitting """
        self.s_display_keys = ids.copy()  # single value display keys
        self.m_display_keys = ['y_0', 'I']  # multiple values display keys
        # 'y_0' (intensity offset) and 'I' (intensity factor) are common to all models
        self.ids = ids + ['y_0', 'I']
        self.units = utils.merge_dicts(units, {'y_0': '', 'I': ''})
        self.units_html = utils.merge_dicts(units_html, {'y_0': '', 'I': ''})
        self.factors = utils.merge_dicts(factors, {'y_0': 1., 'I': 1})
        self.fvalues = utils.merge_dicts(fvalues, {'y_0': 0., 'I': 1.})
        self.gvalues = utils.merge_dicts(gvalues, {'y_0': 0., 'I': 1.})
        self.gvalues_range = utils.merge_dicts(gvalues_range, {'y_0': [0.], 'I': [1.]})
        self.detached_parameters = ['y_0', 'I']
        self.n_keys = n_keys
        self.n_init = n_init
        self.conc_ca_ids = conc_ca_ids
        self.param_filters = param_filters
        # Quantities display
        self.quantities = {'k_B': 'Bimolecular recombination rate constant', 'k_T': 'Trapping rate constant',
                           'k_D': 'Detrapping rate constant', 'k_A': 'Auger recombination rate constant',
                           'N_T': 'Trap state concentration', 'p_0': 'Doping concentration', 'y_0': 'Intensity offset',
                           'I': 'Intensity factor'}
        # Symbols display
        self.symbols = {'k_B': 'k_B', 'k_T': 'k_T', 'k_D': 'k_D', 'k_A': 'k_A', 'N_T': 'N_T', 'p_0': 'p_0',
                        'y_0': 'y_0', 'I': 'I_0'}
        self.symbols_html = {'N_T': 'N<sub>T</sub>', 'p_0': 'p<sub>0</sub>', 'k_B': 'k<sub>B</sub>',
                             'k_T': 'k<sub>T</sub>',
                             'k_D': 'k<sub>D</sub>', 'k_A': 'k<sub>A</sub>', 'y_0': 'y<sub>0</sub>', 'I': 'I<sub>0</sub>'}
        self.labels = {pkey: self.get_parameter_label(self.symbols[pkey], self.units[pkey]) for pkey in
                       self.ids}  # symbol + unit
        self.labels_i = {value: key for key, value in self.labels.items()}  # inverted dict for param_labels
        self.factors_html = {key: utils.get_power_text(self.factors[key], None) for key in self.factors}
        # Contributions
        self.cbt_labels = {'A': 'Auger (%)', 'B': 'Bimolecular (%)', 'T': 'Trapping (%)', 'D': 'Detrapping (%)'}
        self.n_labels_html = {'n_e': 'n<sub>e</sub>', 'n_t': 'n<sub>t</sub>', 'n_h': 'n<sub>h</sub>', 'n': 'n'}
        self.n_colors = {'n_e': 'red', 'n_t': 'green', 'n_h': 'blue', 'n': 'black'}

    def __eq__(self, other):
        """ self == other: two models are equal when their fixed values, guess values
        and guess-value ranges all match. """
        return (self.fvalues == other.fvalues and self.gvalues == other.gvalues
                and self.gvalues_range == other.gvalues_range)

    @staticmethod
    def get_parameter_label(symbol, unit):
        """ Get the parameter label ('symbol (unit)', or just 'symbol' for unitless parameters)
        :param str symbol: symbol
        :param str unit: unit """
        if unit == '':
            return symbol
        else:
            return symbol + ' (' + unit + ')'

    @property
    def fixed_values(self):
        """ return the dict of fixed values (parameters whose fvalue is not None) """
        return {key: value for key, value in self.fvalues.items() if value is not None}

    # -------------------------------------------------- CORE METHODS --------------------------------------------------

    def rate_equations(self, *args, **kwargs):
        """ Rate equation method (overridden by subclasses) """
        return {}

    def calculate_concentrations(self, t, N_0, p=1, threshold=0.001, **kwargs):
        """ Calculate the carrier concentrations as per the BTD model
        :param t: time (ns)
        :param N_0: initial carrier concentration (cm-3)
        :param p: number of pulses
        :param threshold: threshold
        :param kwargs: keyword arguments passed to the rate equations
        Example
        -------
        >>> _ = BTModel().calculate_concentrations(np.linspace(0, 100), N_0=1e17, k_B=50e-20, k_T=1e-3, k_A=1e-40)
        >>> _ = BTModel().calculate_concentrations(np.linspace(0, 100), N_0=1e17, k_B=50e-20, k_T=1e-3, k_A=1e-40, p=1000)
        >>> len(_['n'])
        5"""
        var = {key: np.zeros(len(t)) for key in self.n_keys}
        variables = {key: [] for key in self.n_keys}

        def rate_equation(x, _t):
            """ Rate equation wrapper: adapts the dict-based rate_equations to odeint's list interface """
            dndt = self.rate_equations(*x, **kwargs)
            return [dndt[key] for key in self.n_keys]

        for i in range(p):
            # each new pulse adds N_0 on top of what is left from the previous pulse
            init = self.n_init(N_0)
            var = np.transpose(sci.odeint(rate_equation, [init[key] + var[key][-1] for key in self.n_keys], t))
            var = dict(zip(self.n_keys, var))
            for key in var:
                variables[key].append(var[key])
            if threshold > 0. and i > 1:
                # max relative change between the last two pulses, for each monitored concentration
                ca = np.array([np.max(np.abs(variables[key][-2] - var[key]) / N_0) for key in self.conc_ca_ids])
                if all(ca < threshold / 100):
                    break
                elif i == p - 1:  # last pulse (i runs 0..p-1): convergence was never reached
                    raise AssertionError('Attention: threshold condition never reached')
        return variables

    def calculate_trpl(self, *args, **kwargs):
        """ Calculate the TRPL (overridden by subclasses) """
        return np.array([0])

    def calculate_contributions(self, *args, **kwargs):
        """ Calculate the contributions (overridden by subclasses) """
        return dict()

    # ------------------------------------------------- FITTING METHODS ------------------------------------------------

    def fit(self, xs_data, ys_data, N0s, p0=None):
        """ Fit the data using the model
        :param list, np.ndarray xs_data: list-like of x data
        :param list, np.ndarray ys_data: list-like of y data
        :param list N0s: list of initial carrier concentrations (cm-3)
        :param None, dict p0: guess values. If None, use the gvalues dict
        Example
        -------
        >>> from core import models
        >>> from core import resources
        >>> x_data1 = resources.test_file1[0]
        >>> ys_data1 = resources.test_file1[1:]
        >>> xs_data1 = [x_data1] * len(ys_data1)
        >>> N0s1 = [1e15, 1e16, 1e17]
        >>> fit1 = BTModel().fit(xs_data1, ys_data1, N0s1) """
        # Input parameters processing
        fparams = [utils.merge_dicts(self.fixed_values, dict(N_0=n0)) for n0 in N0s]  # add N0 to fixed parameters
        if p0 is None:
            p0 = self.gvalues

        # Fitting
        fit = fitting.Fit(xs_data, ys_data, self.calculate_trpl, p0, self.detached_parameters, fparams)

        # Popts, fitted data, R2
        popts = fit.fit()
        fit_ydata = fit.calculate_fits(popts)
        cod = fit.calculate_rss(fit_ydata)  # NOTE(review): labelled R2 below but computed by calculate_rss — confirm
        popt = utils.keep_function_kwargs(self.rate_equations, popts[0])  # optimised values
        p0 = utils.keep_function_kwargs(self.rate_equations, utils.merge_dicts(p0, self.fixed_values))  # guess values

        # Popts full labels
        labels = []
        for key in self.s_display_keys + self.m_display_keys:
            fstring = ' (fixed)' if key in fparams[0] else ''  # add 'fixed' if parameter is fixed
            if key in self.s_display_keys:
                # single-valued parameter: shared across all fluences
                data = popts[0][key] / self.factors[key]
                label = self.quantities[key] + ' (' + self.symbols_html[key] + '): ' + '%.5f' % data + ' ✕ ' \
                        + self.factors_html[key] + ' ' + self.units_html[key] + fstring
            else:
                # multi-valued parameter: one value per fluence
                data = ['%.5f' % popt[key] for popt in popts]
                label = self.quantities[key] + ' (' + self.symbols_html[key] + '): ' + ', '.join(data) + fstring
            labels.append(label)
        labels.append('Coefficient of determination R<sup>2</sup>: ' + str(cod))

        # Contributions
        contributions = []
        for x_data, p in zip(xs_data, popts):
            concentration = {key: value[0] for key, value in self.calculate_concentrations(x_data, **p).items()}
            kwargs = utils.merge_dicts(p, concentration)
            contributions.append({self.cbt_labels[key]: value for key, value in self.calculate_contributions(x_data, **kwargs).items()})
        contributions = utils.list_to_dict(contributions)

        # All values (guess, optimised, R2 and contributions)
        values = dict()
        no_disp_keys = []
        for key in popt:
            label = self.symbols_html[key] + ' (' + self.factors_html[key] + ' ' + self.units_html[key] + ')'
            if key in fparams[0]:
                values[label + ' (fixed)'] = p0[key] / self.factors[key]
                no_disp_keys.append(label + ' (fixed)')
            else:
                values[label + ' (guess)'] = p0[key] / self.factors[key]
                values[label + ' (opt.)'] = popt[key] / self.factors[key]
                no_disp_keys.append(label + ' (guess)')
        # NOTE(review): key uses <sub> while the display label above uses <sup> — confirm which is intended
        values['R<sub>2</sub>'] = cod
        for key in contributions:
            values['max. ' + key] = np.max(contributions[key])

        return {'popts': popts, 'popt': popt, 'cod': cod, 'contributions': contributions, 'fit_ydata': fit_ydata,
                'labels': labels, 'N0s_labels': utils.get_power_labels(N0s), 'p0': p0, 'N0s': N0s, 'values': values,
                'no_disp_keys': no_disp_keys, 'xs_data': xs_data, 'ys_data': ys_data}

    def get_carrier_accumulation(self, popts, N0s, period):
        """ Calculate the carrier accumulation effect on the TRPL
        :param list popts: list of optimised parameters
        :param list N0s: initial carrier concentrations
        :param float, int period: excitation repetition period
        Example
        -------
        >>> from core import models
        >>> from core import resources
        >>> x_data1 = resources.test_file1[0]
        >>> ys_data1 = resources.test_file1[1:]
        >>> xs_data1 = [x_data1] * len(ys_data1)
        >>> N0s1 = [1e15, 1e16, 1e17]
        >>> fit1 = BTModel().fit(xs_data1, ys_data1, N0s1)
        >>> list(BTModel().get_carrier_accumulation(fit1['popts'], fit1['N0s'], 1000).values())
        [0.02057525783717984, 0.12353636524250478, 0.11835604643279929]"""
        labels = utils.get_power_labels(N0s)
        nca = dict()
        for label, popt in zip(labels, popts):
            # log-spaced time axis over one repetition period, with t=0 prepended
            x = np.insert(np.logspace(-4, np.log10(period), 10001), 0, 0)
            pulse1 = self.calculate_trpl(x, **popt)
            pulse2 = self.calculate_trpl(x, p=1000, **popt)
            # max relative difference (in %) between single-pulse and steady-state TRPL
            nca[label] = np.max(np.abs(pulse1 / pulse1[0] - pulse2 / pulse2[0])) * 100
        return nca

    def get_carrier_concentrations(self, xs_data, popts, period):
        """ Calculate the carrier concentrations from the optimised parameters
        :param list xs_data: x axis data
        :param list popts: list of optimised parameters
        :param float, int period: excitation repetition period in ns
        Example
        -------
        >>> from core import models
        >>> from core import resources
        >>> x_data1 = resources.test_file1[0]
        >>> ys_data1 = resources.test_file1[1:]
        >>> xs_data1 = [x_data1] * len(ys_data1)
        >>> N0s1 = [1e15, 1e16, 1e17]
        >>> fit1 = BTModel().fit(xs_data1, ys_data1, N0s1)
        >>> _ = BTModel().get_carrier_concentrations(xs_data1, fit1['popts'], 1000.) """
        # Carrier concentrations
        if period == '':
            # no period given: single pulse over the measured time axis
            x_pulse = x = xs_data
            nb_pulses = 1
            xlabel = 'Time (ns)'
        else:
            x = np.array([np.linspace(0, float(period), 1001)] * len(xs_data))
            nb_pulses = 100
            x_pulse = [np.linspace(0, nb_pulses, len(x_) * nb_pulses) for x_ in x]
            xlabel = 'Pulse'
        concentrations = []
        for x_data, popt in zip(x, popts):
            # drop the display-only parameters before calling the rate equations
            kwargs = {key: popt[key] for key in popt if key not in ('I', 'y_0')}
            concentration = self.calculate_concentrations(x_data, p=nb_pulses, **kwargs)
            concentrations.append({key: np.concatenate(concentration[key]) for key in concentration})
        return x_pulse, xlabel, concentrations

    def grid_fitting(self, progressbar, N0s, **kwargs):
        """ Run a grid fitting analysis
        :param list N0s: initial carrier concentration
        :param progressbar: progressbar
        :param kwargs: keyword arguments passed to the fit method
        >>> from core import models
        >>> from core import resources
        >>> x_data1 = resources.test_file1[0]
        >>> ys_data1 = resources.test_file1[1:]
        >>> xs_data1 = [x_data1] * len(ys_data1)
        >>> N0s1 = [1e15, 1e16, 1e17]
        >>> analysis = BTModel().grid_fitting(None, N0s1, xs_data=xs_data1, ys_data=ys_data1) """
        # Filter out the fixed parameters
        p0s = {key: self.gvalues_range[key] for key in self.gvalues_range if key not in self.fixed_values}

        # Generate all the combination of guess values and filter them
        pkeys, pvalues = zip(*p0s.items())
        all_p0s = [dict(zip(pkeys, v)) for v in itertools.product(*pvalues)]
        all_p0s = utils.filter_dicts(all_p0s, self.param_filters, self.fixed_values)

        # Run the fits
        fits = []
        for i, p0 in enumerate(all_p0s):
            # Update the progressbar if provided
            # NOTE(review): divides by len(all_p0s) - 1, which is 0 for a single combination — confirm upstream guard
            if progressbar is not None:
                progressbar.progress(i / float(len(all_p0s) - 1))
            # Fit the data; combinations that fail to converge are silently skipped
            try:
                fits.append(self.fit(p0=p0, N0s=N0s, **kwargs))
            except ValueError:
                pass
        return fits

    @staticmethod
    def get_rec_string(process, val=''):
        """ Get the recommendation string for the contributions
        :param str process: name of the process
        :param str val: 'higher' or 'lower' """
        string = 'This fit predicts low %s. The values associated with this process may be inaccurate.' % process
        if val:
            string += '\nIt is recommended to measure your sample under %s excitation fluence for this process to become ' \
                      'significant' % val
        return string
class BTModel(Model):
    """ Class for the Bimolecular-Trapping model """

    def __init__(self):
        """ Object constructor """
        ids = ['k_T', 'k_B', 'k_A']
        # plain-text units kept consistent with units_html (k_A is in cm6/ns)
        units = {'k_B': 'cm3/ns', 'k_T': 'ns-1', 'k_A': 'cm6/ns'}
        units_html = {'k_B': 'cm<sup>3</sup>/ns', 'k_T': 'ns<sup>-1</sup>', 'k_A': 'cm<sup>6</sup>/ns'}
        factors = {'k_B': 1e-20, 'k_T': 1e-3, 'k_A': 1e-40}
        fvalues = {'k_T': None, 'k_B': None, 'k_A': 0.}
        gvalues = {'k_T': 0.001, 'k_B': 1e-20, 'k_A': 1e-40}
        gvalues_range = {'k_B': [1e-20, 1e-18], 'k_T': [1e-4, 1e-2], 'k_A': [1e-32, 1e-30]}
        n_keys = ('n',)
        n_init = lambda N_0: {'n': N_0}
        conc_ca_ids = ('n',)
        Model.__init__(self, ids, units, units_html, factors, fvalues, gvalues, gvalues_range, n_keys, n_init,
                       conc_ca_ids, [])

    @staticmethod
    def calculate_contributions(t, k_T, k_B, k_A, n, **kwargs):
        """ Calculate the total contributions (in %) of each process to the TRPL
        :param k_T: trapping rate constant (ns-1)
        :param k_B: bimolecular rate constant (cm3/ns)
        :param k_A: Auger rate constant (cm6/ns)
        :param n: carrier concentration (cm-3)
        :param t: time (ns)"""
        T = sci.trapz(k_T * n ** 2, t)
        B = sci.trapz(k_B * n ** 3, t)
        A = sci.trapz(k_A * n ** 4, t)
        S = T + B + A
        return {'T': T / S * 100, 'B': B / S * 100, 'A': A / S * 100}

    @staticmethod
    def rate_equations(n, k_T, k_B, k_A, **kwargs):
        """ Rate equation of the BT model
        :param n: carrier concentration (cm-3)
        :param k_T: trapping rate (ns-1)
        :param k_B: bimolecular recombination rate (cm3/ns)
        :param k_A: Auger recombination rate (cm6/ns) """
        return {'n': - k_T * n - k_B * n ** 2 - k_A * n ** 3}

    def calculate_trpl(self, t, N_0, I, y_0, **kwargs):
        """ Calculate the normalised TRPL intensity using the BT model
        :param t: time (ns)
        :param N_0: initial carrier concentration
        :param y_0: background intensity
        :param I: amplitude factor
        :param kwargs: keyword arguments passed to the calculate_concentrations function
        Examples
        --------
        >>> _ = BTModel().calculate_trpl(np.linspace(0, 100), N_0=1e17, p=1, k_B=50e-20, k_T=1e-3, k_A=1e-40, I=1, y_0=0)
        >>> _ = BTModel().calculate_concentrations(np.linspace(0, 100), N_0=1e17, k_B=50e-20, k_T=1e-3, k_A=1e-40, p=1000)"""
        # use the concentration from the last simulated pulse; TRPL ~ n^2
        n = self.calculate_concentrations(t, N_0, **kwargs)['n'][-1]
        I_TRPL = n ** 2 / N_0
        return I * I_TRPL / I_TRPL[0] + y_0

    def get_recommendations(self, contributions, threshold=10):
        """ Get recommendations for the contributions (skips processes fixed to zero) """
        recs = []
        for key in contributions:
            if np.max(contributions[key]) < threshold:
                if 'Trapping' in key and self.fvalues.get('k_T', 1) != 0:
                    recs.append(self.get_rec_string('trapping', 'lower'))
                elif 'Bimolecular' in key and self.fvalues.get('k_B', 1) != 0:
                    recs.append(self.get_rec_string('bimolecular', 'higher'))
                elif 'Auger' in key and self.fvalues.get('k_A', 1) != 0:
                    recs.append(self.get_rec_string('Auger', 'higher'))
        return recs
class BTDModel(Model):
    """ Class for the Bimolecular-Trapping-Detrapping model """

    def __init__(self):
        """ Object constructor """
        ids = ['k_B', 'k_T', 'k_D', 'N_T', 'p_0']
        units = {'N_T': 'cm-3', 'p_0': 'cm-3', 'k_B': 'cm3/ns', 'k_T': 'cm3/ns', 'k_D': 'cm3/ns'}
        units_html = {'N_T': 'cm<sup>-3</sup>', 'p_0': 'cm<sup>-3</sup>', 'k_B': 'cm<sup>3</sup>/ns',
                      'k_T': 'cm<sup>3</sup>/ns', 'k_D': 'cm<sup>3</sup>/ns'}
        factors = {'k_B': 1e-20, 'k_T': 1e-20, 'k_D': 1e-20, 'p_0': 1e12, 'N_T': 1e12}
        fvalues = {'k_T': None, 'k_B': None, 'k_D': None, 'N_T': None, 'p_0': None}
        gvalues = {'k_B': 30e-20, 'k_T': 12000e-20, 'k_D': 80e-20, 'N_T': 60e12, 'p_0': 65e12}
        gvalues_range = {'k_B': [1e-20, 1e-18], 'k_T': [1e-18, 1e-16], 'k_D': [1e-20, 1e-18], 'p_0': [1e12, 1e14],
                         'N_T': [1e12, 1e14]}
        n_keys = ('n_e', 'n_t', 'n_h')  # need to be ordered same way as rate_equations input
        n_init = lambda N_0: {'n_e': N_0, 'n_t': 0, 'n_h': N_0}
        conc_ca_ids = ('n_e', 'n_h')
        Model.__init__(self, ids, units, units_html, factors, fvalues, gvalues, gvalues_range, n_keys, n_init,
                       conc_ca_ids, ['k_B < k_T', 'k_D < k_T'])

    @staticmethod
    def calculate_contributions(t, k_T, k_B, k_D, N_T, p_0, n_e, n_t, n_h, **kwargs):
        """ Calculate the total contributions (in %) of each process to the TRPL
        :param k_T: trapping rate constant (cm3/ns)
        :param k_B: bimolecular rate constant (cm3/ns)
        :param k_D: detrapping rate constant (cm3/ns)
        :param N_T: trap state concentration (cm-3)
        :param p_0: doping concentration (cm-3)
        :param n_e: electron concentration (cm-3)
        :param n_t: trapped electron concentration (cm-3)
        :param n_h: hole concentration (cm-3)
        :param t: time (ns)"""
        T = sci.trapz(k_T * n_e * (N_T - n_t) * (n_h + p_0), t)
        B = sci.trapz(k_B * n_e * (n_h + p_0) * (n_e + n_h + p_0), t)
        D = sci.trapz(k_D * n_t * (n_h + p_0) * n_e, t)
        S = T + B + D
        return {'T': T / S * 100, 'B': B / S * 100, 'D': D / S * 100}

    @staticmethod
    def rate_equations(n_e, n_t, n_h, k_B, k_T, k_D, p_0, N_T, **kwargs):
        """ Rate equations of the BTD model
        :param n_e: electron concentration (cm-3)
        :param n_t: trapped electron concentration (cm-3)
        :param n_h: hole concentration (cm-3)
        :param k_B: bimolecular recombination rate constant (cm3/ns)
        :param k_T: trapping rate constant (cm3/ns)
        :param k_D: detrapping rate constant (cm3/ns)
        :param p_0: doping concentration (cm-3)
        :param N_T: trap states concentration (cm-3) """
        B = k_B * n_e * (n_h + p_0)  # bimolecular recombination
        T = k_T * n_e * (N_T - n_t)  # trapping (limited by free trap states)
        D = k_D * n_t * (n_h + p_0)  # detrapping
        dne_dt = - B - T
        dnt_dt = T - D
        dnh_dt = - B - D
        return {'n_e': dne_dt, 'n_t': dnt_dt, 'n_h': dnh_dt}

    def calculate_trpl(self, t, N_0, I, y_0, p_0, **kwargs):
        """ Calculate the normalised TRPL intensity
        :param t: time (ns)
        :param N_0: initial carrier concentration (cm-3)
        :param y_0: background intensity
        :param p_0: doping concentration (cm-3)
        :param I: amplitude factor
        :param kwargs: keyword arguments passed to the calculate_concentrations function
        Examples
        --------
        >>> _ = BTDModel().calculate_trpl(np.linspace(0, 100), N_0=1e17, p=1, k_B=50e-20, k_T=12000e-20, N_T=60e12,
        ... p_0=65e12, k_D=80e-20, I=1, y_0=0)
        >>> _ = BTDModel().calculate_concentrations(np.linspace(0, 100), N_0=1e17, k_B=50e-20, k_T=12000e-20, N_T=60e12,
        ... p_0=65e12, k_D=80e-20, p=1000) """
        # use the last simulated pulse; TRPL ~ n_e * total hole concentration
        n = self.calculate_concentrations(t, N_0, p_0=p_0, **kwargs)
        I_TRPL = n['n_e'][-1] * (n['n_h'][-1] + p_0) / N_0
        return I * I_TRPL / I_TRPL[0] + y_0

    def get_recommendations(self, contributions, threshold=10):
        """ Get recommendations for the contributions (skips processes fixed to zero) """
        recs = []
        for key in contributions:
            if np.max(contributions[key]) < threshold:
                if 'Bimolecular' in key and self.fvalues.get('k_B', 1) != 0:
                    recs.append(self.get_rec_string('bimolecular', 'higher'))
                elif 'Trapping' in key and self.fvalues.get('k_T', 1) != 0 and self.fvalues.get('N_T', 1) != 0:
                    recs.append(self.get_rec_string('trapping', 'lower'))
                elif 'Detrapping' in key and self.fvalues.get('k_D', 1) != 0:
                    recs.append(self.get_rec_string('detrapping'))
        recs.append('Note: For the bimolecular-trapping-detrapping model, although a low contribution suggests that the'
                    ' parameters associated with the process may not be accurate, a non-negligible contribution does not '
                    'automatically indicate that the parameters retrieved are accurate due to the complex nature of the '
                    'model. It is recommended to perform a grid fitting analysis with this model.')
        return recs
# Registry of the available recombination models, keyed by display name.
models = {'Bimolecular-Trapping': BTModel(), 'Bimolecular-Trapping-Detrapping': BTDModel()}

if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
| StarcoderdataPython |
3474531 | import logging
import os
from dotenv import load_dotenv
from flask import Flask
from flask_mail import Mail
from flask_wtf import CSRFProtect
from flask import g
from flask.sessions import SecureCookieSessionInterface
from flask_login import user_loaded_from_header
import logging_config
# Load configuration from the .env file into the process environment.
load_dotenv()

# Database connection settings.
postgres_user = os.getenv('POSTGRES_USER')
postgres_pw = os.getenv('POSTGRES_PW')
postgres_url = os.getenv('POSTGRES_URL')
postgres_db = os.getenv('POSTGRES_DB')

# Processing-backend settings.
PYSPARK_URL = os.getenv('PYSPARK_URL')
BACKEND_SLURM = 'slurm'
BACKEND_PYSPARK = 'pyspark'
SSH_TARGET_PATH = os.getenv('SSH_TARGET_PATH')
SSH_USER_NAME = os.getenv('SSH_USER_NAME')

# Outgoing-mail (SMTP) settings.
EMAIL_SMTP_SERVER = os.getenv('EMAIL_SMTP_SERVER')
EMAIL_SMTP_PORT = os.getenv('EMAIL_SMTP_PORT')
EMAIL_SMTP_USERNAME = os.getenv('EMAIL_SMTP_USERNAME')
EMAIL_ADDRESS = os.getenv('EMAIL_ADDRESS')
EMAIL_PASSWORD = os.getenv('EMAIL_PASSWORD')

# Logging: anything other than LOG_LEVEL=DEBUG falls back to INFO.
LOG_LEVEL = logging.DEBUG if os.getenv('LOG_LEVEL') == 'DEBUG' else logging.INFO
LOG_DIRECTORY = os.getenv('LOG_DIRECTORY')
logging_config.configure_logging(os.path.join(LOG_DIRECTORY, 'cuizinart.log'), LOG_LEVEL)

# Flask app serving the pre-built React frontend from frontend/build.
app = Flask('cuizinart', static_folder=os.path.join(os.path.dirname(__file__), 'frontend', 'build', 'static'),
            template_folder=os.path.join(os.path.dirname(__file__), 'frontend', 'build'))
CSRFProtect(app)

DB_URL = 'postgresql://{user}:{pw}@{url}/{db}'.format(user=postgres_user, pw=postgres_pw,
                                                      url=postgres_url, db=postgres_db)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = DB_URL

# Flask-Security: registration with email confirmation, password reset/change.
app.secret_key = os.getenv('APP_SECRET_KEY')
app.config['SECURITY_PASSWORD_SALT'] = os.getenv('PASSWORD_SALT')
app.config['SECURITY_CONFIRMABLE'] = True
app.config['SECURITY_MSG_CONFIRMATION_REQUIRED'] = ("Email requires activation.", "error")
app.config['SECURITY_REGISTERABLE'] = True
app.config['SECURITY_SEND_REGISTER_EMAIL'] = True
app.config['SECURITY_RECOVERABLE'] = True
app.config['SECURITY_CHANGEABLE'] = True
app.config['SECURITY_EMAIL_SENDER'] = EMAIL_ADDRESS
app.config['SECURITY_EMAIL_SUBJECT_REGISTER'] = 'Welcome to Cuizinart'
# NOTE(review): CSRF checks are globally disabled here even though
# CSRFProtect(app) is installed above — confirm this is intentional.
app.config['WTF_CSRF_ENABLED'] = False

app.config['MAIL_SERVER'] = EMAIL_SMTP_SERVER
app.config['MAIL_PORT'] = EMAIL_SMTP_PORT
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = EMAIL_SMTP_USERNAME
app.config['MAIL_PASSWORD'] = EMAIL_PASSWORD
mail = Mail(app)
# Do not set session cookie as we're using token-based authentication
# https://flask-login.readthedocs.io/en/latest/#disabling-session-cookie-for-apis
class CustomSessionInterface(SecureCookieSessionInterface):
    """Prevent creating session from API requests."""
    def save_session(self, *args, **kwargs):
        # Deliberate no-op: never persist a session cookie.
        return

app.session_interface = CustomSessionInterface()
@user_loaded_from_header.connect
def user_loaded_from_header(self, user=None):
    # Signal handler marking the request as header-authenticated (token auth).
    # Rebinding the signal's name to this function is harmless here because the
    # decorator captured the signal object before the name was shadowed.
    g.login_via_header = True

app.config['SECURITY_TOKEN_MAX_AGE'] = 7*24*60*60  # 1 week
| StarcoderdataPython |
3362827 | from unittest import TestCase
from parameterized import parameterized
from design_patterns.solid.ocp.common import Color, Size
from design_patterns.solid.ocp.product_filter_bad import ProductFilter
from unit.ocp_common import products
class TestProductFilter(TestCase):
    """Parameterised tests for the (OCP-violating) ProductFilter."""

    def setUp(self) -> None:
        self.products = products
        self.pfilter = ProductFilter()

    @parameterized.expand(
        [
            (Color.RED, ["Car"]),
            (Color.BLUE, ["Notebook", "Ship"]),
            (Color.GREEN, ["Apple"]),
        ]
    )
    def test_filter_by_color(self, color, expected):
        """Filtering by colour yields the expected product names, in order."""
        names = [item.name for item in self.pfilter.filter_by_color(self.products, color)]
        self.assertListEqual(names, expected)

    @parameterized.expand(
        [
            (Size.SMALL, ["Apple"]),
            (Size.MEDIUM, ["Notebook"]),
            (Size.LARGE, ["Car", "Ship"]),
        ]
    )
    def test_filter_by_size(self, size, expected):
        """Filtering by size yields the expected product names, in order."""
        names = [item.name for item in self.pfilter.filter_by_size(self.products, size)]
        self.assertListEqual(names, expected)

    @parameterized.expand(
        [(Color.BLUE, Size.MEDIUM, ["Notebook"]), (Color.BLUE, Size.LARGE, ["Ship"])]
    )
    def test_filter_by_size_and_color(self, color, size, expected):
        """Filtering by size and colour together yields the expected names."""
        names = [
            item.name
            for item in self.pfilter.filter_by_size_and_color(self.products, size, color)
        ]
        self.assertListEqual(names, expected)
| StarcoderdataPython |
3402240 | <gh_stars>100-1000
# Generated by Django 2.2.6 on 2019-10-09 16:00
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.6 makemigrations): creates the Ticket model.

    Do not hand-edit the operations below; add a new migration for any
    further schema changes.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('backend', '0004_credentials_namedcredentials_personalnamedcredentials_staticcredentials'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('sessionid', models.UUIDField(blank=True, null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('validityperiod', models.DurationField(default=datetime.timedelta(days=1))),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_of_tickets', to=settings.AUTH_USER_MODEL)),
                ('connection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.Connection')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shared', to='backend.Ticket')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_of_tickets', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
277358 | """Scripts to compute multitask features and targets
"""
from __future__ import print_function
import argparse
import csv
import glob
import json
import medleydb as mdb
from medleydb import mix
import mir_eval
import numpy as np
import os
import compute_training_data as C
# Stems whose MedleyDB metadata lists a wrong or too-generic instrument.
# Maps the stem file basename ('<Artist>_<Title>_STEM_<idx>') to the
# instrument actually heard; consulted by get_resynth_info when tagging stems.
EXCEPTIONS = {
    'AnEndlessSporadic_Anything_STEM_04': 'synthesizer',
    'Anberlin_TheFeelGoodDrag_STEM_04': 'synthesizer',
    'ArcadeFire_BlackMirror_STEM_02': 'acoustic guitar',
    'ArcadeFire_BlackMirror_STEM_07': 'acoustic guitar',
    'BillyIdol_WhiteWedding_STEM_05': 'distorted electric guitar',
    'Blink182_AllTheSmallThings_STEM_05': 'clean electric guitar',
    'Blondie_OneWayOrAnother_STEM_06': 'distorted electric guitar',
    'BlueOysterCult_DontFearTheReaper_STEM_05': 'clean electric guitar',
    'LilyAllen_NotFair_STEM_04': 'clean electric guitar',
    'TheAllAmericanRejects_DirtyLittleSecret_STEM_04': 'distorted electric guitar',
    'TheAllmanBrothersBand_RamblinMan_STEM_06': 'clean electric guitar',
    'TheLastGoodnight_PicturesOfYou_STEM_02': 'distorted_electric guitar'
}
def save_singlef0_output(times, freqs, output_path):
    """Write a single-f0 annotation to a tab-delimited (time, frequency) file."""
    rows = zip(times, freqs)
    with open(output_path, 'w') as fhandle:
        csv.writer(fhandle, delimiter='\t').writerows(rows)
def multif0_to_timefreq(times, freqs):
    """Unroll a multif0 annotation of the form (t, [f1, f2, f3])
    to parallel lists of (t, f) where t may be repeated and zero
    frequencies are dropped.

    Parameters
    ----------
    times : list
        Time stamps
    freqs : list of lists
        all frequencies for a given time stamp

    Returns
    -------
    t_unrolled : list
        Unrolled time stamps
    f_unrolled : list
        Unrolled frequency values

    """
    pairs = [
        (t, f)
        for t, f_list in zip(times, freqs)
        for f in f_list
        if f != 0
    ]
    t_unrolled = [t for t, _ in pairs]
    f_unrolled = [f for _, f in pairs]
    return t_unrolled, f_unrolled
def get_replace_info(mtrack, replace_path):
    """Go through replaced stems and get annotation and mixing info.

    Parameters
    ----------
    mtrack : MultiTrack
        Medleydb multitrack object
    replace_path : str
        Path to where the resynthesized audio and annotations live

    Returns
    -------
    replace_stem_annotations : dictionary
        Dictionary keyed by stem id mapping to 'times', 'freqs', and 'tags'
        for any relevant annotations.
    replace_altindices : dictionary
        Dictionary keyed by stem id mapping to the replaced path.
        Used by mix.mix_multitrack for replacing stems.
    stem_indices : list
        List of stem indices for replaced stems

    """
    # glob for all replaced stems for this multitrack
    replace_stems = glob.glob(os.path.join(
        replace_path, '{}*_replace.wav'.format(mtrack.track_id)
    ))

    # initialize
    stem_indices = []
    replace_stem_annotations = {}
    replace_altindices = {}

    # loop over resynthesized stems
    for stem_path in replace_stems:

        # path where annotation should live (pyin smoothed pitch track export)
        annot_path = os.path.join(
            replace_path,
            "{}_vamp_pyin_pyin_smoothedpitchtrack.csv".format(
                os.path.basename(stem_path).split('.')[0])
        )

        # if annotation doesn't exist, cry!
        if not os.path.exists(annot_path):
            print("[Warning] Couldn't find annotation for {}".format(stem_path))
            continue

        # parse stem index from file name
        # (names look like '<Artist>_<Title>_STEM_<idx>_replace.wav', so the
        #  fourth '_'-separated token is the stem index)
        stem_id = int(os.path.basename(stem_path).split('_')[3].split('.')[0])

        # load resynth annotation
        times, freqs = mir_eval.io.load_ragged_time_series(
            annot_path, delimiter=','
        )
        annot_t, annot_f = multif0_to_timefreq(times, freqs)
        # replaced stems are resynthesized vocal melodies, hence the fixed tags
        tags = ['multif0', 'vocal', 'melody']

        stem_indices.append(stem_id)

        # add annotation to dictionary
        replace_stem_annotations[stem_id] = {
            'times': annot_t, 'freqs': annot_f, 'tags': tags
        }

        # add resynth file to replacement dictionary
        replace_altindices[stem_id] = stem_path

    return replace_stem_annotations, replace_altindices, stem_indices
def get_resynth_info(mtrack, resynth_path, stem_indices):
    """Go through resynthesized stems and get annotation and mixing info.

    Parameters
    ----------
    mtrack : MultiTrack
        Medleydb multitrack object
    resynth_path : str
        Path to where the resynthesized audio and annotations live
    stem_indices : list
        List of indices already used

    Returns
    -------
    resynth_stem_annotations : dictionary
        Dictionary keyed by stem id mapping to 'times', 'freqs', and 'tags'
        for any relevant annotations.
    resynth_altindices : dictionary
        Dictionary keyed by stem id mapping to the resynthesized path.
        Used by mix.mix_multitrack for replacing stems.
    stem_indices_guitar : list
        List of stem indices containing any kind of resynthesized guitar
    stem_indices_piano : list
        List of stem indices containing any kind of resynthesized piano

    """
    # glob for all resynth stems for this multitrack
    resynth_stems = glob.glob(os.path.join(
        resynth_path, '{}*_resynth.wav'.format(mtrack.track_id)
    ))

    # initialize
    stem_indices_piano = []
    stem_indices_guitar = []
    resynth_stem_annotations = {}
    resynth_altindices = {}

    # loop over resynthesized stems
    for stem_path in resynth_stems:

        # path where annotation should live
        annot_path = os.path.join(
            resynth_path,
            "{}.txt".format(os.path.basename(stem_path).split('.')[0])
        )

        # parse stem index from file name
        # ('<Artist>_<Title>_STEM_<idx>_resynth.wav' -> fourth '_' token)
        stem_id = int(os.path.basename(stem_path).split('_')[3].split('.')[0])
        stem = mtrack.stems[stem_id]

        # skip stems that were already handled (e.g. replaced vocals)
        if stem_id in stem_indices:
            continue

        # if annotation doesn't exist, cry!
        if not os.path.exists(annot_path):
            print("[Warning] Couldn't find annotation for {}".format(stem_path))
            continue

        # load resynth annotation
        times, freqs = mir_eval.io.load_ragged_time_series(annot_path)
        annot_t, annot_f = multif0_to_timefreq(times, freqs)
        tags = ['multif0']

        # apply tags based on whether instrument is piano or guitar,
        # overriding the metadata instrument for known-bad stems
        basename = os.path.basename(stem.audio_path.split('.')[0])
        if basename in EXCEPTIONS.keys():
            instrument = EXCEPTIONS[basename]
        else:
            instrument = stem.instrument

        if 'piano' in instrument:
            tags.append('piano')
            stem_indices_piano.append(stem_id)
        elif ('electric piano' in instrument or
                'synthesizer' in instrument):
            tags.append('keys')
            stem_indices_piano.append(stem_id)
        elif ('acoustic guitar' in instrument or
                'clean electric guitar' in instrument or
                'distorted electric guitar' in instrument):
            tags.append('guitar')
            stem_indices_guitar.append(stem_id)
        else:
            # any other instrument was not resynthesized on purpose; skip it
            print("[Warning] resynth stem is instrument {}! skipping".format(
                instrument))
            continue

        # add annotation to dictionary
        resynth_stem_annotations[stem_id] = {
            'times': annot_t, 'freqs': annot_f, 'tags': tags
        }

        # add resynth file to replacement dictionary
        resynth_altindices[stem_id] = stem_path

    return (resynth_stem_annotations, resynth_altindices,
            stem_indices_guitar, stem_indices_piano)
def get_orig_stem_info(mtrack, stem_indices):
    """Collect annotation and mixing info for the remaining original stems.

    Parameters
    ----------
    mtrack : MultiTrack
        Medleydb multitrack object
    stem_indices : list
        List of stem indices already included

    Returns
    -------
    orig_stem_annotations : dictionary
        Dictionary keyed by stem id mapping to 'times', 'freqs', and 'tags'
        for any relevant annotations.
    stem_annot_activity : dictionary
        Dictionary keyed by stem id mapping to annotation activation information
    orig_stem_indices : list
        List of stem indices to include in mix.
    """
    orig_stem_indices = []
    stem_annot_activity = {}
    orig_stem_annotations = {}
    for stem in mtrack.stems.values():
        # already covered by a resynthesized/replaced version
        if stem.stem_idx in stem_indices:
            continue
        # stems with several instruments or several f0 types are dropped
        if len(stem.instrument) > 1 or len(stem.f0_type) > 1:
            continue
        f0_type = stem.f0_type[0]
        # polyphonic stems are skipped
        if f0_type == 'p':
            continue
        # unpitched stems go in the mix without any f0 annotation
        if f0_type == 'u':
            orig_stem_indices.append(stem.stem_idx)
        # mono stems go in the mix together with their f0 annotation
        if len(stem.instrument) == 1 and f0_type == 'm':
            orig_stem_indices.append(stem.stem_idx)
            annot_t, annot_f, annot_activation = C.get_stem_annotation(
                stem, mtrack
            )
            stem_annot_activity[stem.stem_idx] = annot_activation
            tags = ['multif0']
            if stem.instrument[0] in mix.VOCALS:
                tags.append('vocal')
            for component in ('melody', 'bass'):
                if stem.component == component:
                    tags.append(component)
            orig_stem_annotations[stem.stem_idx] = {
                'times': annot_t, 'freqs': annot_f, 'tags': tags
            }
    return orig_stem_annotations, stem_annot_activity, orig_stem_indices
def save_annotation(times, freqs, save_path):
    """Write an annotation to disk, or do nothing when it is empty.

    Parameters
    ----------
    times : list
        List of times
    freqs : list
        List of freqs
    save_path : str
        Path to save file

    Returns
    -------
    output : str or None
        save_path when times is non-empty, otherwise None.
    """
    if len(times) == 0:
        return None
    # the single-f0 writer is fine here: multif0s were already unwrapped
    # into flat parallel time/freq lists upstream
    save_singlef0_output(times, freqs, save_path)
    return save_path
def create_annotations(save_dir, track_id, stem_annotations):
    """Build a dictionary of annotations by type from per-stem annotations.

    Parameters
    ----------
    save_dir : str
        Path to eventually save each annotation.
    track_id : str
        Medleydb trackid
    stem_annotations : dictionary
        Dictionary keyed by stem id with values 'times', 'freqs' and 'tags'

    Returns
    -------
    annotations : dictionary
        Dictionary keyed by annotation type (e.g. 'vocal') with values
        'times', 'freqs' and 'path'.
    """
    annot_types = [
        'multif0', 'multif0_noguitar', 'multif0_nosynth',
        'melody', 'vocal', 'bass', 'piano', 'guitar'
    ]
    # one accumulator (plus target path) per annotation type
    annotations = {
        annot_type: {
            'times': [], 'freqs': [],
            'path': os.path.join(
                save_dir, "{}_{}_annotation.txt".format(track_id, annot_type))
        }
        for annot_type in annot_types
    }

    def _append(annot_type, annot):
        # fold one stem's times/freqs into the given annotation type
        annotations[annot_type]['times'].extend(annot['times'])
        annotations[annot_type]['freqs'].extend(annot['freqs'])

    # stems are processed in sorted id order for deterministic output
    for stem_id in sorted(stem_annotations.keys()):
        annot = stem_annotations[stem_id]
        if annot['times'] is None:
            continue
        tags = annot['tags']
        # all stems should carry the 'multif0' tag
        if 'multif0' in tags:
            _append('multif0', annot)
        # guitar stems feed the guitar annotation; everything else feeds
        # the no-guitar multif0 annotation
        if 'guitar' in tags:
            _append('guitar', annot)
        else:
            _append('multif0_noguitar', annot)
        if 'piano' in tags:
            _append('piano', annot)
        # stems that were not synthesized (no piano/guitar/keys tag) feed
        # the nosynth annotation
        if not any(t in tags for t in ('piano', 'guitar', 'keys')):
            _append('multif0_nosynth', annot)
        # melody/vocal/bass tags map straight onto their annotations
        for tag in ('melody', 'vocal', 'bass'):
            if tag in tags:
                _append(tag, annot)
    return annotations
def create_annotation_save_pairs(annotations, mix_path, mix_path_noguitar,
                                 mix_path_nosynth):
    """Map each mix audio file to its corresponding multitask annotations.

    Parameters
    ----------
    annotations : dictionary
        Dictionary mapping annotation type to 'times', 'freqs', and 'path'
        to save.
    mix_path : str
        Path to full multif0 mix
    mix_path_noguitar : str
        Path to no guitar multif0 mix
    mix_path_nosynth : str
        Path to no synthesized stems multif0 mix

    Returns
    -------
    audio_annot_pairs : dictionary
        Dictionary mapping audio path to annotation paths by type.
    """
    # each multif0 variant belongs to exactly one mix
    mf0_target = {
        'multif0': mix_path,
        'multif0_noguitar': mix_path_noguitar,
        'multif0_nosynth': mix_path_nosynth,
    }
    audio_annot_pairs = {
        mix_path: {},
        mix_path_noguitar: {},
        mix_path_nosynth: {}
    }
    for annot_type, annot in annotations.items():
        output = save_annotation(
            annot['times'], annot['freqs'], annot['path'])
        if annot_type in mf0_target:
            audio_annot_pairs[mf0_target[annot_type]]['multif0'] = output
        elif annot_type == 'guitar':
            # guitar is only present in the full mix
            audio_annot_pairs[mix_path]['guitar'] = output
            audio_annot_pairs[mix_path_noguitar]['guitar'] = None
            audio_annot_pairs[mix_path_nosynth]['guitar'] = None
        elif annot_type == 'piano':
            # piano is present in every mix except the nosynth one
            audio_annot_pairs[mix_path]['piano'] = output
            audio_annot_pairs[mix_path_noguitar]['piano'] = output
            audio_annot_pairs[mix_path_nosynth]['piano'] = None
        else:
            # melody / vocal / bass apply to all three mixes
            for path in (mix_path, mix_path_noguitar, mix_path_nosynth):
                audio_annot_pairs[path][annot_type] = output
    return audio_annot_pairs
def generate_filtered_stems(stem_annot_activity, mtrack, save_dir):
    """Create filtered stems for stems with annotation activity info.

    Parameters
    ----------
    stem_annot_activity : dictionary
        Dictionary mapping stem_id to annotation activity information
    mtrack : MultiTrack
        medleydb MultiTrack object
    save_dir : str
        Path to save new stems.

    Returns
    -------
    filtered_altfiles : dictionary
        Dictionary mapping stem_id to a path where the new stem is saved.
    """
    filtered_altfiles = {}
    for stem_id, activity in stem_annot_activity.items():
        # stems without an annotation filter are left untouched
        if activity is None:
            continue
        new_stem_path = os.path.join(
            save_dir, "{}_STEM_{}_alt.wav".format(mtrack.track_id, stem_id)
        )
        # only render the filtered stem once; reuse it on later runs
        if not os.path.exists(new_stem_path):
            C.create_filtered_stem(
                mtrack.stems[stem_id].audio_path, new_stem_path, activity
            )
        filtered_altfiles[stem_id] = new_stem_path
    return filtered_altfiles
def create_mixes(mtrack, mix_path, mix_path_noguitar, mix_path_nosynth,
                 stem_indices, stem_indices_guitar, stem_indices_piano,
                 altfiles):
    """Render artificial mixes to `mix_path', `mix_path_noguitar', and
    `mix_path_nosynth'.

    Parameters
    ----------
    mtrack : MultiTrack
        medleydb MultiTrack object
    mix_path : str
        Path to save full multif0 mix
    mix_path_noguitar : str
        Path to save no guitar multif0 mix
    mix_path_nosynth : str
        Path to save no synthesized stems multif0 mix
    stem_indices : list
        List of stems to include in the full mix
    stem_indices_guitar : list
        List of guitar stems
    stem_indices_piano : list
        List of piano stems
    altfiles : dict
        Dictionary of replacement files mapping stem id to new path.

    Returns
    -------
    mix_filepaths : list or None
        List of filepaths included in the full mix
    mix_noguitar_filepaths : list or None
        List of filepaths included in the no guitar mix
    mix_nosynth_filepaths : list or None
        List of filepaths included in the no resynth mix
    """
    def _render(path, indices, alternates):
        # render one mix, or return None when it would contain no stems;
        # the second element of mix_multitrack's return value is unused here
        if len(indices) == 0:
            return None
        filepaths, _ = mix.mix_multitrack(
            mtrack, path, stem_indices=indices, alternate_files=alternates
        )
        return filepaths

    # full resynth mix
    mix_filepaths = _render(mix_path, stem_indices, altfiles)
    # drop guitar stems, then additionally drop piano stems
    stem_indices_noguitar = [
        s for s in stem_indices if s not in stem_indices_guitar
    ]
    stem_indices_nosynth = [
        s for s in stem_indices_noguitar if s not in stem_indices_piano
    ]
    # restrict the replacement files to the stems that survive
    altfiles_noguitar = {
        k: v for k, v in altfiles.items() if k in stem_indices_noguitar
    }
    altfiles_nosynth = {
        k: v for k, v in altfiles.items() if k in stem_indices_nosynth
    }
    mix_noguitar_filepaths = _render(
        mix_path_noguitar, stem_indices_noguitar, altfiles_noguitar)
    mix_nosynth_filepaths = _render(
        mix_path_nosynth, stem_indices_nosynth, altfiles_nosynth)
    return mix_filepaths, mix_noguitar_filepaths, mix_nosynth_filepaths
def create_complete_resynth_mix(mtrack, resynth_path, replace_path, save_dir):
    """Create resynthesized mixes and all corresponding annotations.

    Audio files:
        - (A) Multif0 remix with synth guitar + synth piano
        - (B) Multif0 remix with synth piano
        - (C) Multif0 remix
        - (D) Original track

    Annotations (filename : description (corresponding audio file)):
        - Artist_Track_multif0_annotation.txt : multif0 + synth piano/guitar (A)
        - Artist_Track_multif0_noguitar_annotation.txt : multif0 + synth piano (B)
        - Artist_Track_multif0_nosynth_annotation.txt : multif0 (C)
        - Artist_Track_melody_annotation.txt : all melody f0s (A,B,C,[D])
        - Artist_Track_vocal_annotation.txt : all vocal f0s (A,B,C,[D])
        - Artist_Track_bass_annotation.txt : all bass f0s (A,B,C,[D])
        - Artist_Track_piano_annotation.txt : all piano f0s (A,B)
        - Artist_Track_guitar_annotation.txt : all guitar f0s (A)

    Parameters
    ----------
    mtrack : MultiTrack
        medleydb MultiTrack object
    resynth_path : str
        Path where resynthesized files live
    replace_path : str
        Path where replacement files live
    save_dir : str
        Path to save output.

    Returns
    -------
    audio_annot_pairs : dictionary or None
        Dictionary mapping audio files to annotation files by type.
        None when the track has bleed or no stems survive filtering.
    """
    # tracks with bleed are skipped entirely
    if mtrack.has_bleed:
        return None
    # mix audio save paths
    mix_path = os.path.join(
        save_dir, "{}_MIX_complete_resynth.wav".format(mtrack.track_id)
    )
    mix_path_noguitar = os.path.join(
        save_dir, "{}_MIX_complete_noguitar.wav".format(mtrack.track_id)
    )
    mix_path_nosynth = os.path.join(
        save_dir, "{}_MIX_complete_nosynth.wav".format(mtrack.track_id)
    )
    # common accumulators: stems to mix, file replacements, per-stem annotations
    stem_indices = []
    altfiles = {}
    stem_annotations = {}
    # get all annotation and index info from replacement stems
    (replace_stem_annotations, replace_altindices,
     stem_indices_replace) = get_replace_info(
         mtrack, replace_path
    )
    stem_indices.extend(stem_indices_replace)
    for key, value in replace_stem_annotations.items():
        stem_annotations[key] = value
    for key, value in replace_altindices.items():
        altfiles[key] = value
    # get all annotation and index info from resynthesized stems
    (resynth_stem_annotations, resynth_altindices,
     stem_indices_guitar, stem_indices_piano) = get_resynth_info(
         mtrack, resynth_path, stem_indices
    )
    stem_indices.extend(stem_indices_piano)
    stem_indices.extend(stem_indices_guitar)
    for key, value in resynth_stem_annotations.items():
        stem_annotations[key] = value
    for key, value in resynth_altindices.items():
        altfiles[key] = value
    # get all annotation and index info from remaining original stems
    (orig_stem_annotations, stem_annot_activity,
     orig_stem_indices) = get_orig_stem_info(mtrack, stem_indices)
    # fill info to common structures
    stem_indices.extend(orig_stem_indices)
    for key, value in orig_stem_annotations.items():
        stem_annotations[key] = value
    # create annotation dictionary (one entry per annotation type)
    annotations = create_annotations(
        save_dir, mtrack.track_id, stem_annotations
    )
    # save annotations to disk and pair them with their mixes
    audio_annot_pairs = create_annotation_save_pairs(
        annotations, mix_path, mix_path_noguitar, mix_path_nosynth
    )
    # create new versions of stems with annotation filters
    filtered_altfiles = generate_filtered_stems(
        stem_annot_activity, mtrack, save_dir
    )
    for key, value in filtered_altfiles.items():
        altfiles[key] = value
    # make sure there is a least one stem left in the mix
    if len(stem_indices) == 0:
        print("{} had no stems after filtering :( ".format(mtrack.track_id))
        return None
    # generate mixes
    mix_filepaths, mix_noguitar_filepaths, mix_nosynth_filepaths = create_mixes(
        mtrack, mix_path, mix_path_noguitar, mix_path_nosynth,
        stem_indices, stem_indices_guitar, stem_indices_piano, altfiles
    )
    # drop pairs for mixes that could not be rendered (no stems)
    if mix_filepaths is None:
        audio_annot_pairs.pop(mix_path)
    if mix_noguitar_filepaths is None:
        audio_annot_pairs.pop(mix_path_noguitar)
    if mix_nosynth_filepaths is None:
        audio_annot_pairs.pop(mix_path_nosynth)
    return audio_annot_pairs
def get_annotation_mono(mtrack, stem_list, use_estimate=True):
    """Get a combined annotation for a subset of stems if all stems are mono.

    Parameters
    ----------
    mtrack : MultiTrack
        medleydb MultiTrack object
    stem_list : list
        list of Track objects
    use_estimate : bool
        Passed through to C.get_stem_annotation.

    Returns
    -------
    times : list or None
        list of times, or None when any stem is non-mono, the track has
        bleed, or an annotation is missing
    freqs : list or None
        list of freqs (same conditions as times)
    """
    # no stems -> the annotation is empty, not missing
    if len(stem_list) == 0:
        return [], []
    # a track with bleed, a multi-instrument stem, or a non-mono f0 type
    # disqualifies the whole subset
    all_mono = not mtrack.has_bleed
    for stem in stem_list:
        if len(stem.instrument) > 1 or stem.f0_type[0] != 'm':
            all_mono = False
    if not all_mono:
        return None, None
    # concatenate the per-stem annotations
    times = []
    freqs = []
    for stem in stem_list:
        annot_t, annot_f, _ = C.get_stem_annotation(
            stem, mtrack, use_estimate=use_estimate
        )
        # a missing annotation invalidates the whole subset
        if annot_t is None or annot_f is None:
            return None, None
        times.extend(annot_t)
        freqs.extend(annot_f)
    return times, freqs
def get_fullmix_annotations(mtrack, save_dir):
    """Get annotations corresponding to original medleydb mixes.

    Parameters
    ----------
    mtrack : MultiTrack
        A medleydb MultiTrack object.
    save_dir : str
        Path to save annotation files.

    Returns
    -------
    audio_annot_pairs : dictionary
        Dictionary mapping audio files to annotation files by type.
    """
    audio_annot_pairs = {mtrack.mix_path: {}}
    # bucket stems by the annotation types they contribute to
    melody_stems = []
    vocal_stems = []
    bass_stems = []
    guitar_stems = []
    piano_stems = []
    # instrument labels that count as guitar
    guitars = [
        'acoustic guitar',
        'clean electric guitar',
        'distorted electric guitar'
    ]
    for stem in mtrack.stems.values():
        if any(inst in mix.VOCALS for inst in stem.instrument):
            vocal_stems.append(stem)
        # NOTE(review): unlabeled melody stems are also counted as vocals —
        # presumably intentional; confirm.
        if 'Unlabeled' in stem.instrument and stem.component == 'melody':
            vocal_stems.append(stem)
        if stem.component == 'bass':
            bass_stems.append(stem)
        if stem.component == 'melody':
            melody_stems.append(stem)
        if any(inst in guitars for inst in stem.instrument):
            guitar_stems.append(stem)
        if any(inst == 'piano' for inst in stem.instrument):
            piano_stems.append(stem)
    # use melody if there is melody or none
    if mtrack.dataset_version == 'V1':
        # V1 tracks use the shipped melody3 annotation: after the transpose,
        # annot[0] is passed as the times and annot[1:] as the f0 columns
        if mtrack.melody3_annotation is not None:
            annot = np.array(mtrack.melody3_annotation).T
            melody_times, melody_freqs = multif0_to_timefreq(
                annot[0], annot[1:].T)
        else:
            melody_times = []
            melody_freqs = []
    else:
        melody_times, melody_freqs = get_annotation_mono(mtrack, melody_stems)
    if melody_times is not None:
        output = save_annotation(
            melody_times,
            melody_freqs,
            os.path.join(save_dir, '{}_MIX_melody.txt'.format(mtrack.track_id))
        )
        audio_annot_pairs[mtrack.mix_path]['melody'] = output
    # use vocals if all vocals are mono or there are none
    vocal_times, vocal_freqs = get_annotation_mono(mtrack, vocal_stems, use_estimate=False)
    if vocal_times is not None:
        output = save_annotation(
            vocal_times,
            vocal_freqs,
            os.path.join(save_dir, '{}_MIX_vocal.txt'.format(mtrack.track_id))
        )
        audio_annot_pairs[mtrack.mix_path]['vocal'] = output
    # use bass if all bass is mono or there are none
    bass_times, bass_freqs = get_annotation_mono(mtrack, bass_stems, use_estimate=False)
    if bass_times is not None:
        output = save_annotation(
            bass_times,
            bass_freqs,
            os.path.join(save_dir, '{}_MIX_bass.txt'.format(mtrack.track_id))
        )
        audio_annot_pairs[mtrack.mix_path]['bass'] = output
    # mark that there's no piano/guitar if there are no stems with
    # those instruments
    if len(piano_stems) == 0:
        audio_annot_pairs[mtrack.mix_path]['piano'] = None
    if len(guitar_stems) == 0:
        audio_annot_pairs[mtrack.mix_path]['guitar'] = None
    return audio_annot_pairs
def get_all_audio_annot_pairs(mtrack, save_dir, resynth_path, replace_path):
    """For a given multitrack get all types of mixes and corresponding
    annotations, and save a json file with all info.

    Parameters
    ----------
    mtrack : MultiTrack
        medleydb MultiTrack object.
    save_dir : str
        Path to save json output file.
    resynth_path : str
        Path to where resynthesized stems live.
    replace_path : str
        Path to where replaced stems live

    Returns
    -------
    json_path : str
        Path to saved json file
    """
    print("    Resynth annotations and mixing...")
    resynth_pairs = create_complete_resynth_mix(
        mtrack, resynth_path, replace_path, save_dir)
    print("    Fullmix annotations")
    fullmix_pairs = get_fullmix_annotations(mtrack, save_dir)
    # resynth pairs may be None (track with bleed / nothing to mix)
    all_pairs = {}
    if resynth_pairs is not None:
        all_pairs.update(resynth_pairs)
    all_pairs.update(fullmix_pairs)
    json_path = os.path.join(
        save_dir, "{}_training_pairs.json".format(mtrack.track_id)
    )
    with open(json_path, 'w') as fhandle:
        json.dump(all_pairs, fhandle, indent=2)
    return json_path
def main(args):
    """Generate training-pair json files for every medleydb multitrack."""
    mtracks = mdb.load_all_multitracks(
        dataset_version=['V1', 'V2', 'EXTRA'])
    for mtrack in mtracks:
        print("Processing {}...".format(mtrack.track_id))
        # skip tracks whose output json already exists
        done_path = os.path.join(
            args.save_dir,
            "{}_training_pairs.json".format(mtrack.track_id))
        if os.path.exists(done_path):
            print("    already done!")
            continue
        json_path = get_all_audio_annot_pairs(
            mtrack, args.save_dir, args.resynth_path, args.replace_path
        )
        print("...saved to {}".format(json_path))
        print("")
    print("done!")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Generate feature files for multif0 learning.")
    # positional arguments, in order
    for arg_name, arg_help in [
            ("save_dir", "Path to save npy files."),
            ("resynth_path", "resynth path"),
            ("replace_path", "replace path")]:
        parser.add_argument(arg_name, type=str, help=arg_help)
    main(parser.parse_args())
| StarcoderdataPython |
6544963 | <reponame>crisperdue/holpy
# Author: <NAME>
from kernel.type import TFun, TConst, NatType
from kernel.term import Term, Const, Binary
from data.list import ListType, mk_literal_list, is_literal_list, dest_literal_list
"""Utility functions for characters and strings."""
charT = TConst("char")
stringT = TConst("string")
Char = Const("Char", TFun(NatType, charT))
String = Const("String", TFun(ListType(charT), stringT))
def mk_char(c):
    """Convert a length-1 Python string into the corresponding HOL character."""
    assert isinstance(c, str) and len(c) == 1, "mk_char: expect a string of length 1"
    # encode the character by its code point, as a binary numeral
    code = Binary(ord(c))
    return Char(code)
def mk_string(s):
    """Convert a Python string into the corresponding HOL string."""
    assert isinstance(s, str), "mk_string: expect a string"
    hol_chars = [mk_char(ch) for ch in s]
    return String(mk_literal_list(hol_chars, charT))
def is_char(t):
    """Return whether the given term is a HOL character."""
    assert isinstance(t, Term), "is_char"
    if not t.is_comb('Char', 1):
        return False
    # the single argument must be a binary numeral
    return t.arg.is_binary()
def is_string(t):
    """Return whether the given term is a HOL string."""
    assert isinstance(t, Term), "is_string"
    # must be String applied to a literal list...
    if not (t.is_comb('String', 1) and is_literal_list(t.arg)):
        return False
    # ...whose elements are all well-formed characters
    return all(is_char(c) for c in dest_literal_list(t.arg))
def dest_char(t):
    """Convert a HOL character into a length-1 Python string."""
    assert isinstance(t, Term), "dest_char"
    assert t.is_comb('Char', 1) and t.arg.is_binary(), "dest_char"
    code = t.arg.dest_binary()
    return chr(code)
def dest_string(t):
    """Convert a HOL string into the corresponding Python string."""
    assert isinstance(t, Term), "dest_string"
    assert t.is_comb('String', 1) and is_literal_list(t.arg), "dest_string"
    hol_chars = dest_literal_list(t.arg)
    return ''.join(dest_char(c) for c in hol_chars)
| StarcoderdataPython |
8118527 | <filename>instapp/models.py
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
    """A user's profile: photo, bio, and a link to the auth user."""
    profile_photo = models.ImageField(upload_to='profile-pics')
    bio = models.TextField()
    # explicit on_delete matches the pre-Django-2.0 implicit CASCADE default
    # and the style used on Image.user below
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    @classmethod
    def get_all(cls):
        """Return all profiles."""
        return Profile.objects.all()

    @classmethod
    def search_by_username(cls, search_term):
        """Case-insensitively search profiles by the related user's username.

        Bug fix: django.contrib.auth User has no `name` field — the lookup
        was `user__name__icontains`, which raises FieldError.
        """
        return cls.objects.filter(user__username__icontains=search_term)

    @classmethod
    def update_caption(cls, current_value, new_value):
        # NOTE(review): this method appears copy-pasted from Image — it
        # queries the Image model, and the original filtered on a
        # nonexistent `name` field (FieldError). Mapped to `image_name` to
        # keep the original filter/update-same-field shape; confirm intent.
        return Image.objects.filter(
            image_name=current_value).update(image_name=new_value)

    def save_profile(self):
        """Persist this profile.

        Bug fix: the bogus @classmethod decorator is removed — the body
        calls self.save(), which only works on an instance.
        """
        return self.save()

    def delete_image(self):
        """Delete this profile (name kept for backward compatibility).

        Bug fix: bogus @classmethod removed, as above.
        """
        return self.delete()
class Image(models.Model):
    """An uploaded image post with name, caption, likes and comments."""
    image = models.ImageField(upload_to='images-uploaded')
    image_name = models.CharField(max_length=30)
    image_caption = models.CharField(max_length=30)
    image_likes = models.PositiveIntegerField()
    image_comments = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)

    @classmethod
    def get_all(cls):
        """Return all images."""
        return Image.objects.all()

    @classmethod
    def get_image_by_id(cls, incoming_id):
        """Return the image with the given primary key (raises DoesNotExist)."""
        return cls.objects.get(id=incoming_id)

    @classmethod
    def update_caption(cls, current_value, new_value):
        """Rename images matching current_value.

        Bug fix: the model has no `name` field, so filter(name=...) raised
        FieldError; the name-like field here is `image_name`.
        NOTE(review): the method name suggests `image_caption` may have been
        intended — confirm.
        """
        return Image.objects.filter(
            image_name=current_value).update(image_name=new_value)

    def save_image(self):
        """Persist this image.

        Bug fix: bogus @classmethod removed — the body calls self.save(),
        which only works on an instance.
        """
        return self.save()

    def delete_image(self):
        """Delete this image (bogus @classmethod removed, as above)."""
        return self.delete()
class Comment(models.Model):
    """A comment left by a user on an image post."""
    # explicit on_delete matches the pre-Django-2.0 implicit CASCADE default
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    post = models.ForeignKey(
        Image, related_name='comments', on_delete=models.CASCADE, null=True)
    comment = models.CharField(max_length=200, null=True)

    def __str__(self):
        return self.comment
class Likes(models.Model):
    """A like given by a user to an image post."""
    # NOTE(review): OneToOneField means a user can have at most ONE Likes
    # row across ALL posts — presumably ForeignKey was intended; confirm
    # before changing (it would require a migration).
    user = models.OneToOneField(
        User, related_name='l_user', on_delete=models.CASCADE)
    post = models.ForeignKey(
        Image, related_name='likes', on_delete=models.CASCADE)
    like = models.CharField(max_length=3, default='1')
| StarcoderdataPython |
1821549 | import boto3
from botocore.config import Config
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie
from rest_framework import status
from rest_framework.permissions import IsAuthenticatedOrReadOnly, AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Organization, Tag, WelcomeMessage
from misc.models import File
from .serializers import BaseOrganizationSerializer, DetailOrganizationSerializer, \
WelcomeMessageSerializer
from misc.serializers import FileSerializer
from users.permissions import NewHirePermission, AdminPermission
def home(request):
    """Render the landing page template."""
    return render(request, 'index.html')
class OrgView(APIView):
    """Read-only view of the organization via the base serializer."""
    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get(self, request):
        serializer = BaseOrganizationSerializer(Organization.object.get())
        return Response(serializer.data)
class OrgDetailView(APIView):
    """Detailed organization view; PATCH applies a partial update."""

    def get(self, request):
        serializer = DetailOrganizationSerializer(Organization.object.get())
        return Response(serializer.data)

    def patch(self, request):
        org = Organization.object.get()
        serializer = DetailOrganizationSerializer(
            org, data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)
class WelcomeMessageView(APIView):
    """List every welcome message."""
    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get(self, request):
        serializer = WelcomeMessageSerializer(
            WelcomeMessage.objects.all(), many=True)
        return Response(serializer.data)
class TagView(APIView):
    """List all tag names as plain strings."""
    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get(self, request):
        names = [tag.name for tag in Tag.objects.all()]
        return Response(names)
class CSRFTokenView(APIView):
    """Empty endpoint whose only purpose is to set the CSRF cookie."""
    permission_classes = (AllowAny,)

    @method_decorator(ensure_csrf_cookie)
    def get(self, request):
        return HttpResponse()
class FileView(APIView):
    """Endpoints around S3-backed File objects (presigned up/download)."""
    permission_classes = (AdminPermission, NewHirePermission)

    def get(self, request, id, uuid):
        """Return the URL of an existing file."""
        file = get_object_or_404(File, uuid=uuid, id=id)
        return Response(file.get_url())

    def post(self, request):
        """Create a File record and return a presigned S3 upload URL."""
        name = request.data['name']
        # Bug fix: name.split('.')[1] returned the wrong extension for
        # multi-dot names ("a.tar.gz" -> "tar") and raised IndexError for
        # names without a dot. Split once from the right instead.
        parts = name.rsplit('.', 1)
        base, ext = (parts[0], parts[1]) if len(parts) == 2 else (name, '')
        serializer = FileSerializer(data={'name': name, 'ext': ext})
        serializer.is_valid(raise_exception=True)
        f = serializer.save()
        # object key: "<id>-<basename>/<original filename>"
        key = '{}-{}/{}'.format(f.id, base, name)
        f.key = key
        f.save()
        s3 = boto3.client(
            's3',
            settings.AWS_REGION,
            endpoint_url=settings.AWS_S3_ENDPOINT_URL,
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            config=Config(signature_version='s3v4')
        )
        url = s3.generate_presigned_url(
            ClientMethod='put_object', ExpiresIn=3600,
            Params={'Bucket': settings.AWS_STORAGE_BUCKET_NAME, 'Key': key})
        return Response({'url': url, 'id': f.id})

    def put(self, request, id):
        """Mark an uploaded file as active."""
        file = get_object_or_404(File, pk=id)
        file.active = True
        file.save()
        return Response(FileSerializer(file).data)

    def delete(self, request, id):
        """Delete a file; only admins (role == 1) may do so.

        Bug fix: non-admin requests previously fell through and returned
        None, producing an HTTP 500; they now get an explicit 403.
        """
        if request.user.role != 1:
            return Response(status=status.HTTP_403_FORBIDDEN)
        file = get_object_or_404(File, pk=id)
        file.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class LogoView(APIView):
    """Activate an uploaded file and set it as the organization logo."""

    def put(self, request, id):
        logo_file = get_object_or_404(File, pk=id)
        logo_file.active = True
        logo_file.save()
        org = Organization.object.get()
        org.logo = logo_file
        org.save()
        return Response(FileSerializer(logo_file).data)
| StarcoderdataPython |
6488566 | import io
import json
import time
import asyncio
import aiohttp
import traceback
from concurrent.futures._base import CancelledError
import collections
import telepot
import telepot.async.helper
class Bot(telepot._BotBase):
    def __init__(self, token, loop=None):
        """Async Telegram bot; uses *loop* or the current default event loop."""
        super(Bot, self).__init__(token)
        self._loop = loop if loop is not None else asyncio.get_event_loop()
    @property
    def loop(self):
        """The asyncio event loop this bot schedules handler tasks on."""
        return self._loop
    @asyncio.coroutine
    def _parse(self, response):
        """Decode a Bot API HTTP response.

        Returns the ``result`` payload when ``ok`` is true; raises
        TelegramError on an API-level error, or BadHTTPResponse when the
        body is not valid JSON.
        """
        try:
            data = yield from response.json()
        except ValueError:
            # non-JSON body: surface the raw text together with the status
            text = yield from response.text()
            raise telepot.BadHTTPResponse(response.status, text)
        if data['ok']:
            return data['result']
        else:
            raise telepot.TelegramError(data['description'], data['error_code'])
    @asyncio.coroutine
    def getMe(self):
        """Call the Bot API ``getMe`` method and return its result."""
        r = yield from asyncio.wait_for(
            aiohttp.post(self._methodurl('getMe')),
            self._http_timeout
        )
        return (yield from self._parse(r))
    @asyncio.coroutine
    def sendMessage(self, chat_id, text, parse_mode=None, disable_web_page_preview=None, reply_to_message_id=None, reply_markup=None):
        """Send a text message (Bot API ``sendMessage``)."""
        # request parameters are gathered straight from the argument list,
        # so no extra locals may be introduced before this call
        p = self._strip(locals())
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('sendMessage'),
                params=self._rectify(p, allow_namedtuple=['reply_markup'])),
            self._http_timeout
        )
        return (yield from self._parse(r))
    @asyncio.coroutine
    def forwardMessage(self, chat_id, from_chat_id, message_id):
        """Forward a message between chats (Bot API ``forwardMessage``)."""
        p = self._strip(locals())
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('forwardMessage'),
                params=self._rectify(p)),
            self._http_timeout
        )
        return (yield from self._parse(r))
    @asyncio.coroutine
    def _sendFile(self, inputfile, filetype, params):
        """Shared implementation behind the send* media methods.

        ``inputfile`` may be an open file object (uploaded as multipart
        form data) or any other value (sent as the ``filetype`` request
        parameter, e.g. an existing file_id).
        """
        method = {'photo':    'sendPhoto',
                  'audio':    'sendAudio',
                  'document': 'sendDocument',
                  'sticker':  'sendSticker',
                  'video':    'sendVideo',
                  'voice':    'sendVoice',}[filetype]
        if isinstance(inputfile, io.IOBase):
            files = {filetype: inputfile}
            r = yield from aiohttp.post(
                    self._methodurl(method),
                    params=self._rectify(params, allow_namedtuple=['reply_markup']),
                    data=files)
            # `_http_timeout` is not used here because, for some reason, the larger the file,
            # the longer it takes for the server to respond (after upload is finished). It is hard to say
            # what value `_http_timeout` should be. In the future, maybe I should let user specify.
        else:
            params[filetype] = inputfile
            r = yield from asyncio.wait_for(
                    aiohttp.post(
                        self._methodurl(method),
                        params=self._rectify(params, allow_namedtuple=['reply_markup'])),
                    self._http_timeout)
        return (yield from self._parse(r))
    @asyncio.coroutine
    def sendPhoto(self, chat_id, photo, caption=None, reply_to_message_id=None, reply_markup=None):
        """Send a photo (Bot API ``sendPhoto``)."""
        p = self._strip(locals(), more=['photo'])
        return (yield from self._sendFile(photo, 'photo', p))
    @asyncio.coroutine
    def sendAudio(self, chat_id, audio, duration=None, performer=None, title=None, reply_to_message_id=None, reply_markup=None):
        """Send an audio file (Bot API ``sendAudio``)."""
        p = self._strip(locals(), more=['audio'])
        return (yield from self._sendFile(audio, 'audio', p))
    @asyncio.coroutine
    def sendDocument(self, chat_id, document, reply_to_message_id=None, reply_markup=None):
        """Send a general file (Bot API ``sendDocument``)."""
        p = self._strip(locals(), more=['document'])
        return (yield from self._sendFile(document, 'document', p))
    @asyncio.coroutine
    def sendSticker(self, chat_id, sticker, reply_to_message_id=None, reply_markup=None):
        """Send a sticker (Bot API ``sendSticker``)."""
        p = self._strip(locals(), more=['sticker'])
        return (yield from self._sendFile(sticker, 'sticker', p))
    @asyncio.coroutine
    def sendVideo(self, chat_id, video, duration=None, caption=None, reply_to_message_id=None, reply_markup=None):
        """Send a video (Bot API ``sendVideo``)."""
        p = self._strip(locals(), more=['video'])
        return (yield from self._sendFile(video, 'video', p))
    @asyncio.coroutine
    def sendVoice(self, chat_id, voice, duration=None, reply_to_message_id=None, reply_markup=None):
        """Send a voice message (Bot API ``sendVoice``)."""
        p = self._strip(locals(), more=['voice'])
        return (yield from self._sendFile(voice, 'voice', p))
    @asyncio.coroutine
    def sendLocation(self, chat_id, latitude, longitude, reply_to_message_id=None, reply_markup=None):
        """Send a map point (Bot API ``sendLocation``)."""
        p = self._strip(locals())
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('sendLocation'),
                params=self._rectify(p, allow_namedtuple=['reply_markup'])),
            self._http_timeout
        )
        return (yield from self._parse(r))
    @asyncio.coroutine
    def sendChatAction(self, chat_id, action):
        """Broadcast a chat action such as 'typing' (Bot API ``sendChatAction``)."""
        p = self._strip(locals())
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('sendChatAction'),
                params=self._rectify(p)),
            self._http_timeout
        )
        return (yield from self._parse(r))
    @asyncio.coroutine
    def getUserProfilePhotos(self, user_id, offset=None, limit=None):
        """Fetch a user's profile pictures (Bot API ``getUserProfilePhotos``)."""
        p = self._strip(locals())
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('getUserProfilePhotos'),
                params=self._rectify(p)),
            self._http_timeout
        )
        return (yield from self._parse(r))
    @asyncio.coroutine
    def getFile(self, file_id):
        """Fetch file metadata for a file_id (Bot API ``getFile``)."""
        p = self._strip(locals())
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('getFile'),
                params=self._rectify(p)),
            self._http_timeout
        )
        return (yield from self._parse(r))
    @asyncio.coroutine
    def getUpdates(self, offset=None, limit=None, timeout=None):
        """Poll for updates (Bot API ``getUpdates``)."""
        p = self._strip(locals())
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('getUpdates'),
                params=self._rectify(p)),
            # total wait budget: base HTTP timeout plus the long-poll timeout
            self._http_timeout+(0 if timeout is None else timeout)
        )
        return (yield from self._parse(r))
@asyncio.coroutine
def setWebhook(self, url=None, certificate=None):
    """Invoke the ``setWebhook`` Bot API method.

    When ``certificate`` is given it is uploaded as form data in the
    request body; otherwise only query parameters are sent.
    NOTE: ``_strip`` reads ``locals()``; ``more=['certificate']`` keeps the
    raw certificate object out of the query parameters.
    """
    p = self._strip(locals(), more=['certificate'])
    if certificate:
        files = {'certificate': certificate}
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('setWebhook'),
                params=self._rectify(p),
                data=files),
            self._http_timeout)
    else:
        r = yield from asyncio.wait_for(
            aiohttp.post(
                self._methodurl('setWebhook'),
                params=self._rectify(p)),
            self._http_timeout)
    return (yield from self._parse(r))
@asyncio.coroutine
def downloadFile(self, file_id, dest):
    """Download the file identified by ``file_id`` into ``dest``.

    ``dest`` may be an already-open binary file object (left open on
    return) or a path string (opened and closed here).
    """
    f = yield from self.getFile(file_id)
    # `file_path` is optional in File object
    if 'file_path' not in f:
        raise telepot.TelegramError('No file_path returned', None)
    try:
        r = yield from asyncio.wait_for(
            aiohttp.get(self._fileurl(f['file_path'])),
            self._http_timeout)
        d = dest if isinstance(dest, io.IOBase) else open(dest, 'wb')
        # Stream the response body to `dest` in fixed-size chunks.
        while 1:
            chunk = yield from r.content.read(self._file_chunk_size)
            if not chunk:
                break
            d.write(chunk)
            d.flush()
    finally:
        # `d` / `r` may not exist if an earlier step raised, hence the
        # locals() membership checks before cleanup. A caller-supplied
        # file object is deliberately NOT closed here.
        if not isinstance(dest, io.IOBase) and 'd' in locals():
            d.close()
        if 'r' in locals():
            r.close()
@asyncio.coroutine
def answerInlineQuery(self, inline_query_id, results, cache_time=None, is_personal=None, next_offset=None):
    """Invoke the ``answerInlineQuery`` Bot API method.

    ``results`` may contain namedtuples; ``_rectify`` is told to allow them.
    NOTE: ``_strip`` reads ``locals()`` — do not rename the parameters.
    """
    p = self._strip(locals())
    r = yield from asyncio.wait_for(
        aiohttp.post(
            self._methodurl('answerInlineQuery'),
            params=self._rectify(p, allow_namedtuple=['results'])),
        timeout=self._http_timeout
    )
    return (yield from self._parse(r))
@asyncio.coroutine
def messageLoop(self, handler=None, source=None, ordered=True, maxhold=3):
    """Fetch updates forever and dispatch each one to ``handler``.

    source is None          -> long-poll the Telegram server via getUpdates().
    source is asyncio.Queue -> consume raw update payloads from the queue,
                               either strictly in update_id order
                               (``ordered=True``) or as they arrive.
    maxhold                 -> in ordered mode, the maximum number of seconds
                               an out-of-order update is held while waiting
                               for the gap before it to be filled.
    """
    if handler is None:
        handler = self.handle

    def create_task_for(msg):
        self.loop.create_task(handler(msg))

    # A coroutine handler is scheduled as a task; a plain callable is
    # invoked synchronously from this loop.
    if asyncio.iscoroutinefunction(handler):
        callback = create_task_for
    else:
        callback = handler

    def handle(update):
        # Route one update to the callback by flavor, and always return
        # its update_id so callers can advance their offset.
        # NOTE: the `return` inside `finally` suppresses the BadFlavor
        # raised above — only traceback.print_exc() reports it.
        try:
            if 'message' in update:
                callback(update['message'])
            elif 'inline_query' in update:
                callback(update['inline_query'])
            elif 'chosen_inline_result' in update:
                callback(update['chosen_inline_result'])
            else:
                # Do not swallow. Make sure developer knows.
                raise BadFlavor(update)
        except:
            # Localize the error so message thread can keep going.
            traceback.print_exc()
        finally:
            return update['update_id']

    @asyncio.coroutine
    def get_from_telegram_server():
        offset = None  # running offset
        while 1:
            try:
                result = yield from self.getUpdates(offset=offset, timeout=20)
                if len(result) > 0:
                    # No sort. Trust server to give messages in correct order.
                    # Update offset to max(update_id) + 1
                    offset = max([handle(update) for update in result]) + 1
            except CancelledError:
                raise
            except:
                traceback.print_exc()
                yield from asyncio.sleep(0.1)
            else:
                yield from asyncio.sleep(0.1)

    def dictify(data):
        # Normalize a queued payload (bytes / str JSON, or an already
        # decoded dict) into a dict.
        if type(data) is bytes:
            return json.loads(data.decode('utf-8'))
        elif type(data) is str:
            return json.loads(data)
        elif type(data) is dict:
            return data
        else:
            raise ValueError()

    @asyncio.coroutine
    def get_from_queue_unordered(qu):
        # Dispatch queue items as they come, with no re-ordering.
        while 1:
            try:
                data = yield from qu.get()
                update = dictify(data)
                handle(update)
            except:
                traceback.print_exc()

    @asyncio.coroutine
    def get_from_queue(qu):
        # Here is the re-ordering mechanism, ensuring in-order delivery of updates.
        # `buffer` holds, positionally, updates that arrived early (dicts)
        # interleaved with expiry timestamps (floats) standing in for the
        # update_ids still missing.
        max_id = None  # max update_id passed to callback
        buffer = collections.deque()  # keep those updates which skip some update_id
        qwait = None  # how long to wait for updates,
        # because buffer's content has to be returned in time.
        while 1:
            try:
                data = yield from asyncio.wait_for(qu.get(), qwait)
                update = dictify(data)
                if max_id is None:
                    # First message received, handle regardless.
                    max_id = handle(update)
                elif update['update_id'] == max_id + 1:
                    # No update_id skipped, handle naturally.
                    max_id = handle(update)
                    # clear contagious updates in buffer
                    if len(buffer) > 0:
                        buffer.popleft()  # first element belongs to update just received, useless now.
                        while 1:
                            try:
                                if type(buffer[0]) is dict:
                                    max_id = handle(buffer.popleft())  # updates that arrived earlier, handle them.
                                else:
                                    break  # gap, no more contagious updates
                            except IndexError:
                                break  # buffer empty
                elif update['update_id'] > max_id + 1:
                    # Update arrives pre-maturely, insert to buffer.
                    nbuf = len(buffer)
                    if update['update_id'] <= max_id + nbuf:
                        # buffer long enough, put update at position
                        buffer[update['update_id'] - max_id - 1] = update
                    else:
                        # buffer too short, lengthen it
                        expire = time.time() + maxhold
                        for a in range(nbuf, update['update_id']-max_id-1):
                            buffer.append(expire)  # put expiry time in gaps
                        buffer.append(update)
                else:
                    pass  # discard
            except asyncio.TimeoutError:
                # debug message
                # print('Timeout')
                # some buffer contents have to be handled
                # flush buffer until a non-expired time is encountered
                while 1:
                    try:
                        if type(buffer[0]) is dict:
                            max_id = handle(buffer.popleft())
                        else:
                            expire = buffer[0]
                            if expire <= time.time():
                                # Gap expired: give up on the missing
                                # update_id and move past it.
                                max_id += 1
                                buffer.popleft()
                            else:
                                break  # non-expired
                    except IndexError:
                        break  # buffer empty
            except:
                traceback.print_exc()
            finally:
                try:
                    # don't wait longer than next expiry time
                    qwait = buffer[0] - time.time()
                    if qwait < 0:
                        qwait = 0
                except IndexError:
                    # buffer empty, can wait forever
                    qwait = None
                # debug message
                # print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)

    if source is None:
        yield from get_from_telegram_server()
    elif isinstance(source, asyncio.Queue):
        if ordered:
            yield from get_from_queue(source)
        else:
            yield from get_from_queue_unordered(source)
    else:
        raise ValueError('Invalid source')
class SpeakerBot(Bot):
    """A Bot carrying a 'microphone' that feeds messages to listener queues.

    NOTE(review): ``telepot.async`` cannot even be parsed on Python >= 3.7,
    where ``async`` became a reserved keyword — this code targets older
    Python/telepot versions.
    """
    def __init__(self, token, loop=None):
        super(SpeakerBot, self).__init__(token, loop)
        self._mic = telepot.async.helper.Microphone()

    @property
    def mic(self):
        # Read-only access to the shared microphone.
        return self._mic

    def create_listener(self):
        # Each listener gets its own queue, registered with the microphone.
        q = asyncio.Queue()
        self._mic.add(q)
        ln = telepot.async.helper.Listener(self._mic, q)
        return ln
class DelegatorBot(SpeakerBot):
    """A SpeakerBot that spawns per-seed delegate tasks for incoming messages.

    ``delegation_patterns`` is a sequence of ``(calculate_seed,
    make_coroutine_obj)`` pairs. For each message every pair's seed function
    is evaluated: ``None`` skips the pattern, a hashable seed keeps at most
    one live task per seed value, and an unhashable seed always spawns a
    fresh task.
    """
    def __init__(self, token, delegation_patterns, loop=None):
        super(DelegatorBot, self).__init__(token, loop)
        # Extend each pattern with a registry dict mapping seed -> task.
        self._delegate_records = [p + ({},) for p in delegation_patterns]

    def handle(self, msg):
        # Local import: `collections.Hashable` was removed in Python 3.10;
        # the abc submodule (available since 3.3) is imported explicitly in
        # case the file-level `import collections` does not expose it.
        import collections.abc
        # Broadcast the message to all listeners first.
        self._mic.send(msg)
        # Renamed locals (`registry`, `seed`) — the originals shadowed the
        # builtins `dict` and `id`.
        for calculate_seed, make_coroutine_obj, registry in self._delegate_records:
            seed = calculate_seed(msg)
            if seed is None:
                # This pattern does not apply to the message.
                continue
            elif isinstance(seed, collections.abc.Hashable):
                # At most one live task per seed: spawn only if absent or done.
                if seed not in registry or registry[seed].done():
                    c = make_coroutine_obj((self, msg, seed))
                    if not asyncio.iscoroutine(c):
                        raise RuntimeError('You must produce a coroutine *object* as delegate.')
                    registry[seed] = self._loop.create_task(c)
            else:
                # Unhashable seed: always spawn a fresh, untracked task.
                c = make_coroutine_obj((self, msg, seed))
                self._loop.create_task(c)
| StarcoderdataPython |
4905674 | <filename>setup.py
"""Python setup script.
:author: <NAME> <<EMAIL>>
:license: MIT, see license file or https://opensource.org/licenses/MIT
:created on 2018-10-06 10:55:36
:last modified by: <NAME>
:last modified time: 2019-07-23 10:27:04
"""
import io
import os
import re
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file located relative to this script.

    ``names`` are path components joined onto this file's directory;
    ``encoding`` may be overridden via kwargs (default: utf8).
    Returns '' when the file cannot be read.
    """
    path = os.path.join(os.path.dirname(__file__), *names)
    encoding = kwargs.get("encoding", "utf8")
    try:
        with io.open(path, encoding=encoding) as handle:
            return handle.read()
    except IOError:
        return ''
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given file.

    Raises RuntimeError when no version assignment is found.
    """
    content = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      content, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Long description for PyPI, taken verbatim from the README.
long_description = read('README.md')

setup(
    name="PyQt5-stubs",
    url="https://github.com/stlehmann/PyQt5-stubs",
    author="<NAME>",
    author_email="<EMAIL>",
    description="PEP561 stub files for the PyQt5 framework",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # The distribution version is stored in the stub package's __init__.pyi.
    version=find_version('PyQt5-stubs', '__init__.pyi'),
    # Ship the .pyi stub files themselves as package data.
    package_data={"PyQt5-stubs": ['*.pyi']},
    install_requires=["PyQt5==5.13.*"],
    packages=["PyQt5-stubs"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development"
    ]
)
| StarcoderdataPython |
1715539 | # -*- coding: utf-8 -*-
# Python 2 exercise script: print a file given on the command line, then
# ask for a file name interactively and print that one too.
from sys import argv

# argv: [script name, file to read]
script, file_name = argv

txt = open(file_name)
print txt.read()
# remember to close the file after open it
txt.close()

# Prompt the user for a file name (Python 2 raw_input) and echo that file.
txt_again = raw_input('Input the file name again\n > ')
txt_file_again = open(txt_again)
print txt_file_again.read()
txt_file_again.close()
1603009 | # coding: utf8
# macOS Terminal keyboard shortcut reference, keyed by description.
# FIX: the original literal used the key "Clears the Screen" twice, so the
# "Ctrl L" entry was silently overwritten by the "⌘K" entry; the second
# entry now has a distinct key describing what ⌘K actually does.
shortcuts = {
    "Terminal": {
        # https://github.com/0nn0/terminal-mac-cheatsheet
        "Go to the beginning of the line you are currently typing on.": "Ctrl A ",
        "Go to the end of the line you are currently typing on.": "Ctrl E ",
        "Clears everything on current line": "Ctrl Q ",
        "Clears the Screen": "Ctrl L ",
        "Clears the Screen and scrollback buffer": "⌘K",
        "Cut everything backwards to beginning of line": "Ctrl U",
        "Cut everything forward to end of line": "Ctrl K",
        "Cut one word backwards using white space as delimiter": "Ctrl W",
        "Paste whatever was cut by the last cut command": "Ctrl Y",
        "Same as backspace": "Ctrl H",
        "Kill whatever you are running": "Ctrl C",
        "Exit the current shell when no process is running, or send EOF to a the running process": "Ctrl D",
        "Puts whatever you are running into a suspended background process. fg restores it.": "Ctrl Z",
        "Undo the last command.": "Ctrl ⇧-",
        "Swap the last two characters before the cursor": "Ctrl T",
        "Move cursor one character forward": "Ctrl F",
        "Move cursor one character backward": "Ctrl B",
        "Move cursor one word forward": "⌥→",
        "Move cursor one word backward": "⌥←",
        "Swap the last two words before the cursor": "Esc T",
        "Auto-complete files and folder names": "Tab "
    }
}
4838662 | <filename>rodan/test/views/test_workflow.py
from django.conf import settings
from rodan.models import Workflow, InputPort, OutputPort, ResourceType
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from model_mommy import mommy
from rodan.test.helpers import RodanTestSetUpMixin, RodanTestTearDownMixin
import six
import uuid
from rodan.serializers.workflow import version_map
class WorkflowViewTestCase(RodanTestTearDownMixin, APITestCase, RodanTestSetUpMixin):
    """
    For clarification of some of the more confusing tests (i.e. loop, merging, and branching), see
    https://github.com/DDMAL/Rodan/wiki/Workflow-View-Test
    """
    def setUp(self):
        # Build the basic fixture workflow and authenticate as superuser.
        self.setUp_rodan()
        self.setUp_user()
        self.setUp_basic_workflow()
        self.client.force_authenticate(user=self.test_superuser)

    def _validate(self, workflow_uuid):
        """PATCH valid=True on the workflow, triggering server-side validation."""
        workflow_update = {"valid": True}
        return self.client.patch(
            # "/api/workflow/{0}/".format(workflow_uuid), workflow_update, format="json"
            reverse("workflow-detail", kwargs={"pk": workflow_uuid}), workflow_update, format="json"
        )

    def test_view__workflow_notfound(self):
        """Validating a nonexistent workflow UUID yields 404."""
        response = self._validate(uuid.uuid1())
        anticipated_message = {"detail": "Not found."}
        self.assertEqual(response.data, anticipated_message)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_view__posting_valid(self):
        """A workflow cannot be created already-valid via POST."""
        workflow_obj = {
            # "project": "http://localhost:8000/api/project/{0}/".format(
            #     self.test_project.uuid
            # ),
            "project": reverse("project-detail", kwargs={"pk": self.test_project.uuid}),
            "name": "test workflow",
            "valid": True,
        }
        # response = self.client.post("/api/workflows/", workflow_obj, format="json")
        response = self.client.post(reverse("workflow-list"), workflow_obj, format="json")
        anticipated_message = {
            "valid": [
                "You can't create a valid workflow - it must be validated through a PATCH request."
            ]
        }
        self.assertEqual(response.data, anticipated_message)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_view__post(self):
        """A workflow posted with valid=False is created normally."""
        workflow_obj = {
            # "project": "http://localhost:8000/api/project/{0}/".format(
            #     self.test_project.uuid
            # ),
            "project": reverse("project-detail", kwargs={"pk": self.test_project.uuid}),
            "name": "<NAME>",
            # "creator": "http://localhost:8000/api/user/{0}/".format(self.test_superuser.pk),
            "creator": reverse("user-detail", kwargs={"pk": self.test_superuser.pk}),
            "valid": False,
        }
        response = self.client.post("/api/workflows/", workflow_obj, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_view__validation_result_valid(self):
        """The basic fixture workflow validates and is persisted as valid."""
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        retr_workflow = Workflow.objects.get(pk=self.test_workflow.uuid)
        self.assertTrue(retr_workflow.valid)

    def test_view__validation_result_invalid(self):
        """An empty workflow fails validation and stays invalid."""
        test_workflow_no_jobs = mommy.make("rodan.Workflow", project=self.test_project)
        response = self._validate(test_workflow_no_jobs.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        retr_workflow = Workflow.objects.get(pk=test_workflow_no_jobs.uuid)
        self.assertFalse(retr_workflow.valid)

    def test_workflowjob__no_output(self):
        """A workflow job without output ports is rejected."""
        self.test_workflowjob2.output_ports.all().delete()
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_NO_OP")

    def test_workflowjob__inputport_number_not_satisfy(self):
        """Exceeding the input port type's maximum count is rejected."""
        mommy.make(
            "rodan.Connection",
            _quantity=10,
            output_port=self.test_workflowjob.output_ports.all()[0],
            input_port__workflow_job=self.test_workflowjob2,
            input_port__input_port_type=self.test_inputporttype,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_TOO_MANY_IP")

    def test_workflowjob__outputport_number_not_satisfy(self):
        """Exceeding the output port type's maximum count is rejected."""
        mommy.make(
            "rodan.Connection",
            _quantity=10,
            output_port__workflow_job=self.test_workflowjob,
            output_port__output_port_type=self.test_outputporttype,
            input_port__workflow_job__job=self.test_workflowjob2.job,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_TOO_MANY_OP")

    def test_workflowjob__settings_not_satisfy(self):
        """Job settings that violate the job's JSON schema are rejected."""
        self.test_job.settings = {
            "type": "object",
            "required": ["a"],
            "properties": {"a": {"type": "number"}},
        }
        self.test_job.save()
        self.test_workflowjob.job_settings = {"b": []}
        self.test_workflowjob.save()
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_INVALID_SETTINGS")

    def test_input__type_incompatible_with_job(self):
        """An input port whose type belongs to another job is rejected."""
        new_ipt = mommy.make("rodan.InputPortType")
        new_ip = mommy.make(  # noqa
            "rodan.InputPort",
            workflow_job=self.test_workflowjob,
            input_port_type=new_ipt,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "IP_TYPE_MISMATCH")

    def test_input__multiple_connections(self):
        """An input port with more than one incoming connection is rejected."""
        ip = self.test_workflowjob2.input_ports.all()[0]
        mommy.make(
            "rodan.Connection",
            output_port=self.test_workflowjob.output_ports.all()[0],
            input_port=ip,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "IP_TOO_MANY_CONNECTIONS")

    def test_input__more_than_maximum(self):
        """Adding input ports beyond the type's maximum is rejected."""
        for i in six.moves.range(self.test_inputporttype.maximum):
            ip = mommy.make(  # noqa
                "rodan.InputPort",
                workflow_job=self.test_workflowjob,
                input_port_type=self.test_inputporttype,
            )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_TOO_MANY_IP")

    def test_input__fewer_than_minimum(self):
        """Deleting a required input port is rejected."""
        ip = self.test_workflowjob.input_ports.all()[0]
        ip.delete()
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_TOO_FEW_IP")

    def test_output__type_incompatible_with_job(self):
        """An output port whose type belongs to another job is rejected."""
        new_opt = mommy.make("rodan.OutputPortType")
        new_op = mommy.make(  # noqa
            "rodan.OutputPort",
            workflow_job=self.test_workflowjob,
            output_port_type=new_opt,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "OP_TYPE_MISMATCH")

    def test_output__more_than_maximum(self):
        """Adding output ports beyond the type's maximum is rejected."""
        for o in six.moves.range(self.test_outputporttype.maximum):
            op = mommy.make(  # noqa
                "rodan.OutputPort",
                workflow_job=self.test_workflowjob,
                output_port_type=self.test_outputporttype,
            )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_TOO_MANY_OP")

    def test_output__fewer_than_minimum(self):
        """A port type with minimum=1 and no matching port is rejected."""
        opt2 = mommy.make(  # noqa
            "rodan.OutputPortType", maximum=3, minimum=1, job=self.test_job
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WFJ_TOO_FEW_OP")

    def test_output__resourcetype_list_conflict_case1(self):
        # CASE 1: input_port is list but output_type not.
        new_ipt = mommy.make(
            "rodan.InputPortType", maximum=1, minimum=0, job=self.test_job, is_list=True
        )
        new_ipt.resource_types.add(ResourceType.objects.get(mimetype="test/b"))
        new_ip = mommy.make(
            "rodan.InputPort",
            workflow_job=self.test_workflowjob2,
            input_port_type=new_ipt,
        )
        op = self.test_workflowjob.output_ports.first()
        conn = mommy.make("rodan.Connection", output_port=op, input_port=new_ip)  # noqa
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "RESOURCETYPE_LIST_CONFLICT")

    def test_output__resourcetype_list_conflict_case2(self):
        # CASE 2: output_port is list but input_type not.
        new_opt = mommy.make(
            "rodan.OutputPortType",
            maximum=1,
            minimum=0,
            job=self.test_job,
            is_list=True,
        )
        new_opt.resource_types.add(ResourceType.objects.get(mimetype="test/b"))
        new_op = mommy.make(
            "rodan.OutputPort",
            workflow_job=self.test_workflowjob,
            output_port_type=new_opt,
        )
        ipt = self.test_workflowjob.input_ports.first().input_port_type
        new_ip = mommy.make(
            "rodan.InputPort", workflow_job=self.test_workflowjob, input_port_type=ipt
        )
        conn = mommy.make("rodan.Connection", output_port=new_op, input_port=new_ip)  # noqa
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "RESOURCETYPE_LIST_CONFLICT")

    def test_output__no_common_resource_type_simple(self):
        """A connection between ports sharing no resource type is rejected."""
        new_ipt = mommy.make(
            "rodan.InputPortType",
            maximum=1,
            minimum=0,
            is_list=False,
            job=self.test_job,
        )
        new_ipt.resource_types.add(
            ResourceType.objects.get(mimetype="test/b")
        )  # consider the type of opt is 'test/a1' and 'test/a2'
        new_ip = mommy.make(
            "rodan.InputPort",
            workflow_job=self.test_workflowjob2,
            input_port_type=new_ipt,
        )
        op = self.test_workflowjob.output_ports.first()
        conn = mommy.make("rodan.Connection", output_port=op, input_port=new_ip)  # noqa
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "NO_COMMON_RESOURCETYPE")

    def test_output__no_common_resource_type_complex(self):
        """One output feeding two inputs with disjoint types is rejected."""
        new_ipt1 = mommy.make(
            "rodan.InputPortType",
            maximum=1,
            minimum=0,
            is_list=False,
            job=self.test_job,
        )
        new_ipt1.resource_types.add(
            ResourceType.objects.get(mimetype="test/a1")
        )  # consider the type of opt is 'test/a1' and 'test/a2'
        new_ipt2 = mommy.make(
            "rodan.InputPortType",
            maximum=1,
            minimum=0,
            is_list=False,
            job=self.test_job,
        )
        new_ipt2.resource_types.add(
            ResourceType.objects.get(mimetype="test/a2")
        )  # consider the type of opt is 'test/a1' and 'test/a2'
        new_ip1 = mommy.make(  # noqa
            "rodan.InputPort",
            workflow_job=self.test_workflowjob2,
            input_port_type=new_ipt1,
        )
        new_ip2 = mommy.make(  # noqa
            "rodan.InputPort",
            workflow_job=self.test_workflowjob2,
            input_port_type=new_ipt2,
        )
        op = self.test_workflowjob.output_ports.first()  # noqa
        conn1 = mommy.make("rodan.Connection", output_port=op, input_port=new_ip1)  # noqa
        conn2 = mommy.make("rodan.Connection", output_port=op, input_port=new_ip2)  # noqa
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "NO_COMMON_RESOURCETYPE")

    def test_graph__empty(self):
        """A workflow with no jobs at all is rejected."""
        test_workflow_no_jobs = mommy.make("rodan.Workflow", project=self.test_project)
        response = self._validate(test_workflow_no_jobs.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WF_EMPTY")

    def test_graph__not_connected(self):
        """Two disconnected components in one workflow are rejected."""
        workflowjob = mommy.make(
            "rodan.WorkflowJob", workflow=self.test_workflow, job=self.test_job
        )
        inputport = mommy.make(  # noqa
            "rodan.InputPort",
            workflow_job=workflowjob,
            input_port_type=self.test_inputporttype,
        )
        outputport = mommy.make(
            "rodan.OutputPort",
            workflow_job=workflowjob,
            output_port_type=self.test_outputporttype,
        )
        test_connection = mommy.make(
            "rodan.Connection",
            output_port=outputport,
            input_port__input_port_type=self.test_inputporttype,
            input_port__workflow_job__workflow=self.test_workflow,
            input_port__workflow_job__job=self.test_job,
        )
        test_workflowjob2 = test_connection.input_port.workflow_job  # noqa
        outputport2 = mommy.make(  # noqa
            "rodan.OutputPort",
            workflow_job=test_workflowjob2,
            output_port_type=self.test_outputporttype,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WF_NOT_CONNECTED")

    def test_graph__loop(self):
        """A cycle (wfjob2 feeding back into wfjob) is rejected."""
        mommy.make(
            "rodan.Connection",
            input_port__input_port_type=self.test_inputporttype,
            input_port__workflow_job=self.test_workflowjob,
            output_port__output_port_type=self.test_outputporttype,
            output_port__workflow_job=self.test_workflowjob2,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data["error_code"], "WF_HAS_CYCLES")

    def test_graph__merging_workflow(self):
        """Two branches merging into one job validate successfully."""
        test_no_input_workflowjob = mommy.make(
            "rodan.WorkflowJob", workflow=self.test_workflow
        )
        opt_for_no_input = mommy.make(
            "rodan.OutputPortType",
            minimum=0,
            maximum=10,
            is_list=False,
            job=test_no_input_workflowjob.job,
        )
        opt_for_no_input.resource_types.add(
            ResourceType.objects.get(mimetype="test/a1")
        )
        mommy.make(
            "rodan.Connection",
            output_port__workflow_job=test_no_input_workflowjob,
            output_port__output_port_type=opt_for_no_input,
            input_port__workflow_job=self.test_workflowjob2,
            input_port__input_port_type=self.test_inputporttype,
        )
        test_connection3 = mommy.make(
            "rodan.Connection",
            output_port=self.test_workflowjob2.output_ports.all()[0],
            input_port__input_port_type=self.test_inputporttype,
            input_port__workflow_job__workflow=self.test_workflow,
            input_port__workflow_job__job=self.test_job,
        )
        self.test_workflowjob3 = test_connection3.input_port.workflow_job
        mommy.make(
            "rodan.OutputPort",
            workflow_job=self.test_workflowjob3,
            output_port_type=self.test_outputporttype,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_graph__branching_workflow(self):
        """One job's outputs feeding two downstream jobs validate successfully."""
        test_connection3 = mommy.make(
            "rodan.Connection",
            output_port__output_port_type=self.test_outputporttype,
            output_port__workflow_job=self.test_workflowjob2,
            input_port__input_port_type=self.test_inputporttype,
            input_port__workflow_job__workflow=self.test_workflow,
            input_port__workflow_job__job=self.test_job,
        )
        self.test_workflowjob3 = test_connection3.input_port.workflow_job
        mommy.make(
            "rodan.OutputPort",
            workflow_job=self.test_workflowjob3,
            output_port_type=self.test_outputporttype,
        )
        test_connection2 = mommy.make(
            "rodan.Connection",
            output_port__output_port_type=self.test_outputporttype,
            output_port__workflow_job=self.test_workflowjob2,
            input_port__input_port_type=self.test_inputporttype,
            input_port__workflow_job__workflow=self.test_workflow,
            input_port__workflow_job__job=self.test_job,
        )
        self.test_second_output_workflowjob = test_connection2.input_port.workflow_job
        mommy.make(
            "rodan.OutputPort",
            workflow_job=self.test_second_output_workflowjob,
            output_port_type=self.test_outputporttype,
        )
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_graph__branching_and_merging(self):
        """
        wfjob------>wfjob_2------------>wfjob_5
             `----->wfjob_3----wfjob_4--^
        """
        self.test_workflowjob3 = mommy.make(
            "rodan.WorkflowJob", workflow=self.test_workflow, job=self.test_job
        )
        self.test_workflowjob4 = mommy.make(
            "rodan.WorkflowJob", workflow=self.test_workflow, job=self.test_job
        )
        self.test_workflowjob5 = mommy.make(
            "rodan.WorkflowJob", workflow=self.test_workflow, job=self.test_job
        )
        outputport1 = self.test_workflowjob.output_ports.first()
        outputport2 = self.test_workflowjob2.output_ports.first()
        inputport3 = mommy.make(
            "rodan.InputPort",
            workflow_job=self.test_workflowjob3,
            input_port_type=self.test_inputporttype,
        )
        outputport3 = mommy.make(
            "rodan.OutputPort",
            workflow_job=self.test_workflowjob3,
            output_port_type=self.test_outputporttype,
        )
        inputport4 = mommy.make(
            "rodan.InputPort",
            workflow_job=self.test_workflowjob4,
            input_port_type=self.test_inputporttype,
        )
        outputport4 = mommy.make(
            "rodan.OutputPort",
            workflow_job=self.test_workflowjob4,
            output_port_type=self.test_outputporttype,
        )
        inputport5A = mommy.make(
            "rodan.InputPort",
            workflow_job=self.test_workflowjob5,
            input_port_type=self.test_inputporttype,
        )
        inputport5B = mommy.make(
            "rodan.InputPort",
            workflow_job=self.test_workflowjob5,
            input_port_type=self.test_inputporttype,
        )
        outputport5 = mommy.make(  # noqa
            "rodan.OutputPort",
            workflow_job=self.test_workflowjob5,
            output_port_type=self.test_outputporttype,
        )
        mommy.make("rodan.Connection", output_port=outputport1, input_port=inputport3)
        mommy.make("rodan.Connection", output_port=outputport3, input_port=inputport4)
        mommy.make("rodan.Connection", output_port=outputport4, input_port=inputport5A)
        mommy.make("rodan.Connection", output_port=outputport2, input_port=inputport5B)
        response = self._validate(self.test_workflow.uuid)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class WorkflowSerializationTestCase(
    RodanTestTearDownMixin, APITestCase, RodanTestSetUpMixin
):
    """
    For clarification of some of the more confusing tests (i.e. loop, merging, and branching), see
    https://github.com/DDMAL/Rodan/wiki/Workflow-View-Test
    """
    def setUp(self):
        # Build a simple dummy workflow and authenticate as superuser.
        self.setUp_rodan()
        self.setUp_user()
        self.setUp_simple_dummy_workflow()
        self.client.force_authenticate(user=self.test_superuser)

    def test_export(self):
        """An exported workflow validates against the current serializer schema."""
        response = self.client.get(
            "/api/workflow/{0}/?export=yes".format(self.test_workflow.uuid)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        serializer = version_map[settings.RODAN_WORKFLOW_SERIALIZATION_FORMAT_VERSION]
        try:
            serializer.validate(response.data)
        except serializer.ValidationError as e:
            self.fail("Exported workflow does not validate: {0}".format(e.detail))

    def test_import_0_1(self):
        """A v0.1 dump re-imports cleanly; an unknown job name is rejected."""
        serializer = version_map[0.1]
        serialized = serializer.dump(self.test_workflow)
        response = self.client.post(
            # "/workflows/",
            reverse("workflow-list"),
            {
                "serialized": serialized,
                # "project": "http://localhost:8000/api/project/{0}/".format(
                #     self.test_project.uuid
                # ),
                "project": reverse("project-detail", kwargs={"pk": self.test_project.uuid}),
            },
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(self.test_project.workflows.count(), 2)
        # Corrupt the dump with a job name unknown to this installation.
        serialized["workflow_jobs"][0]["job_name"] = "hahahaha"
        response = self.client.post(
            # "/workflows/",
            reverse("workflow-list"),
            {
                "serialized": serialized,
                # "project": "http://localhost:8000/api/project/{0}/".format(
                #     self.test_project.uuid
                # ),
                "project": reverse("project-detail", kwargs={"pk": self.test_project.uuid}),
            },
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.data,
            {
                "serialized": {
                    "workflow_jobs[0].job_name": (
                        u"Job hahahaha does not exist in current Rodan installation."
                    )
                }
            },
        )
class WorkflowViewInvalidateTestCase(
    RodanTestTearDownMixin, APITestCase, RodanTestSetUpMixin
):
    """
    Unlike the test case under /test/models/test_workflow.py, this tests the invalidation
    using HTTP requests.
    """
    def setUp(self):
        # Build the basic fixture workflow and force it valid so each test
        # can observe whether an operation flips `valid` back to False.
        self.setUp_rodan()
        self.setUp_user()
        self.client.force_authenticate(user=self.test_superuser)
        self.setUp_basic_workflow()
        # force valid=True
        self.test_workflow.valid = True
        self.test_workflow.save()

    def test_creating_and_reputting_workflowgroup_should_not_invalidate(self):
        """POST then PUT of a workflow-job group must leave the workflow valid."""
        response = self.client.post(
            # "/workflowjobgroups/",
            reverse("workflowjobgroup-list"),
            {
                "workflow_jobs": [
                    # "http://localhost:8000/api/workflowjob/{0}/".format(
                    #     self.test_workflowjob.uuid
                    # ),
                    reverse("workflowjob-detail", kwargs={"pk": self.test_workflowjob.uuid}),
                    # "http://localhost:8000/api/workflowjob/{0}/".format(
                    #     self.test_workflowjob2.uuid
                    # ),
                    reverse("workflowjob-detail", kwargs={"pk": self.test_workflowjob2.uuid}),
                ],
                "name": "test",
            },
            format="json",
        )
        assert response.status_code == status.HTTP_201_CREATED, "this should pass"
        self.test_workflow.refresh_from_db()
        self.assertTrue(self.test_workflow.valid)
        response = self.client.put(
            # "/api/workflowjobgroup/{0}/".format(response.data["uuid"]),
            reverse("workflowjobgroup-detail", kwargs={"pk": response.data["uuid"]}),
            response.data,
            format="json",
        )
        assert response.status_code == status.HTTP_200_OK, "this should pass"
        self.test_workflow.refresh_from_db()
        self.assertTrue(self.test_workflow.valid)

    def test_altering_workflowgroup_should_not_invalidate(self):
        """PUTting new members into an existing group must not invalidate."""
        # add a wfj group
        self.test_workflowjobgroup = mommy.make(
            "rodan.WorkflowJobGroup", workflow=self.test_workflow
        )
        self.test_workflowjob.group = self.test_workflowjobgroup
        self.test_workflowjob.save()
        # force valid=True
        self.test_workflow.valid = True
        self.test_workflow.save()
        response = self.client.put(
            # "/api/workflowjobgroup/{0}/".format(self.test_workflowjobgroup.pk),
            reverse("workflowjobgroup-detail", kwargs={"pk": self.test_workflowjobgroup.pk}),
            {
                "workflow_jobs": [
                    # "http://localhost:8000/api/workflowjob/{0}/".format(
                    #     self.test_workflowjob.uuid
                    # ),
                    reverse("workflowjob-detail", kwargs={"pk": self.test_workflowjob.uuid}),
                    # "http://localhost:8000/api/workflowjob/{0}/".format(
                    #     self.test_workflowjob2.uuid
                    # ),
                    reverse("workflowjob-detail", kwargs={"pk": self.test_workflowjob2.uuid}),
                ],
                "name": "test",
            },
            format="json",
        )
        assert response.status_code == status.HTTP_200_OK, "this should pass"
        self.test_workflow.refresh_from_db()
        self.assertTrue(self.test_workflow.valid)

    def test_deleting_workflowgroup_should_not_invalidate(self):
        """DELETEing a group must not invalidate the containing workflow."""
        # add a wfj group
        self.test_workflowjobgroup = mommy.make(
            "rodan.WorkflowJobGroup", workflow=self.test_workflow
        )
        self.test_workflowjob.group = self.test_workflowjobgroup
        self.test_workflowjob.save()
        # force valid=True
        self.test_workflow.valid = True
        self.test_workflow.save()
        response = self.client.delete(
            # "/api/workflowjobgroup/{0}/?format=json".format(self.test_workflowjobgroup.pk)
            reverse(
                "workflowjobgroup-detail",
                kwargs={"pk": self.test_workflowjobgroup.pk}
            ) + "?format=json",
            # format="json"  # Format is not implemented in delete?
        )
        assert response.status_code == status.HTTP_204_NO_CONTENT, "this should pass"
        self.test_workflow.refresh_from_db()
        self.assertTrue(self.test_workflow.valid)

    def test_importing_workflow_should_not_invalidate_origin_but_invalidate_target(
        self
    ):
        """Importing into wf2 invalidates wf2 only; the origin stays valid."""
        wf2 = mommy.make("rodan.Workflow", project=self.test_workflow.project)
        response = self.client.post(
            reverse("workflowjobgroup-list"),
            {
                # "workflow": "http://localhost:8000/api/workflow/{0}/".format(wf2.uuid),
                "workflow": reverse("workflow-detail", kwargs={"pk": wf2.uuid}),
                # "origin": "http://localhost:8000/api/workflow/{0}/".format(
                #     self.test_workflow.uuid
                # ),
                "origin": reverse("workflow-detail", kwargs={"pk": self.test_workflow.uuid}),
                "name": "test",
            },
            format="json",
        )
        assert response.status_code == status.HTTP_201_CREATED, "this should pass"
        self.test_workflow.refresh_from_db()
        wf2.refresh_from_db()
        self.assertTrue(self.test_workflow.valid)
        self.assertFalse(wf2.valid)
class WorkflowExternPortsTestCase(
    RodanTestTearDownMixin, APITestCase, RodanTestSetUpMixin
):
    """Validation should mark workflow-job ports `extern` iff they are unconnected.

    Extern ports are the workflow's entry/exit points: input ports with no
    incoming connection and output ports with no outgoing connection.
    """

    def setUp(self):
        # Authenticate as superuser so validation PATCHes are always permitted.
        self.setUp_rodan()
        self.setUp_user()
        self.client.force_authenticate(user=self.test_superuser)
    def _validate(self, workflow_uuid):
        """PATCH valid=True on the workflow, triggering server-side validation."""
        workflow_update = {"valid": True}
        return self.client.patch(
            # "/api/workflow/{0}/".format(workflow_uuid), workflow_update, format="json"
            reverse("workflow-detail", kwargs={"pk": workflow_uuid}),
            workflow_update,
            format="json",
        )
    def test_simple_workflow(self):
        """A -> M chain: A's input and M's output are the only extern ports."""
        self.setUp_simple_dummy_workflow()
        response = self._validate(self.test_workflow.uuid)
        assert response.status_code == status.HTTP_200_OK
        ip_a = self.dummy_a_wfjob.input_ports.first()
        op_a = self.dummy_a_wfjob.output_ports.first()
        ip_m = self.dummy_m_wfjob.input_ports.first()
        op_m = self.dummy_m_wfjob.output_ports.first()
        self.assertTrue(ip_a.extern)
        self.assertFalse(op_a.extern)
        self.assertFalse(ip_m.extern)
        self.assertTrue(op_m.extern)
    def test_simple_workflow_update_all(self):
        """Validation recomputes (and corrects) manually inverted extern flags."""
        self.setUp_simple_dummy_workflow()
        ip_a = self.dummy_a_wfjob.input_ports.first()
        op_a = self.dummy_a_wfjob.output_ports.first()
        ip_m = self.dummy_m_wfjob.input_ports.first()
        op_m = self.dummy_m_wfjob.output_ports.first()
        # Invert every flag; validation must restore all of them.
        ip_a.extern = False
        ip_a.save()
        op_a.extern = True
        op_a.save()
        ip_m.extern = True
        ip_m.save()
        op_m.extern = False
        op_m.save()
        response = self._validate(self.test_workflow.uuid)
        assert response.status_code == status.HTTP_200_OK
        ip_a = self.dummy_a_wfjob.input_ports.first()
        op_a = self.dummy_a_wfjob.output_ports.first()
        ip_m = self.dummy_m_wfjob.input_ports.first()
        op_m = self.dummy_m_wfjob.output_ports.first()
        self.assertTrue(ip_a.extern)
        self.assertFalse(op_a.extern)
        self.assertFalse(ip_m.extern)
        self.assertTrue(op_m.extern)
    def test_complex_workflow(self):
        """Multi-job graph: only the unconnected ports end up extern."""
        self.setUp_complex_dummy_workflow()
        response = self._validate(self.test_workflow.uuid)
        assert response.status_code == status.HTTP_200_OK
        # refetch and test
        Aip = InputPort.objects.get(uuid=self.test_Aip.uuid)
        self.assertTrue(Aip.extern)
        Aop = OutputPort.objects.get(uuid=self.test_Aop.uuid)
        self.assertFalse(Aop.extern)
        Bop = OutputPort.objects.get(uuid=self.test_Bop.uuid)
        self.assertFalse(Bop.extern)
        Cip1 = InputPort.objects.get(uuid=self.test_Cip1.uuid)
        self.assertFalse(Cip1.extern)
        Cip2 = InputPort.objects.get(uuid=self.test_Cip2.uuid)
        self.assertFalse(Cip2.extern)
        Cop1 = OutputPort.objects.get(uuid=self.test_Cop1.uuid)
        self.assertFalse(Cop1.extern)
        Cop2 = OutputPort.objects.get(uuid=self.test_Cop2.uuid)
        self.assertTrue(Cop2.extern)
        Dip1 = InputPort.objects.get(uuid=self.test_Dip1.uuid)
        self.assertTrue(Dip1.extern)
        Dip2 = InputPort.objects.get(uuid=self.test_Dip2.uuid)
        self.assertFalse(Dip2.extern)
        Dop = OutputPort.objects.get(uuid=self.test_Dop.uuid)
        self.assertFalse(Dop.extern)
        Eip1 = InputPort.objects.get(uuid=self.test_Eip1.uuid)
        self.assertFalse(Eip1.extern)
        Eip2 = InputPort.objects.get(uuid=self.test_Eip2.uuid)
        self.assertTrue(Eip2.extern)
        Eop = OutputPort.objects.get(uuid=self.test_Eop.uuid)
        self.assertTrue(Eop.extern)
        Fip1 = InputPort.objects.get(uuid=self.test_Fip1.uuid)
        self.assertTrue(Fip1.extern)
        Fip2 = InputPort.objects.get(uuid=self.test_Fip2.uuid)
        self.assertFalse(Fip2.extern)
        Fop = OutputPort.objects.get(uuid=self.test_Fop.uuid)
        self.assertTrue(Fop.extern)
| StarcoderdataPython |
4825782 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from setuptools import find_packages, setup
setup(
name='q2lint',
version='0.0.1',
license='BSD-3-Clause',
url='https://qiime2.org',
packages=find_packages(),
entry_points='''
[console_scripts]
q2lint=q2lint._main:main
''',
zip_safe=False,
package_data={'q2lint': ['REF_LICENSE']},
)
| StarcoderdataPython |
3467396 | <filename>dbots/rest/errors.py
import json
from ..errors import XenonException
# Public exception classes: the base HTTPException plus one subclass per
# handled HTTP status code.
__all__ = (
    "HTTPException",
    "HTTPNotFound",
    "HTTPForbidden",
    "HTTPBadRequest",
    "HTTPTooManyRequests",
    "HTTPUnauthorized"
)
class HTTPException(XenonException):
    """Raised for non-success HTTP responses from the REST API.

    `message` may be either a structured error payload (dict with "code",
    "message" and optional "errors") or a plain-text body.
    """

    def __init__(self, status, message):
        self.status = status
        if isinstance(message, dict):
            # Structured payload: numeric code, summary, optional field errors.
            self.code = message.get("code", 0)
            summary = message.get("message", "")
            field_errors = message.get("errors")
            if field_errors is None:
                self.text = summary
            else:
                self.text = f"{summary}\n{json.dumps(field_errors)}"
        else:
            # Plain-text body: no error code is available.
            self.code = 0
            self.text = message
        super().__init__(
            '{0.status} (error code: {1}): {2}'.format(self, self.code, self.text)
        )
class HTTPBadRequest(HTTPException):
    """400 Bad Request: malformed request payload or parameters."""
    def __init__(self, message):
        super().__init__(400, message)
class HTTPUnauthorized(HTTPException):
    """401 Unauthorized: missing or invalid credentials."""
    def __init__(self, message):
        super().__init__(401, message)
class HTTPForbidden(HTTPException):
    """403 Forbidden: authenticated but not permitted to act on the resource."""
    def __init__(self, message):
        super().__init__(403, message)
class HTTPNotFound(HTTPException):
    """404 Not Found: the requested resource does not exist."""
    def __init__(self, message):
        super().__init__(404, message)
class HTTPTooManyRequests(HTTPException):
    """429 Too Many Requests: the API rate limit was exceeded."""
    def __init__(self, message):
        super().__init__(429, message)
| StarcoderdataPython |
5063845 | <filename>tests/guard/test_inquiry.py
import pytest
from vakt.guard import Inquiry
def test_default_values():
    """A bare Inquiry defaults to empty strings and an empty context dict."""
    i = Inquiry()
    assert '' == i.subject
    assert '' == i.action
    assert '' == i.resource
    assert {} == i.context
def test_json_roundtrip():
    """All fields survive a to_json/from_json round trip unchanged."""
    i = Inquiry(resource='books:abc', action='view', subject='bobby', context={'ip': '127.0.0.1'})
    s = i.to_json()
    r1 = Inquiry.from_json(s)
    assert 'books:abc' == r1.resource
    assert 'view' == r1.action
    assert 'bobby' == r1.subject
    assert {'ip': '127.0.0.1'} == r1.context
def test_json_decode_fails_for_incorrect_data():
    """Malformed JSON input raises ValueError rather than being silently accepted."""
    with pytest.raises(ValueError):
        Inquiry.from_json('{')
def test_can_create_empty_inquiry():
    """Both the constructor and from_json accept fully empty input."""
    i = Inquiry()
    assert isinstance(i, Inquiry)
    i2 = Inquiry.from_json('{}')
    assert isinstance(i2, Inquiry)
def test_pretty_print():
    """str() includes the class name and each attribute/value pair."""
    i = Inquiry(resource='books:abc', action='view', context={'ip': '127.0.0.1'})
    assert "<class 'vakt.guard.Inquiry'>" in str(i)
    assert "'resource': 'books:abc'" in str(i)
    assert "'action': 'view'" in str(i)
    assert "'context': {'ip': '127.0.0.1'}" in str(i)
@pytest.mark.parametrize('first, second, must_equal', [
    (
        Inquiry(resource='books:abc', action='view', context={'ip': '127.0.0.1'}),
        Inquiry(action='view', resource='books:abc', context={'ip': '127.0.0.1'}),
        True,
    ),
    (
        Inquiry(action='view', resource='books:abc', context={'ip': '127.0.0.1'}),
        Inquiry(resource='books:абс', action='view', context={'ip': '127.0.0.1'}),
        False,
    ),
    (
        Inquiry(resource='books:абс', action='view', context={'ip': '127.0.0.1'}),
        Inquiry(resource='books:абс', action='view', context={'ip': '127.0.0.1'}),
        True,
    ),
    (
        Inquiry(),
        Inquiry(),
        True,
    ),
    (
        Inquiry(resource={'name': 'books:абс', 'loc': 'bar'},
                subject={'id': 123, 'teams': (123, 789, '145')},
                action={'name': 'view'},
                context={'ip': '127.0.0.1'}),
        Inquiry(resource={'name': 'books:абс', 'loc': 'bar'},
                subject={'id': 123, 'teams': (123, 789, '145')},
                action={'name': 'view'},
                context={'ip': '127.0.0.1'}),
        True,
    ),
    (
        Inquiry(resource={'name': 'books:абс', 'loc': 'bar'},
                subject={'id': 123, 'teams': (123, 789, '145')},
                action={'name': 'view'},
                context={'ip': '127.0.0.1'}),
        Inquiry(resource={'name': 'books:абс', 'loc': 'bar'},
                subject={'id': 123, 'teams': 'str'},
                action={'name': 'view'},
                context={'ip': '127.0.0.1'}),
        False,
    ),
    (
        Inquiry(resource={}, subject={}, action={}, context={}),
        Inquiry(context={}, subject={}, action={}, resource={}),
        True,
    ),
    (
        Inquiry(resource={'a': 'b', 'c': 'd'}, subject={'a': [1, 2, 3]}, action={}, context={}),
        Inquiry(context={}, subject={'a': [1, 2, 3]}, action={}, resource={'c': 'd', 'a': 'b'}),
        True,
    ),
])
def test_equals_and_equals_by_hash(first, second, must_equal):
    """Equality and hash agree: equal inquiries hash equal, unequal ones differ.

    NOTE(review): asserting hash(first) != hash(second) for unequal inquiries
    relies on there being no hash collision for these fixtures.
    """
    if must_equal:
        assert first == second
        assert hash(first) == hash(second)
    else:
        assert first != second
        assert hash(first) != hash(second)
| StarcoderdataPython |
11205768 | <reponame>ruslanlvivsky/python-algorithm<filename>swexpert/d2/sw_2001.py
# Number of test cases, read from the first line of stdin (SW Expert format).
test_cases = int(input().strip())
def catch_flies(y, x, m, mat):
    """Return the sum of the m×m submatrix of ``mat`` with top-left corner (y, x).

    ``mat`` is a sequence of equal-length rows of ints; the caller guarantees
    the window fits inside the grid. Replaces the manual double loop with
    slice sums, which is both clearer and faster (the inner sum runs in C).
    """
    return sum(sum(row[x:x + m]) for row in mat[y:y + m])
# For each test case: read an n×n grid and a window size m, then report the
# maximum number of flies catchable with a single m×m swat (brute force over
# all valid window positions).
for t in range(1, test_cases + 1):
    n, m = tuple(map(int, input().strip().split()))
    mat = [tuple(map(int, input().strip().split())) for _ in range(n)]
    flies = 0
    for i in range(n - m + 1):
        for j in range(n - m + 1):
            flies = max(flies, catch_flies(i, j, m, mat))
    print('#{} {}'.format(t, flies))
| StarcoderdataPython |
80318 | <filename>oops_fhir/r4/code_system/modifier_type_codes.py
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
# Public API of this generated module.
__all__ = ["ModifierTypeCodes"]
# The FHIR CodeSystem definition ships as a JSON file next to this module
# (same stem, ".json" suffix); parse it once at import time.
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class ModifierTypeCodes:
    """
    Modifier type Codes
    This value set includes sample Modifier type codes.
    Status: draft - Version: 4.0.1
    Copyright This is an example set.
    http://terminology.hl7.org/CodeSystem/modifiers
    """
    # Generated code: one CodeSystemConcept class attribute per code defined
    # in the CodeSystem JSON; the bare string following each assignment
    # restates the concept's display and definition.
    a = CodeSystemConcept(
        {
            "code": "a",
            "definition": "Repair of prior service or installation.",
            "display": "Repair of prior service or installation",
        }
    )
    """
    Repair of prior service or installation
    Repair of prior service or installation.
    """
    b = CodeSystemConcept(
        {
            "code": "b",
            "definition": "Temporary service or installation.",
            "display": "Temporary service or installation",
        }
    )
    """
    Temporary service or installation
    Temporary service or installation.
    """
    c = CodeSystemConcept(
        {
            "code": "c",
            "definition": "Treatment associated with TMJ.",
            "display": "TMJ treatment",
        }
    )
    """
    TMJ treatment
    Treatment associated with TMJ.
    """
    e = CodeSystemConcept(
        {
            "code": "e",
            "definition": "Implant or associated with an implant.",
            "display": "Implant or associated with an implant",
        }
    )
    """
    Implant or associated with an implant
    Implant or associated with an implant.
    """
    rooh = CodeSystemConcept(
        {
            "code": "rooh",
            "definition": "A Rush service or service performed outside of normal office hours.",
            "display": "Rush or Outside of office hours",
        }
    )
    """
    Rush or Outside of office hours
    A Rush service or service performed outside of normal office hours.
    """
    x = CodeSystemConcept({"code": "x", "definition": "None.", "display": "None"})
    """
    None
    None.
    """
    class Meta:
        # Back-reference to the parsed FHIR CodeSystem resource for this module.
        resource = _resource
| StarcoderdataPython |
"""handle sessions

Revision ID: 7f317474332d
Revises: (unknown — the original value was scrubbed from this file)
Create Date: 2016-08-30 11:47:35.513396

"""

# revision identifiers, used by Alembic.
# The revision id was mangled by an automated secret scrubber
# ("7<PASSWORD>"); restored from the "Revision ID" in the module docstring.
revision = "7f317474332d"
# TODO(review): down_revision was also scrubbed ("<PASSWORD>") and its true
# value is unrecoverable from this file alone — restore it from the previous
# migration's `revision` before running `alembic downgrade`.
down_revision = "<PASSWORD>"
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this migration: create the ``session`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "session",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("uuid", sa.String(length=256), nullable=True),
        sa.Column("user", sa.String(length=256), nullable=True),
        sa.Column("ip", sa.String(length=256), nullable=True),
        sa.Column("ua", sa.String(length=2048), nullable=True),
        sa.Column("timestamp", sa.DateTime(), nullable=True),
        sa.Column("expire", sa.DateTime(), nullable=True),
        sa.Column("permanent", sa.Boolean(), nullable=True),
        sa.Column("api", sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("uuid"),
    )
    ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the ``session`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("session")
    ### end Alembic commands ###
| StarcoderdataPython |
1635890 | <filename>Dataset/Leetcode/train/102/29.py
class Solution:
    def XXX(self, root: TreeNode) -> List[List[int]]:
        """Level-order traversal: node values grouped by depth, left to right."""
        levels = collections.defaultdict(list)

        def visit(node, depth):
            # Depth-first, but bucketing by depth; visiting left before right
            # preserves left-to-right order within each level.
            if not node:
                return
            levels[depth].append(node.val)
            visit(node.left, depth + 1)
            visit(node.right, depth + 1)

        visit(root, 0)
        # dicts preserve insertion order, and depth d is always first seen
        # before depth d+1, so values() yields the levels top to bottom.
        return list(levels.values())
11214656 | <gh_stars>1-10
# Copyright 2018 dhtech
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file
import lib
def generate(host, mgmt_fqdn, *args):
    """Build the 'provision' configuration dict for *host*.

    Arg format is domain:host.
    The OS of the host is used to get the backend type.

    Besides ``domain:host`` pairs, the literal arg ``'ocp'`` enables the
    OpenCompute rack inventory and its static DHCP lease table.
    """
    # EVENT-domain hosts get a per-event vault mount name (e.g. "<event>-services").
    vault_prefix = ''
    if lib.get_domain(host) == 'EVENT':
        vault_prefix = '%s-' % lib.get_current_event()
    vault_mount = '%sservices' % vault_prefix
    deploy_domain = lib.get_domain(host)
    deploy_gw, _ = lib.get_network_gateway(deploy_domain+'@DEPLOY')
    deploy_networks, _ = lib.get_networks_with_name(deploy_domain+'@DEPLOY')
    # NOTE(review): assumes a single "network/prefix" string; a second '/'
    # would make this 2-tuple unpack raise ValueError — confirm upstream format.
    deploy_network, deploy_prefix = deploy_networks.split('/', 2)
    deploy_conf = {
        'gateway': deploy_gw,
        'network': deploy_network,
        'prefix': deploy_prefix
    }
    info = {'esxi': [], 'c7000': [], 'vault_mount': vault_mount,
            'domain': lib.get_domain(host).lower(),
            'deploy_conf': deploy_conf, 'ocp': False,
            'ocp_domain': 'ocp-'+lib.get_domain(host).lower(),
            'ocp_machines': []}
    # 'no-rfc1918' is the sentinel for "host has no management interface".
    if mgmt_fqdn != 'no-rfc1918':
        mgmt_network, _ = lib.get_ipv4_network(mgmt_fqdn)
        _, mgmt_prefix = mgmt_network.split('/', 2)
        mgmt_ip = lib.resolve_nodes_to_ip([mgmt_fqdn])
        info['mgmt_if'] = {
            'ip': mgmt_ip[mgmt_fqdn][0],
            'prefix': mgmt_prefix
        }
    for arg in args:
        if arg == 'ocp':
            info['ocp'] = True
            # Static inventory of OpenCompute nodes: NIC MAC + management MAC.
            ocp_macs = [
                {'name': 'r0a0', 'mac': 'E41D2DFC296A',
                 'mgmt-mac': 'E41D2DFC296C'},
                {'name': 'r0a1', 'mac': 'E41D2DFC2826',
                 'mgmt-mac': 'E41D2DFC2828'},
                {'name': 'r0a2', 'mac': 'E41D2DFC5F58',
                 'mgmt-mac': 'E41D2DFC5F60'},
                {'name': 'r0a3', 'mac': 'E41D2DFC4554',
                 'mgmt-mac': 'E41D2DFC4556'},
                {'name': 'r0a4', 'mac': '7CFE904142EE',
                 'mgmt-mac': '7CFE904142F0'},
                {'name': 'r0a5', 'mac': '7CFE9041826C',
                 'mgmt-mac': '7CFE9041826E'},
                {'name': 'r0a6', 'mac': '7CFE90413FE8',
                 'mgmt-mac': '7CFE90413FEA'},
                {'name': 'r0a7', 'mac': 'E41D2DD3C842',
                 'mgmt-mac': 'E41D2DD3C844'},
                {'name': 'r0a8', 'mac': 'E41D2DFC2B08',
                 'mgmt-mac': 'E41D2DFC2B0A'},
                {'name': 'r0a9', 'mac': '7CFE9041327A',
                 'mgmt-mac': '7CFE9041327C'},
                {'name': 'r0b0', 'mac': '7CFE904103EE',
                 'mgmt-mac': '7CFE904103F0'},
                {'name': 'r0b1', 'mac': 'E41D2DFC2B50',
                 'mgmt-mac': 'E41D2DFC2B52'},
                {'name': 'r0b2', 'mac': '7CFE904136A0',
                 'mgmt-mac': '7CFE904136A2'},
                # NOTE(review): r0b3's mgmt-mac equals its NIC mac — every
                # other node differs by +2; looks like a copy/paste slip in
                # the inventory. Verify against the physical node.
                {'name': 'r0b3', 'mac': 'E41D2DFC4164',
                 'mgmt-mac': 'E41D2DFC4164'},
                {'name': 'r0b4', 'mac': '7CFE90428E2C',
                 'mgmt-mac': '7CFE90428E2E'},
                {'name': 'r0b5', 'mac': '7CFE9041FE08',
                 'mgmt-mac': '7CFE9041FE0A'},
                {'name': 'r0b6', 'mac': 'E41D2DFC177C',
                 'mgmt-mac': 'E41D2DFC177E'},
                {'name': 'r0b7', 'mac': 'E41D2DFCC594',
                 'mgmt-mac': 'E41D2DFCC596'},
                {'name': 'r0b8', 'mac': 'E41D2DFC2A30',
                 'mgmt-mac': 'E41D2DFC2A32'},
                {'name': 'r0b9', 'mac': 'E41D2DFCC180',
                 'mgmt-mac': 'E41D2DFCC182'},
                {'name': 'r0c0', 'mac': 'E41D2DFCC648',
                 'mgmt-mac': 'E41D2DFCC64A'},
                {'name': 'r0c1', 'mac': '7CFE90428088',
                 'mgmt-mac': '7CFE9042808A'},
                {'name': 'r0c2', 'mac': '7CFE904182EA',
                 'mgmt-mac': '7CFE904182EC'},
                {'name': 'r0c3', 'mac': 'E41D2DFC890A',
                 'mgmt-mac': 'E41D2DFC890C'},
                {'name': 'r0c4', 'mac': '7CFE90418224',
                 'mgmt-mac': '7CFE90418226'},
                {'name': 'r0c5', 'mac': 'E41D2DFC2838',
                 'mgmt-mac': 'E41D2DFC283A'},
                {'name': 'r0c6', 'mac': '7CFE90428E98',
                 'mgmt-mac': '7CFE90428E9A'},
                {'name': 'r0c7', 'mac': '7CFE90429C60',
                 'mgmt-mac': '7CFE90429C62'},
                {'name': 'r0c8', 'mac': '7CFE90429CF0',
                 'mgmt-mac': '7CFE90429CF2'},
                {'name': 'r0c9', 'mac': '7CFE9041AAC8',
                 'mgmt-mac': '7CFE9041AACA'},
                {'name': 'r1a0', 'mac': 'E41D2DFC17D6',
                 'mgmt-mac': 'E41D2DFC17D8'},
                {'name': 'r1a1', 'mac': '248A07907064',
                 'mgmt-mac': '248A07907066'},
                {'name': 'r1a2', 'mac': '7CFE9041328C',
                 'mgmt-mac': '7CFE9041328E'},
                {'name': 'r1a3', 'mac': 'E41D2DFC16B6',
                 'mgmt-mac': 'E41D2DFC16B8'},
                {'name': 'r1a4', 'mac': '7CFE90423F24',
                 'mgmt-mac': '7CFE90423F26'},
                {'name': 'r1a5', 'mac': '7CFE9041CE02',
                 'mgmt-mac': '7CFE9041CE04'},
                {'name': 'r1a6', 'mac': '7CFE9041102A',
                 'mgmt-mac': '7CFE9041102C'},
                {'name': 'r1a7', 'mac': '7CFE9042C53A',
                 'mgmt-mac': '7CFE9042C53C'},
                {'name': 'r1a8', 'mac': '7CFE9042C6A2',
                 'mgmt-mac': '7CFE9042C6A4'},
                {'name': 'r1a9', 'mac': '7CFE90423DF2',
                 'mgmt-mac': '7CFE90423DF4'},
                {'name': 'r1b0', 'mac': 'E41D2DFC7F7A',
                 'mgmt-mac': 'E41D2DFC7F7C'},
                {'name': 'r1b1', 'mac': '248A079070AC',
                 'mgmt-mac': '248A079070AE'},
                {'name': 'r1b2', 'mac': 'E41D2DFC8E86',
                 'mgmt-mac': 'E41D2DFC8E88'},
                {'name': 'r1b3', 'mac': '7CFE90428F5E',
                 'mgmt-mac': '7CFE90428F60'},
                {'name': 'r1b4', 'mac': 'E41D2D54019C',
                 'mgmt-mac': 'E41D2D54019E'},
                {'name': 'r1b5', 'mac': '7CFE9042E1B4',
                 'mgmt-mac': '7CFE9042E1B6'},
                {'name': 'r1b6', 'mac': '7CFE9041FEF2',
                 'mgmt-mac': '7CFE9041FEF4'},
                {'name': 'r1b7', 'mac': '7CFE90412B3C',
                 'mgmt-mac': '7CFE90412B3E'},
                {'name': 'r1b8', 'mac': '7CFE90410ED4',
                 'mgmt-mac': '7CFE90410ED6'},
                {'name': 'r1b9', 'mac': '7CFE9041A516',
                 'mgmt-mac': '7CFE9041A518'},
                {'name': 'r1c0', 'mac': 'E41D2DFC2A8A',
                 'mgmt-mac': 'E41D2DFC2A8C'},
                {'name': 'r1c1', 'mac': 'E41D2D53FF5C',
                 'mgmt-mac': 'E41D2D53FF5E'},
                {'name': 'r1c2', 'mac': '7CFE90424188',
                 'mgmt-mac': '7CFE9042418A'},
                {'name': 'r1c3', 'mac': 'E41D2DFCCE28',
                 'mgmt-mac': 'E41D2DFCCE2A'},
                {'name': 'r1c4', 'mac': '7CFE90422946',
                 'mgmt-mac': '7CFE90422948'},
                {'name': 'r1c5', 'mac': '7CFE9042E340',
                 'mgmt-mac': '7CFE9042E342'},
                {'name': 'r1c6', 'mac': '7CFE9041354A',
                 'mgmt-mac': '7CFE9041354C'},
                {'name': 'r1c7', 'mac': '7CFE9041AB34',
                 'mgmt-mac': '7CFE9041AB36'},
                {'name': 'r1c8', 'mac': '7CFE9042CB34',
                 'mgmt-mac': '7CFE9042CB36'},
                {'name': 'r1c9', 'mac': 'E41D2DFC2A66',
                 'mgmt-mac': 'E41D2DFC2A68'}
            ]
            # Building dhcp static lease configuration for dhcpd.
            leases = []
            lastoctet = 100
            for line in ocp_macs:
                # Reformat "AABBCC..." into byte pairs joined with ':'.
                t = iter(line['mgmt-mac'])
                mgmt_mac = ':'.join(a+b for a, b in zip(t, t))
                d = {}
                d['name'] = line['name']
                d['mac'] = line['mac']
                d['macshort'] = line['mac']  # NOTE(review): identical to 'mac' — intended?
                d['mgmt-mac'] = mgmt_mac
                # Leases are handed out sequentially starting at 10.32.12.100.
                d['ip'] = '10.32.12.'+str(lastoctet)
                lastoctet += 1
                leases.append(d)
            info['ocp_machines'].extend(leases)
            continue
        # NOTE(review): rebinding `host` here shadows the function parameter;
        # later iterations only see the most recent arg's host.
        domain, host = arg.split(':', 2)
        os = lib.get_os(host)
        ip = lib.resolve_nodes_to_ip([host])
        backend = {
            'ip': ip[host][0],
            'fqdn': host,
            'domain': domain
        }
        # Route the backend into the bucket matching its OS type.
        if os == 'vcenter' or os == 'esxi':
            info['esxi'].append(backend)
        elif os == 'c7000':
            info['c7000'].append(backend)
    return {'provision': info}
| StarcoderdataPython |
8071199 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:compiler_env_state."""
import json
from io import StringIO
from pathlib import Path
import pytest
from pydantic import ValidationError as PydanticValidationError
from compiler_gym import CompilerEnvState, CompilerEnvStateWriter
from compiler_gym.compiler_env_state import CompilerEnvStateReader
from tests.test_main import main
# Pull in shared fixtures (e.g. `tmpwd`) from the common plugin module.
pytest_plugins = ["tests.pytest_plugins.common"]
def test_state_from_dict_empty():
    """An empty dict lacks required fields and must fail validation."""
    with pytest.raises(PydanticValidationError):
        CompilerEnvState(**{})
def test_state_invalid_walltime():
    """Negative walltimes are rejected with a descriptive message."""
    with pytest.raises(PydanticValidationError, match="Walltime cannot be negative"):
        CompilerEnvState(
            benchmark="benchmark://cbench-v0/foo",
            walltime=-1,
            reward=1.5,
            commandline="",
        )
def test_state_to_json_from_dict():
    """All fields survive a JSON round trip unchanged."""
    original_state = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo",
        walltime=100,
        reward=1.5,
        commandline="-a -b -c",
    )
    state_from_dict = CompilerEnvState(**json.loads(original_state.json()))
    assert state_from_dict.benchmark == "benchmark://cbench-v0/foo"
    assert state_from_dict.walltime == 100
    assert state_from_dict.reward == 1.5
    assert state_from_dict.commandline == "-a -b -c"
def test_state_to_json_from_dict_no_reward():
    """An unset reward deserializes as None, not zero."""
    original_state = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=100, commandline="-a -b -c"
    )
    state_from_dict = CompilerEnvState(**json.loads(original_state.json()))
    assert state_from_dict.benchmark == "benchmark://cbench-v0/foo"
    assert state_from_dict.walltime == 100
    assert state_from_dict.reward is None
    assert state_from_dict.commandline == "-a -b -c"
def test_state_equality_different_types():
    """Comparing a state to a non-state yields not-equal, never a TypeError."""
    state = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
    )
    assert not state == 5  # noqa testing __eq__
    assert state != 5  # testing __ne__
def test_state_equality_same():
    """Two identically-constructed states compare equal."""
    a = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
    )
    b = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
    )
    assert a == b  # testing __eq__
    assert not a != b  # noqa testing __ne__
def test_state_equality_differnt_walltime():
    """Test that walltime is not compared."""
    a = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
    )
    b = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=5, commandline="-a -b -c"
    )
    assert a == b  # testing __eq__
    assert not a != b  # noqa testing __ne__
def test_state_equality_one_sided_reward():
    """A state without a reward equals an otherwise-identical state with one."""
    a = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo",
        walltime=5,
        commandline="-a -b -c",
        reward=2,
    )
    b = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=5, commandline="-a -b -c"
    )
    assert a == b  # testing __eq__
    assert b == a  # testing __eq__
    assert not a != b  # noqa testing __ne__
    assert not b != a  # noqa testing __ne__
def test_state_equality_equal_reward():
    """Equal rewards keep otherwise-identical states equal, in both directions."""
    a = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo",
        walltime=5,
        commandline="-a -b -c",
        reward=2,
    )
    b = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo",
        walltime=5,
        commandline="-a -b -c",
        reward=2,
    )
    assert a == b  # testing __eq__
    assert b == a  # testing __eq__
    assert not a != b  # noqa testing __ne__
    assert not b != a  # noqa testing __ne__
def test_state_equality_unequal_reward():
    """Different rewards make otherwise-identical states unequal."""
    a = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo",
        walltime=5,
        commandline="-a -b -c",
        reward=2,
    )
    b = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo",
        walltime=5,
        commandline="-a -b -c",
        reward=3,
    )
    assert not a == b  # noqa testing __eq__
    assert not b == a  # noqa testing __eq__
    assert a != b  # testing __ne__
    assert b != a  # testing __ne__
def test_compiler_env_state_writer():
    """The writer emits a CSV header followed by one row per state."""
    buf = StringIO()
    writer = CompilerEnvStateWriter(buf)
    writer.write_state(
        CompilerEnvState(
            benchmark="benchmark://cbench-v0/foo",
            walltime=5,
            commandline="-a -b -c",
            reward=2,
        ),
        flush=True,
    )
    assert buf.getvalue() == (
        "benchmark,reward,walltime,commandline\n"
        "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
    )
def test_compiler_env_state_writer_no_header():
    """With header=False, only the data row is written."""
    buf = StringIO()
    writer = CompilerEnvStateWriter(buf, header=False)
    writer.write_state(
        CompilerEnvState(
            benchmark="benchmark://cbench-v0/foo",
            walltime=5,
            commandline="-a -b -c",
            reward=2,
        ),
        flush=True,
    )
    assert buf.getvalue() == "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
# BUG FIX: range(1) only ever produced flush=0, so the boolean `flush`
# parameter was never exercised as truthy; range(2) covers both cases.
@pytest.mark.parametrize("flush", range(2))
def test_compiler_env_state_writer_with_statement(tmpwd: Path, flush: bool):
    """Writer used as a context manager closes the file and persists the state."""
    path = Path("results.csv")
    assert not path.is_file()  # Sanity check.
    f = open(path, "w")
    with CompilerEnvStateWriter(f) as writer:
        writer.write_state(
            CompilerEnvState(
                benchmark="benchmark://cbench-v0/foo",
                walltime=5,
                commandline="-a -b -c",
                reward=2,
            ),
            flush=flush,
        )
    # Exiting the `with` block must close the underlying file...
    assert f.closed
    # ...and the full header + row must be on disk regardless of `flush`.
    with open(path) as f:
        assert f.read() == (
            "benchmark,reward,walltime,commandline\n"
            "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
        )
def test_compiler_env_state_reader():
    """A header + row parses into a single state."""
    buf = StringIO(
        "benchmark,reward,walltime,commandline\n"
        "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
    )
    reader = CompilerEnvStateReader(buf)
    assert list(reader) == [
        CompilerEnvState(
            benchmark="benchmark://cbench-v0/foo",
            walltime=5,
            commandline="-a -b -c",
            reward=2,
        )
    ]
def test_compiler_env_state_reader_no_header():
    """A headerless row is parsed using the default column order."""
    buf = StringIO("benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n")
    reader = CompilerEnvStateReader(buf)
    assert list(reader) == [
        CompilerEnvState(
            benchmark="benchmark://cbench-v0/foo",
            walltime=5,
            commandline="-a -b -c",
            reward=2,
        )
    ]
def test_compiler_env_state_reader_with_header():
    """The header row is consumed, not parsed as data."""
    buf = StringIO(
        "benchmark,reward,walltime,commandline\n"
        "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
    )
    reader = CompilerEnvStateReader(buf)
    assert list(reader) == [
        CompilerEnvState(
            benchmark="benchmark://cbench-v0/foo",
            walltime=5,
            commandline="-a -b -c",
            reward=2,
        )
    ]
def test_compiler_env_state_reader_with_header_out_of_order_columns():
    """Columns are matched by header name, not position."""
    buf = StringIO(
        "commandline,reward,benchmark,walltime\n"
        "-a -b -c,2.0,benchmark://cbench-v0/foo,5.0\n"
    )
    reader = CompilerEnvStateReader(buf)
    assert list(reader) == [
        CompilerEnvState(
            benchmark="benchmark://cbench-v0/foo",
            walltime=5,
            commandline="-a -b -c",
            reward=2,
        )
    ]
def test_compiler_env_state_reader_empty_input():
    """An empty stream yields no states."""
    buf = StringIO("")
    reader = CompilerEnvStateReader(buf)
    assert list(reader) == []
def test_compiler_env_state_reader_header_only():
    """A header with no rows yields no states."""
    buf = StringIO("benchmark,reward,walltime,commandline\n")
    reader = CompilerEnvStateReader(buf)
    assert list(reader) == []
def test_state_from_csv_invalid_format():
    """A row with the wrong column count raises a descriptive ValueError."""
    buf = StringIO("abcdef\n")
    reader = CompilerEnvStateReader(buf)
    with pytest.raises(
        ValueError, match=r"Expected 4 columns in the first row of CSV: \['abcdef'\]"
    ):
        next(iter(reader))
def test_state_serialize_deserialize_equality():
    """A state written by the writer is read back identically by the reader."""
    original_state = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo",
        walltime=100,
        reward=1.5,
        commandline="-a -b -c",
    )
    buf = StringIO()
    CompilerEnvStateWriter(buf).write_state(original_state)
    buf.seek(0)  # Rewind the buffer for reading.
    state_from_csv = next(iter(CompilerEnvStateReader(buf)))
    assert state_from_csv.benchmark == "benchmark://cbench-v0/foo"
    assert state_from_csv.walltime == 100
    assert state_from_csv.reward == 1.5
    assert state_from_csv.commandline == "-a -b -c"
def test_state_serialize_deserialize_equality_no_reward():
    """An absent reward survives the write/read round trip as None."""
    original_state = CompilerEnvState(
        benchmark="benchmark://cbench-v0/foo", walltime=100, commandline="-a -b -c"
    )
    buf = StringIO()
    CompilerEnvStateWriter(buf).write_state(original_state)
    buf.seek(0)  # Rewind the buffer for reading.
    state_from_csv = next(iter(CompilerEnvStateReader(buf)))
    assert state_from_csv.benchmark == "benchmark://cbench-v0/foo"
    assert state_from_csv.walltime == 100
    assert state_from_csv.reward is None
    assert state_from_csv.commandline == "-a -b -c"
# Allow running this test module directly (delegates to the shared test main).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
87761 | <reponame>sysdiglabs/syscli
# Copyright 2018 Sysdig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sdc.sdc_enums import EXIT_CODES
from sdc.sdc_extend import SdMonitorClient, SdSecureClient
def show_users(sdmonitor: SdMonitorClient):
    """Print a table of all users known to the Sysdig Monitor backend.

    Returns EXIT_CODES.OK on success, EXIT_CODES.ERR_METHOD_NOT_FOUND if
    the API call fails (in which case the error text is printed instead).
    """
    ok, data = sdmonitor.get_users()
    if not ok:
        # On failure, `data` carries the error description.
        print(data)
        return EXIT_CODES.ERR_METHOD_NOT_FOUND
    print("%-6s %-38s %-13s %-15s %-7s" % ("ID", "USERNAME", "FIRSTNAME", "LASTNAME", "ACTIVE"))
    for entry in data:
        state = 'enabled' if entry['enabled'] else 'disabled'
        print("%-6d %-38s %-13s %-15s %-7s"
              % (entry['id'], entry['username'], entry['firstName'], entry['lastName'], state))
    return EXIT_CODES.OK
def show_dashboards(sdmonitor: SdMonitorClient):
    """Print a table of all dashboards, flagging auto-created/shared/public ones.

    Returns EXIT_CODES.OK on success, EXIT_CODES.ERR_METHOD_NOT_FOUND if the
    API call fails (in which case the error text is printed instead).
    """
    ok, data = sdmonitor.get_dashboards()
    if not ok:
        # On failure, `data` carries the error description.
        print(data)
        return EXIT_CODES.ERR_METHOD_NOT_FOUND
    print("%-6s %-48s %-38s %-15s %-7s %-7s" % ("ID", "NAME", "USER", "AUTOCREATED", "SHARED", "PUBLIC"))
    for dash in data['dashboards']:
        flags = tuple('yes' if dash[key] else 'no'
                      for key in ('autoCreated', 'isShared', 'isPublic'))
        row = (dash['id'], dash['name'].strip(), dash['username'].strip()) + flags
        print("%-6d %-48s %-38s %-15s %-7s %-7s" % row)
    return EXIT_CODES.OK
def show_policies(sdsecure: SdSecureClient):
    """Print a table of Sysdig Secure policies with severity and channel count.

    Returns EXIT_CODES.OK on success, EXIT_CODES.ERR_METHOD_NOT_FOUND if the
    API call fails (in which case the error text is printed instead).
    """
    ok, data = sdsecure.list_policies()
    if not ok:
        # On failure, `data` carries the error description.
        print(data)
        return EXIT_CODES.ERR_METHOD_NOT_FOUND
    print("%-6s %-100s %-8s %-15s %-7s" % ("ID", "NAME", "SEVERITY", "AUTOCREATED", "NOTIFICATION"))
    for pol in data['policies']:
        builtin = 'yes' if pol['isBuiltin'] else 'no'
        channel_count = len(pol['notificationChannelIds'])
        row = (pol['id'], pol['name'].strip(), pol['severity'], builtin, channel_count)
        print("%-6d %-100s %-8s %-15s %-7s" % row)
    return EXIT_CODES.OK
| StarcoderdataPython |
9670639 | #!/usr/bin/env python
from __future__ import print_function
import os
import cv2
import numpy as np
import ugv_bot.srv
from ugv_bot.srv import SendImu, SendImuResponse
import rospy
import serial
from sensor_msgs.msg import Imu
# Arduino Uno on the first ACM serial port; the short timeout keeps
# readline() from blocking the service handler for long.
uno=serial.Serial(port='/dev/ttyACM0', baudrate='9600', timeout=0.01)
def read_serial():
    """Read one CSV line of IMU samples from the serial port into the globals.

    The Arduino emits 9 comma-separated floats per line:
    accel (ax, ay, az), gyro (gx, gy, gz), magnetometer (mx, my, mz).
    Malformed or short lines are skipped (the globals keep their old values).
    """
    global ax, ay, az, gx, gy, gz, mx, my, mz
    data = uno.readline().decode()
    data = data.replace("\n", "")
    sdata = data.split(',')
    if len(sdata) == 9:
        try:
            values = [float(field) for field in sdata]
        except ValueError:
            # Partial/garbled line (short read timeout) — keep previous sample.
            print("Not A Float !!!!")
            return
        ax, ay, az = values[0], values[1], values[2]
        gx, gy, gz = values[3], values[4], values[5]
        # BUG FIX: the magnetometer was previously read from fields 3-5,
        # duplicating the gyro values; fields 6-8 carry the magnetometer.
        mx, my, mz = values[6], values[7], values[8]
        print("----------Varibles Updated----------")
def handle_imu(request):
    """ROS service handler: sample the IMU over serial, return a SendImuResponse.

    Any truthy request triggers a fresh serial read; the response is then
    populated from the module-level ax..az / gx..gz / mx..mz globals.
    """
    # global imu_data
    #print(os.path.dirname(os.getcwd())+"/catkin_ws/src/ugv_bot/Scripts/test_img.png")
    global ax
    global ay
    global az
    global gx
    global gy
    global gz
    global mx
    global my
    global mz
    if VERBOSE:
        print("Request Variable",request)
        print("Request Variable type",type(request))
        print("-----------------------------------------")
    if request:
        read_serial()
    # NOTE(review): the globals are first assigned inside read_serial(); if the
    # very first read yields a malformed line this raises NameError — confirm
    # the serial stream always produces a valid sample before the first call.
    srv_msg=SendImuResponse()
    srv_msg.angular_velocity.x = gx
    srv_msg.angular_velocity.y = gy
    srv_msg.angular_velocity.z = gz
    srv_msg.linear_acceleration.x = ax
    srv_msg.linear_acceleration.y = ay
    srv_msg.linear_acceleration.z = az
    srv_msg.magnetometer.x = mx
    srv_msg.magnetometer.y = my
    srv_msg.magnetometer.z = mz
    srv_msg.header.seq=0
    srv_msg.header.frame_id='odom'
    srv_msg.header.stamp=rospy.Time.now()
    print("Hey Homie........")
    print("Done Sending !!!!")
    print("===============================================")
    return srv_msg
# def imu_redirector(ros_data):
# global imu_data
# imu_data = ros_data
# print('===========')
# Entry point: register the ROS node and the IMU service, then block forever.
if __name__ == "__main__":
    VERBOSE = False
    rospy.init_node('imu_server')
    #rospy.Subscriber("/Imu_accel",Imu ,imu_redirector,queue_size = 1)
    s = rospy.Service('get_imu_service',SendImu , handle_imu)
    print("Ready to send imu.")
    rospy.spin()
5050723 | """Contains the time command"""
import traceback
import json
import datetime
import configparser
import botutils
from library import display_time
from discord.ext import commands
from botc import check_if_is_player, Phase
from botc.gameloops import base_day_loop, calculate_base_day_duration, debate_timer, \
nomination_loop
# Game-length preferences, loaded once at import time from preferences.INI.
Config = configparser.ConfigParser()
Config.read("preferences.INI")
# Lengths
BASE_NIGHT = int(Config["botc"]["BASE_NIGHT"])
NIGHT_MULTIPLER = int(Config["botc"]["NIGHT_MULTIPLER"])
BASE_DAWN = int(Config["botc"]["BASE_DAWN"])
DAWN_MULTIPLIER = int(Config["botc"]["DAWN_MULTIPLIER"])
DEBATE_TIME = int(Config["botc"]["DEBATE_TIME"])
INCREMENT = int(Config["botc"]["INCREMENT"])
# User-facing message templates: generic bot text and per-phase time strings.
with open('botutils/bot_text.json') as json_file:
    language = json.load(json_file)
error_str = language["system"]["error"]
with open('botc/game_text.json') as json_file:
    documentation = json.load(json_file)
time_night = documentation["gameplay"]["time_night"]
time_dawn = documentation["gameplay"]["time_dawn"]
time_day_base = documentation["gameplay"]["time_day_base"]
time_voting = documentation["gameplay"]["time_voting"]
time_debate = documentation["gameplay"]["time_debate"]
time_nomination = documentation["gameplay"]["time_nomination"]
class Time(commands.Cog, name = documentation["misc"]["townhall_cog"]):
"""BoTC in-game commands cog
Time command - used for viewing the time left for each different phase or
stage of the game
"""
    def __init__(self, client):
        # The discord.py bot/client instance this cog is attached to.
        self.client = client
    def cog_check(self, ctx):
        """Gate every command in this cog.

        Returns True when the invoker is an admin (bypass), a registered
        player, or posting from the spectators channel.
        """
        return botutils.check_if_admin(ctx) or \
               check_if_is_player(ctx) or \
               botutils.check_if_spec(ctx)
# ---------- TIME COMMAND ----------------------------------------
@commands.command(
pass_context = True,
name = "time",
aliases = ["t"],
hidden = False,
brief = documentation["doc"]["time"]["brief"],
help = documentation["doc"]["time"]["help"],
description = documentation["doc"]["time"]["description"]
)
async def time(self, ctx):
"""Time command
usage: time
can be used by all players or in DM
"""
import globvars
# Day phase
if globvars.master_state.game.current_phase == Phase.day:
# Day phase: pre-nomination (base day phase)
if base_day_loop.is_running():
start_time = base_day_loop.next_iteration
total_duration = calculate_base_day_duration(globvars.master_state.game)
__time_elapsed = (datetime.datetime.now(datetime.timezone.utc) - start_time).seconds
time_left = total_duration - __time_elapsed
msg = time_day_base.format(
display_time(total_duration),
"is" if time_left == 1 or (time_left >= 60 and time_left < 120) else "are",
display_time(time_left)
)
await ctx.send(msg)
# Day phase: nomination loop is running
elif nomination_loop.is_running():
# We are in the debate phase
if debate_timer.is_running():
end_time = debate_timer.next_iteration
total_duration = DEBATE_TIME
time_left = (end_time - datetime.datetime.now(datetime.timezone.utc)).seconds
msg = time_debate.format(
display_time(total_duration),
display_time(time_left)
)
await ctx.send(msg)
# We are in the active voting phase
else:
msg = time_voting
await ctx.send(msg)
# Day phase: waiting for a nomination
else:
start_time = globvars.master_state.game.nomination_iteration_date[0]
duration = globvars.master_state.game.nomination_iteration_date[1]
time_left = duration - (datetime.datetime.now() - start_time).seconds
msg = time_nomination.format(
display_time(duration),
display_time(time_left)
)
await ctx.send(msg)
# Night phase
elif globvars.master_state.game.current_phase == Phase.night:
min_night_duration = BASE_NIGHT
max_night_duration = BASE_NIGHT + NIGHT_MULTIPLER * INCREMENT
__time_elapsed = (datetime.datetime.now() - globvars.master_state.game.night_start_time).seconds
time_left = max_night_duration - __time_elapsed
msg = time_night.format(
display_time(min_night_duration),
display_time(max_night_duration),
"is" if time_left == 1 or (time_left >= 60 and time_left < 120) else "are",
display_time(time_left)
)
await ctx.send(msg)
# Dawn phase
elif globvars.master_state.game.current_phase == Phase.dawn:
min_dawn_duration = BASE_DAWN
max_dawn_duration = BASE_DAWN + DAWN_MULTIPLIER * INCREMENT
__time_elapsed = (datetime.datetime.now() - globvars.master_state.game.dawn_start_time).seconds
time_left = max_dawn_duration - __time_elapsed
msg = time_dawn.format(
display_time(min_dawn_duration),
display_time(max_dawn_duration),
"is" if time_left == 1 or (time_left >= 60 and time_left < 120) else "are",
display_time(time_left)
)
await ctx.send(msg)
@time.error
async def time_error(self, ctx, error):
# Check did not pass -> commands.CheckFailure
if isinstance(error, commands.CheckFailure):
return
else:
try:
raise error
except Exception:
await ctx.send(error_str)
await botutils.log(botutils.Level.error, traceback.format_exc())
| StarcoderdataPython |
1948108 | from setuptools import setup
# Load __version__ from the package's version module without importing the
# package itself (so setup.py works before dependencies are installed).
version_ns = {}
with open("particlizer/version.py") as version_file:
    exec(version_file.read(), version_ns)

setup(
    name='particlizer',
    version=version_ns['__version__'],
    description='Turn images into particlized interactive animations',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/AdamSpannbauer/particlizer',
    packages=['particlizer'],
    license='MIT',
    install_requires=['numpy', 'imutils'],
    # OpenCV is optional; install with `pip install particlizer[cv2]`.
    extras_require={'cv2': ['opencv-contrib-python >= 3.4.0']},
    keywords=['computer vision', 'image processing', 'opencv'],
)
| StarcoderdataPython |
1796439 | <reponame>VasuXD/YukkiMusicBot
#
# Copyright (C) 2021-2022 by TeamYukki<EMAIL>, < https://github.com/TeamYukki >.
#
# This file is part of < https://github.com/TeamYukki/YukkiMusicBot > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/TeamYukki/YukkiMusicBot/blob/master/LICENSE >
#
# All rights reserved.
import asyncio
from datetime import datetime, timedelta
from typing import Union
from pyrogram import Client
from pyrogram.errors import (ChatAdminRequired,
UserAlreadyParticipant,
UserNotParticipant)
from pyrogram.types import InlineKeyboardMarkup
from pytgcalls import PyTgCalls, StreamType
from pytgcalls.exceptions import (AlreadyJoinedError,
NoActiveGroupCall,
TelegramServerError)
from pytgcalls.types import (JoinedGroupCallParticipant,
LeftGroupCallParticipant, Update)
from pytgcalls.types.input_stream import AudioPiped, AudioVideoPiped
from pytgcalls.types.stream import StreamAudioEnded
import config
from strings import get_string
from YukkiMusic import LOGGER, YouTube, app
from YukkiMusic.misc import db
from YukkiMusic.utils.database import (add_active_chat,
add_active_video_chat,
get_assistant,
get_audio_bitrate, get_lang,
get_loop, get_video_bitrate,
group_assistant, is_autoend,
music_on, mute_off,
remove_active_chat,
remove_active_video_chat,
set_loop)
from YukkiMusic.utils.exceptions import AssistantErr
from YukkiMusic.utils.inline.play import (stream_markup,
telegram_markup)
from YukkiMusic.utils.stream.autoclear import auto_clean
from YukkiMusic.utils.thumbnails import gen_thumb
# Per-chat bookkeeping for the auto-end feature: `autoend` maps a chat id to
# the deadline after which an idle call is closed, `counter` tracks the number
# of voice-chat participants per chat.
autoend = {}
counter = {}
# Minutes the assistant may sit alone in a call before it is auto-ended.
AUTO_END_TIME = 3
async def _clear_(chat_id):
    """Reset the queue for *chat_id* and drop it from the active-chat sets."""
    db[chat_id] = []
    await remove_active_video_chat(chat_id)
    await remove_active_chat(chat_id)
class Call(PyTgCalls):
    """Owns the bot's assistant accounts (up to five, one per configured
    session string) and implements every voice-chat streaming operation
    used by the rest of the bot.
    """

    def __init__(self):
        # One (pyrogram Client, PyTgCalls) pair per assistant session string.
        # Which pair serves a given chat is decided by group_assistant().
        self.userbot1 = Client(
            api_id=config.API_ID,
            api_hash=config.API_HASH,
            session_name=str(config.STRING1),
        )
        self.one = PyTgCalls(
            self.userbot1,
            cache_duration=100,
        )
        self.userbot2 = Client(
            api_id=config.API_ID,
            api_hash=config.API_HASH,
            session_name=str(config.STRING2),
        )
        self.two = PyTgCalls(
            self.userbot2,
            cache_duration=100,
        )
        self.userbot3 = Client(
            api_id=config.API_ID,
            api_hash=config.API_HASH,
            session_name=str(config.STRING3),
        )
        self.three = PyTgCalls(
            self.userbot3,
            cache_duration=100,
        )
        self.userbot4 = Client(
            api_id=config.API_ID,
            api_hash=config.API_HASH,
            session_name=str(config.STRING4),
        )
        self.four = PyTgCalls(
            self.userbot4,
            cache_duration=100,
        )
        self.userbot5 = Client(
            api_id=config.API_ID,
            api_hash=config.API_HASH,
            session_name=str(config.STRING5),
        )
        self.five = PyTgCalls(
            self.userbot5,
            cache_duration=100,
        )

    async def pause_stream(self, chat_id: int):
        """Pause the stream currently playing in *chat_id*."""
        assistant = await group_assistant(self, chat_id)
        await assistant.pause_stream(chat_id)

    async def resume_stream(self, chat_id: int):
        """Resume a previously paused stream in *chat_id*."""
        assistant = await group_assistant(self, chat_id)
        await assistant.resume_stream(chat_id)

    async def mute_stream(self, chat_id: int):
        """Mute the assistant's stream in *chat_id*."""
        assistant = await group_assistant(self, chat_id)
        await assistant.mute_stream(chat_id)

    async def unmute_stream(self, chat_id: int):
        """Unmute the assistant's stream in *chat_id*."""
        assistant = await group_assistant(self, chat_id)
        await assistant.unmute_stream(chat_id)

    async def stop_stream(self, chat_id: int):
        """Clear the chat's queue/state and leave the call (best effort)."""
        assistant = await group_assistant(self, chat_id)
        try:
            await _clear_(chat_id)
            await assistant.leave_group_call(chat_id)
        except:
            pass

    async def force_stop_stream(self, chat_id: int):
        """Drop the current queue entry and leave the call, ignoring errors."""
        assistant = await group_assistant(self, chat_id)
        try:
            check = db.get(chat_id)
            check.pop(0)
        except:
            pass
        await remove_active_video_chat(chat_id)
        await remove_active_chat(chat_id)
        try:
            await assistant.leave_group_call(chat_id)
        except:
            pass

    async def skip_stream(
        self, chat_id: int, link: str, video: Union[bool, str] = None
    ):
        """Replace the current stream in *chat_id* with *link*.

        A video stream is used when *video* is truthy, otherwise audio only.
        """
        assistant = await group_assistant(self, chat_id)
        audio_stream_quality = await get_audio_bitrate(chat_id)
        video_stream_quality = await get_video_bitrate(chat_id)
        stream = (
            AudioVideoPiped(
                link,
                audio_parameters=audio_stream_quality,
                video_parameters=video_stream_quality,
            )
            if video
            else AudioPiped(
                link, audio_parameters=audio_stream_quality
            )
        )
        await assistant.change_stream(
            chat_id,
            stream,
        )

    async def seek_stream(
        self, chat_id, file_path, to_seek, duration, mode
    ):
        """Restart the stream at offset *to_seek* via ffmpeg's -ss/-to flags."""
        assistant = await group_assistant(self, chat_id)
        audio_stream_quality = await get_audio_bitrate(chat_id)
        video_stream_quality = await get_video_bitrate(chat_id)
        stream = (
            AudioVideoPiped(
                file_path,
                audio_parameters=audio_stream_quality,
                video_parameters=video_stream_quality,
                additional_ffmpeg_parameters=f"-ss {to_seek} -to {duration}",
            )
            if mode == "video"
            else AudioPiped(
                file_path,
                audio_parameters=audio_stream_quality,
                additional_ffmpeg_parameters=f"-ss {to_seek} -to {duration}",
            )
        )
        await assistant.change_stream(chat_id, stream)

    async def stream_call(self, link):
        """Briefly join the log group's call with *link*, then leave.

        NOTE(review): presumably a streaming health check -- confirm usage.
        """
        assistant = await group_assistant(self, config.LOG_GROUP_ID)
        await assistant.join_group_call(
            config.LOG_GROUP_ID,
            AudioVideoPiped(link),
            stream_type=StreamType().pulse_stream,
        )
        await asyncio.sleep(0.5)
        await assistant.leave_group_call(config.LOG_GROUP_ID)

    async def join_assistant(self, original_chat_id, chat_id):
        """Ensure the chat's assistant account is a member of *chat_id*.

        Joins via the public username when available, otherwise via an
        invite link. Raises AssistantErr with a localized message when the
        assistant is banned or cannot be invited.
        """
        language = await get_lang(original_chat_id)
        _ = get_string(language)
        userbot = await get_assistant(chat_id)
        try:
            try:
                get = await app.get_chat_member(chat_id, userbot.id)
            except ChatAdminRequired:
                raise AssistantErr(_["call_1"])
            if get.status == "banned" or get.status == "kicked":
                raise AssistantErr(
                    _["call_2"].format(userbot.username, userbot.id)
                )
        except UserNotParticipant:
            chat = await app.get_chat(chat_id)
            if chat.username:
                # Public chat: the assistant can join by username directly.
                try:
                    await userbot.join_chat(chat.username)
                except UserAlreadyParticipant:
                    pass
                except Exception as e:
                    raise AssistantErr(_["call_3"].format(e))
            else:
                # Private chat: fall back to (possibly exporting) an invite link.
                try:
                    try:
                        try:
                            invitelink = chat.invite_link
                            if invitelink is None:
                                invitelink = (
                                    await app.export_chat_invite_link(
                                        chat_id
                                    )
                                )
                        except:
                            invitelink = (
                                await app.export_chat_invite_link(
                                    chat_id
                                )
                            )
                    except ChatAdminRequired:
                        raise AssistantErr(_["call_4"])
                    except Exception as e:
                        raise AssistantErr(e)
                    m = await app.send_message(
                        original_chat_id, _["call_5"]
                    )
                    # pyrogram expects the legacy joinchat/ form of the link.
                    if invitelink.startswith("https://t.me/+"):
                        invitelink = invitelink.replace(
                            "https://t.me/+", "https://t.me/joinchat/"
                        )
                    await asyncio.sleep(3)
                    await userbot.join_chat(invitelink)
                    await asyncio.sleep(4)
                    await m.edit(_["call_6"].format(userbot.name))
                except UserAlreadyParticipant:
                    pass
                except Exception as e:
                    raise AssistantErr(_["call_3"].format(e))

    async def join_call(
        self,
        chat_id: int,
        original_chat_id: int,
        link,
        video: Union[bool, str] = None,
    ):
        """Join the voice chat of *chat_id* and start streaming *link*.

        On a missing group call the assistant is first invited to the chat
        and the join retried once. Raises AssistantErr with a user-facing
        explanation on failure.
        """
        assistant = await group_assistant(self, chat_id)
        audio_stream_quality = await get_audio_bitrate(chat_id)
        video_stream_quality = await get_video_bitrate(chat_id)
        stream = (
            AudioVideoPiped(
                link,
                audio_parameters=audio_stream_quality,
                video_parameters=video_stream_quality,
            )
            if video
            else AudioPiped(
                link, audio_parameters=audio_stream_quality
            )
        )
        try:
            await assistant.join_group_call(
                chat_id,
                stream,
                stream_type=StreamType().pulse_stream,
            )
        except NoActiveGroupCall:
            try:
                await self.join_assistant(original_chat_id, chat_id)
            except Exception as e:
                raise e
            try:
                await assistant.join_group_call(
                    chat_id,
                    stream,
                    stream_type=StreamType().pulse_stream,
                )
            except Exception as e:
                raise AssistantErr(
                    "**No Active Voice Chat Found**\n\nPlease make sure group's voice chat is enabled. If already enabled, please end it and start fresh voice chat again and if the problem continues, try /restart"
                )
        except AlreadyJoinedError:
            raise AssistantErr(
                "**Assistant Already in Voice Chat**\n\nSystems have detected that assistant is already there in the voice chat, this issue generally comes when you play 2 queries together.\n\nIf assistant is not present in voice chat, please end voice chat and start fresh voice chat again and if the problem continues, try /restart"
            )
        except TelegramServerError:
            raise AssistantErr(
                "**Telegram Server Error**\n\nTelegram is having some internal server problems, Please try playing again.\n\n If this problem keeps coming everytime, please end your voice chat and start fresh voice chat again."
            )
        # Mark the chat active (and video-active when applicable).
        await add_active_chat(chat_id)
        await mute_off(chat_id)
        await music_on(chat_id)
        if video:
            await add_active_video_chat(chat_id)
        # Seed the auto-end bookkeeping: if the assistant is alone, schedule
        # the call to end after AUTO_END_TIME minutes.
        if await is_autoend():
            counter[chat_id] = {}
            users = len(await assistant.get_participants(chat_id))
            if users == 1:
                autoend[chat_id] = datetime.now() + timedelta(
                    minutes=AUTO_END_TIME
                )

    async def change_stream(self, client, chat_id):
        """Advance *chat_id*'s queue and start the next track.

        Honors the loop counter (replaying the current entry while it is
        non-zero), cleans up downloaded files, leaves the call when the
        queue is exhausted, and posts the "now playing" message for the
        new entry.
        """
        check = db.get(chat_id)
        popped = None
        loop = await get_loop(chat_id)
        try:
            if loop == 0:
                popped = check.pop(0)
            else:
                # Still looping the current entry: just decrement the counter.
                loop = loop - 1
                await set_loop(chat_id, loop)
            if popped:
                if config.AUTO_DOWNLOADS_CLEAR == str(True):
                    await auto_clean(popped)
            if not check:
                # Queue exhausted: reset state and leave the call.
                await _clear_(chat_id)
                return await client.leave_group_call(chat_id)
        except:
            try:
                await _clear_(chat_id)
                return await client.leave_group_call(chat_id)
            except:
                return
        else:
            # A next entry exists: unpack it and start the matching stream type.
            queued = check[0]["file"]
            language = await get_lang(chat_id)
            _ = get_string(language)
            title = (check[0]["title"]).title()
            user = check[0]["by"]
            original_chat_id = check[0]["chat_id"]
            streamtype = check[0]["streamtype"]
            audio_stream_quality = await get_audio_bitrate(chat_id)
            video_stream_quality = await get_video_bitrate(chat_id)
            videoid = check[0]["vidid"]
            check[0]["played"] = 0
            if "live_" in queued:
                # Live YouTube stream: resolve a fresh direct link first.
                n, link = await YouTube.video(videoid, True)
                if n == 0:
                    return await app.send_message(
                        original_chat_id,
                        text=_["call_9"],
                    )
                stream = (
                    AudioVideoPiped(
                        link,
                        audio_parameters=audio_stream_quality,
                        video_parameters=video_stream_quality,
                    )
                    if str(streamtype) == "video"
                    else AudioPiped(
                        link, audio_parameters=audio_stream_quality
                    )
                )
                try:
                    await client.change_stream(chat_id, stream)
                except Exception:
                    return await app.send_message(
                        original_chat_id,
                        text=_["call_9"],
                    )
                img = await gen_thumb(videoid)
                button = telegram_markup(_, chat_id)
                run = await app.send_photo(
                    original_chat_id,
                    photo=img,
                    caption=_["stream_1"].format(
                        user,
                        f"https://t.me/{app.username}?start=info_{videoid}",
                    ),
                    reply_markup=InlineKeyboardMarkup(button),
                )
                db[chat_id][0]["mystic"] = run
                db[chat_id][0]["markup"] = "tg"
            elif "vid_" in queued:
                # Regular YouTube entry: download (audio or video) then play.
                mystic = await app.send_message(
                    original_chat_id, _["call_10"]
                )
                try:
                    file_path, direct = await YouTube.download(
                        videoid,
                        mystic,
                        videoid=True,
                        video=True
                        if str(streamtype) == "video"
                        else False,
                    )
                except:
                    return await mystic.edit_text(
                        _["call_9"], disable_web_page_preview=True
                    )
                stream = (
                    AudioVideoPiped(
                        file_path,
                        audio_parameters=audio_stream_quality,
                        video_parameters=video_stream_quality,
                    )
                    if str(streamtype) == "video"
                    else AudioPiped(
                        file_path,
                        audio_parameters=audio_stream_quality,
                    )
                )
                try:
                    await client.change_stream(chat_id, stream)
                except Exception:
                    return await app.send_message(
                        original_chat_id,
                        text=_["call_9"],
                    )
                img = await gen_thumb(videoid)
                button = stream_markup(_, videoid, chat_id)
                await mystic.delete()
                run = await app.send_photo(
                    original_chat_id,
                    photo=img,
                    caption=_["stream_1"].format(
                        user,
                        f"https://t.me/{app.username}?start=info_{videoid}",
                    ),
                    reply_markup=InlineKeyboardMarkup(button),
                )
                db[chat_id][0]["mystic"] = run
                db[chat_id][0]["markup"] = "stream"
            elif "index_" in queued:
                # Index/direct link entry: videoid holds the URL itself.
                stream = (
                    AudioVideoPiped(
                        videoid,
                        audio_parameters=audio_stream_quality,
                        video_parameters=video_stream_quality,
                    )
                    if str(streamtype) == "video"
                    else AudioPiped(
                        videoid, audio_parameters=audio_stream_quality
                    )
                )
                try:
                    await client.change_stream(chat_id, stream)
                except Exception:
                    return await app.send_message(
                        original_chat_id,
                        text=_["call_9"],
                    )
                button = telegram_markup(_, chat_id)
                run = await app.send_photo(
                    original_chat_id,
                    photo=config.STREAM_IMG_URL,
                    caption=_["stream_2"].format(user),
                    reply_markup=InlineKeyboardMarkup(button),
                )
                db[chat_id][0]["mystic"] = run
                db[chat_id][0]["markup"] = "tg"
            else:
                # Local/telegram/soundcloud file: play the queued path directly.
                stream = (
                    AudioVideoPiped(
                        queued,
                        audio_parameters=audio_stream_quality,
                        video_parameters=video_stream_quality,
                    )
                    if str(streamtype) == "video"
                    else AudioPiped(
                        queued, audio_parameters=audio_stream_quality
                    )
                )
                try:
                    await client.change_stream(chat_id, stream)
                except Exception:
                    return await app.send_message(
                        original_chat_id,
                        text=_["call_9"],
                    )
                if videoid == "telegram":
                    button = telegram_markup(_, chat_id)
                    run = await app.send_photo(
                        original_chat_id,
                        photo=config.TELEGRAM_AUDIO_URL
                        if str(streamtype) == "audio"
                        else config.TELEGRAM_VIDEO_URL,
                        caption=_["stream_3"].format(
                            title, check[0]["dur"], user
                        ),
                        reply_markup=InlineKeyboardMarkup(button),
                    )
                    db[chat_id][0]["mystic"] = run
                    db[chat_id][0]["markup"] = "tg"
                elif videoid == "soundcloud":
                    button = telegram_markup(_, chat_id)
                    run = await app.send_photo(
                        original_chat_id,
                        photo=config.SOUNCLOUD_IMG_URL,
                        caption=_["stream_3"].format(
                            title, check[0]["dur"], user
                        ),
                        reply_markup=InlineKeyboardMarkup(button),
                    )
                    db[chat_id][0]["mystic"] = run
                    db[chat_id][0]["markup"] = "tg"
                else:
                    img = await gen_thumb(videoid)
                    button = stream_markup(_, videoid, chat_id)
                    run = await app.send_photo(
                        original_chat_id,
                        photo=img,
                        caption=_["stream_1"].format(
                            user,
                            f"https://t.me/{app.username}?start=info_{videoid}",
                        ),
                        reply_markup=InlineKeyboardMarkup(button),
                    )
                    db[chat_id][0]["mystic"] = run
                    db[chat_id][0]["markup"] = "stream"

    async def ping(self):
        """Return the average ping of all configured assistants as a string."""
        pings = []
        if config.STRING1:
            pings.append(await self.one.ping)
        if config.STRING2:
            pings.append(await self.two.ping)
        if config.STRING3:
            pings.append(await self.three.ping)
        if config.STRING4:
            pings.append(await self.four.ping)
        if config.STRING5:
            pings.append(await self.five.ping)
        return str(round(sum(pings) / len(pings), 3))

    async def start(self):
        """Start the PyTgCalls client of every configured assistant."""
        LOGGER(__name__).info("Starting PyTgCalls Client\n")
        if config.STRING1:
            await self.one.start()
        if config.STRING2:
            await self.two.start()
        if config.STRING3:
            await self.three.start()
        if config.STRING4:
            await self.four.start()
        if config.STRING5:
            await self.five.start()

    async def decorators(self):
        """Register stream/participant event handlers on all five clients."""
        @self.one.on_kicked()
        @self.two.on_kicked()
        @self.three.on_kicked()
        @self.four.on_kicked()
        @self.five.on_kicked()
        @self.one.on_closed_voice_chat()
        @self.two.on_closed_voice_chat()
        @self.three.on_closed_voice_chat()
        @self.four.on_closed_voice_chat()
        @self.five.on_closed_voice_chat()
        @self.one.on_left()
        @self.two.on_left()
        @self.three.on_left()
        @self.four.on_left()
        @self.five.on_left()
        async def stream_services_handler(_, chat_id: int):
            # Kicked / call closed / assistant left: tear down the chat state.
            await self.stop_stream(chat_id)

        @self.one.on_stream_end()
        @self.two.on_stream_end()
        @self.three.on_stream_end()
        @self.four.on_stream_end()
        @self.five.on_stream_end()
        async def stream_end_handler1(client, update: Update):
            # Only audio-end events signal the track finished.
            if not isinstance(update, StreamAudioEnded):
                return
            await self.change_stream(client, update.chat_id)

        @self.one.on_participants_change()
        @self.two.on_participants_change()
        @self.three.on_participants_change()
        @self.four.on_participants_change()
        @self.five.on_participants_change()
        async def participants_change_handler(client, update: Update):
            # Maintain the per-chat participant count and the auto-end
            # deadline (set when only the assistant remains in the call).
            if not isinstance(
                update, JoinedGroupCallParticipant
            ) and not isinstance(update, LeftGroupCallParticipant):
                return
            chat_id = update.chat_id
            users = counter.get(chat_id)
            if not users:
                # First event for this chat: fetch the real participant count.
                try:
                    got = len(await client.get_participants(chat_id))
                except:
                    return
                counter[chat_id] = got
                if got == 1:
                    autoend[chat_id] = datetime.now() + timedelta(
                        minutes=AUTO_END_TIME
                    )
                    return
                autoend[chat_id] = {}
            else:
                # Incrementally track joins/leaves from the cached count.
                final = (
                    users + 1
                    if isinstance(update, JoinedGroupCallParticipant)
                    else users - 1
                )
                counter[chat_id] = final
                if final == 1:
                    autoend[chat_id] = datetime.now() + timedelta(
                        minutes=AUTO_END_TIME
                    )
                    return
                autoend[chat_id] = {}
# Single shared Call instance used across the bot.
Yukki = Call()
| StarcoderdataPython |
5050708 | from scraper import html_scraper, scrap_them_all
from utils import preprocess, preprocess_lematize, strings_concatenate
from word_cloud import generate_wordcloud
from summarization import summarization_freq, save_summary, summarizer_sumy
from goose3 import Goose
# Target listing page and the CSS selectors the scraper helpers use on it.
global_url = "http://www.olharalerta.com.br/noticias/"
news_container = "ul.lista-noticias"
news_url = "a"
# NOTE(review): news_regex is never used below -- confirm whether it is needed.
news_regex = ""
# get article
g = Goose()
url = 'https://iaexpert.academy/2020/11/09/ia-preve-resultado-das-eleicoes-americanas/'
artigo_portugues = g.extract(url)
# Collect article URLs from the listing page, then scrape the first article.
url_list = html_scraper(global_url, news_container, news_url)
article_list = scrap_them_all(url_list[0])
# Creating a HTML with summary from a text
list_freq, best_sentencas = summarization_freq(article_list['description'])
save_summary('teste', list_freq, best_sentencas)
# Summary with sumy (prints each sentence of the summary)
resumo = summarizer_sumy(artigo_portugues)
for i in resumo:
    print(i)
| StarcoderdataPython |
6578286 | import FWCore.ParameterSet.Config as cms
# Ideal geometry for ECAL+HCAL, needed for simulation
from Geometry.CMSCommonData.ecalhcalGeometryXML_cfi import *
from Geometry.HcalCommonData.hcalParameters_cfi import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cfi import *
| StarcoderdataPython |
11325451 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 00:06:45 2019
@author: Mochi
"""
import os
import subprocess

# Immediately power the machine off (requires passwordless sudo for shutdown).
# subprocess.run with an argument list avoids invoking a shell, unlike the
# original os.system() string; check=False keeps the original "ignore the
# return code" behavior.
subprocess.run(["sudo", "shutdown", "-h", "now"], check=False)
8069935 | """
백준 6497번 : 전력난
- 크루스칼 알고리즘 혹은 프림 알고리즘
"""
import sys
input = sys.stdin.readline
def find(x):
    """Return the root of *x* in the global ``Nroot`` union-find table,
    compressing the path so future lookups are O(1) amortized.

    Iterative two-pass compression replaces the original recursion, which
    could exceed Python's recursion limit on long parent chains.
    """
    root = x
    # First pass: walk up to the representative.
    while Nroot[root] != root:
        root = Nroot[root]
    # Second pass: point every node on the walked path directly at the root.
    while Nroot[x] != root:
        Nroot[x], x = root, Nroot[x]
    return root
while True:
    M, N = map(int, input().split())
    # Key point: a "0 0" line terminates the input.
    if M == 0 and N == 0:
        break
    # Union-find parent table, one entry per node.
    Nroot = [i for i in range(M)]
    Elist = []
    for _ in range(N):
        s, e, w = map(int, input().split())
        Elist.append((s, e, w))
    # Kruskal's algorithm: take edges in increasing order of cost.
    Elist.sort(key=lambda x: x[2])
    result = 0
    for s, e, w in Elist:
        sRoot = find(s)
        eRoot = find(e)
        if sRoot != eRoot:
            # Union by smaller root index, and add this edge to the MST cost.
            if sRoot > eRoot:
                Nroot[sRoot] = eRoot
            else:
                Nroot[eRoot] = sRoot
            result += w
    # The classic problem prints the minimized cost; this one asks for the
    # savings: total edge cost minus the MST cost.
    print(sum(e[2] for e in Elist) - result)
9629647 | <reponame>syncccc/Python-Crash-Course
# Create the original list and print it.
travel_places = ['lhasa','lijiang','qinghai lake','skyland']
print(travel_places)
# Print the list in alphabetical order without modifying the original.
print("\n这里是按照字母顺序打印的列表:")
print(sorted(travel_places))
# Print the list again to verify its order is unchanged.
print("\n这个是原始列表:")
print(travel_places)
# Print the list in reverse alphabetical order without modifying it.
print("\n这个是与字母顺序相反的列表:")
print(sorted(travel_places,reverse = True))
# Print the list again to verify its order is unchanged.
print("\n这个是原始列表:")
print(travel_places)
# Reverse the element order of the list in place.
print("\n这个是逆序排列的列表:")
travel_places.reverse()
# Verify the new order.
print(travel_places)
# Reverse the element order again (restoring the original order).
print("\n这个是正序排列的列表:")
travel_places.reverse()
# Verify the order.
print(travel_places)
# Sort the list in place into alphabetical order.
travel_places.sort()
# Verify the order.
print(travel_places)
# Sort the list in place into reverse alphabetical order.
travel_places.sort(reverse = True)
# Verify the order.
print(travel_places)
8171584 | <reponame>mbeacom/mythril
from mythril.analysis.report import Report
from mythril.analysis import modules
import pkgutil
import logging
def fire_lasers(statespace):
    """Run every analysis module in ``mythril.analysis.modules`` against
    *statespace* and print a report of the issues found.

    Each module in the package is expected to expose an
    ``execute(statespace)`` callable returning a list of issues.
    """
    # Load every analysis module first so import errors surface before any
    # analysis starts (preserves the original two-phase behavior).
    # NOTE(review): loader.find_module/load_module is deprecated in favor of
    # importlib APIs -- kept as-is to avoid a behavior change.
    _modules = [
        loader.find_module(name).load_module(name)
        for loader, name, _is_pkg in pkgutil.walk_packages(modules.__path__)
    ]
    logging.info("Starting analysis")
    issues = []
    for module in _modules:
        logging.info("Executing " + str(module))
        issues += module.execute(statespace)
    report = Report()
    if issues:
        for issue in issues:
            report.append_issue(issue)
        print(report.as_text())
    else:
        print("The analysis was completed successfully. No issues were detected.")
| StarcoderdataPython |
1794974 | <reponame>cccccccccccccc/Myleetcode<filename>804/uniquemorsecodeword.py
from typing import List
class Solution:
    """LeetCode 804: count distinct Morse-code transformations."""

    def uniqueMorseRepresentations(self, words: List[str]) -> int:
        """Return how many distinct Morse encodings *words* produce."""
        # Morse code for 'a'..'z', indexed by letter position.
        codes = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
        encodings = {
            "".join(codes[ord(ch) - ord('a')] for ch in word)
            for word in words
        }
        return len(encodings)
# Quick manual check against the LeetCode sample input (expected output: 2).
A = Solution()
words = ["gin", "zen", "gig", "msg"]
print(A.uniqueMorseRepresentations(words))
| StarcoderdataPython |
9533 | import pygame.font
import copy
class Text:
    """A piece of text pre-rendered for blitting onto a pygame surface."""

    def __init__(self, rect, size, color, screen, text):
        """Store drawing state, build the font, and render the text once.

        The rect is deep-copied so later mutations by the caller cannot
        move this text.
        """
        self.rect = copy.deepcopy(rect)
        self.screen = screen
        self.color = color
        self.text = text
        self.font = pygame.font.SysFont(None, size)
        self.text_image = None
        self.text_image_rect = None
        self.prep_img()

    def prep_img(self):
        """Render the text to an image and center it inside self.rect."""
        rendered = self.font.render(self.text, True, self.color)
        centered = rendered.get_rect()
        centered.center = self.rect.center
        self.text_image = rendered
        self.text_image_rect = centered

    def render(self):
        """Blit the pre-rendered text image onto the screen."""
        self.screen.blit(self.text_image, self.text_image_rect)
6597468 | import os
import platform
import shutil
import tempfile
import warnings
from collections import Counter
from os.path import join as pjoin
from typing import MutableMapping, Optional
import lmdb
import configparser
from . import constants as c
from . import __version__
class TxnRegisterSingleton(type):
    """Metaclass guaranteeing one shared instance per class using it."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Return the cached instance; construct and cache it on first use.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class TxnRegister(metaclass=TxnRegisterSingleton):
    """Singleton to manage transaction thread safety in lmdb databases.

    This is essentially a reference counting transaction register: each
    environment has at most one open read and one open write transaction,
    shared by every caller until all of them release their handle. Lots of
    room for improvement here.
    """

    def __init__(self):
        # Reference counts of outstanding write/read handles per environment.
        self.WriterAncestors = Counter()
        self.ReaderAncestors = Counter()
        # The single shared transaction per environment (while refcount > 0).
        self.WriterTxn: MutableMapping[lmdb.Environment, lmdb.Transaction] = {}
        self.ReaderTxn: MutableMapping[lmdb.Environment, lmdb.Transaction] = {}

    def begin_writer_txn(self, lmdbenv: lmdb.Environment,
                         buffer: bool = False) -> lmdb.Transaction:
        """Start a write enabled transaction on the given environment

        If multiple write transactions are requested for the same handle, only
        one instance of the transaction handle will be returned, and will not
        close until all operations on that handle have requested to close

        Parameters
        ----------
        lmdbenv : lmdb.Environment
            the environment to open the transaction on
        buffer : bool, optional
            if buffer objects should be used (the default is False, which does
            not use buffers)

        Returns
        -------
        lmdb.Transaction
            transaction handle to perform operations on
        """
        if self.WriterAncestors[lmdbenv] == 0:
            self.WriterTxn[lmdbenv] = lmdbenv.begin(write=True, buffers=buffer)
        self.WriterAncestors[lmdbenv] += 1
        return self.WriterTxn[lmdbenv]

    def begin_reader_txn(self, lmdbenv: lmdb.Environment,
                         buffer: bool = False) -> lmdb.Transaction:
        """Start a reader only txn for the given environment

        If a read-only transaction for the same environment already exists
        then the same reader txn handle will be returned, and will not close
        until all operations on that handle have said they are finished.

        Parameters
        ----------
        lmdbenv : lmdb.Environment
            the environment to start the transaction in.
        buffer : bool, optional
            whether a buffer transaction should be used (the default is False,
            which means no buffers are returned)

        Returns
        -------
        lmdb.Transaction
            handle to the lmdb transaction.
        """
        if self.ReaderAncestors[lmdbenv] == 0:
            self.ReaderTxn[lmdbenv] = lmdbenv.begin(write=False, buffers=buffer)
        self.ReaderAncestors[lmdbenv] += 1
        return self.ReaderTxn[lmdbenv]

    def commit_writer_txn(self, lmdbenv: lmdb.Environment) -> bool:
        """Commit changes made in a write-enabled transaction handle

        As multiple objects can have references to the same open transaction
        handle, the data is not actually committed until all open transactions
        have called the commit method.

        Parameters
        ----------
        lmdbenv : lmdb.Environment
            the environment handle used to open the transaction

        Raises
        ------
        RuntimeError
            If the internal reference counting gets out of sync

        Returns
        -------
        bool
            True if this operation actually committed, otherwise False
            if other objects have references to the same (open) handle
        """
        ancestors = self.WriterAncestors[lmdbenv]
        if ancestors == 0:
            msg = f'hash ancestors are zero but commit called on {lmdbenv}'
            raise RuntimeError(msg)
        elif ancestors == 1:
            # Last reference: actually commit and drop the shared handle.
            self.WriterTxn[lmdbenv].commit()
            del self.WriterTxn[lmdbenv]
            ret = True
        else:
            ret = False
        self.WriterAncestors[lmdbenv] -= 1
        return ret

    def abort_reader_txn(self, lmdbenv: lmdb.Environment) -> bool:
        """Request to close a read-only transaction handle

        As multiple objects can have references to the same open transaction
        handle, the transaction is not actually aborted until all open
        transactions have called the abort method.

        Parameters
        ----------
        lmdbenv : lmdb.Environment
            the environment handle used to open the transaction

        Raises
        ------
        RuntimeError
            If the internal reference counting gets out of sync.

        Returns
        -------
        bool
            True if this operation actually aborted the transaction,
            otherwise False if other objects have references to the same
            (open) handle.
        """
        ancestors = self.ReaderAncestors[lmdbenv]
        if ancestors == 0:
            # Include the environment in the message (the original f-string
            # had no placeholder), matching commit_writer_txn's diagnostics.
            raise RuntimeError(f'hash ancestors are zero but abort called on {lmdbenv}')
        elif ancestors == 1:
            # Last reference: actually abort and drop the shared handle.
            self.ReaderTxn[lmdbenv].abort()
            del self.ReaderTxn[lmdbenv]
            ret = True
        else:
            ret = False
        self.ReaderAncestors[lmdbenv] -= 1
        return ret
"""
Todo, refactor to avoid the need for these imports to be below TxnRegister,
if they aren't right now, we get circular imports...
"""
from .records import commiting, heads, parsing, vcompat # noqa: E402
from .utils import readme_contents # noqa: E402
class Environments(object):
def __init__(self, pth: os.PathLike):
self.repo_path: os.PathLike = pth
self.refenv: Optional[lmdb.Environment] = None
self.hashenv: Optional[lmdb.Environment] = None
self.stageenv: Optional[lmdb.Environment] = None
self.branchenv: Optional[lmdb.Environment] = None
self.labelenv: Optional[lmdb.Environment] = None
self.stagehashenv: Optional[lmdb.Environment] = None
self.cmtenv: MutableMapping[str, lmdb.Environment] = {}
self._startup()
@property
def repo_is_initialized(self) -> bool:
"""Property to check if the repository is initialized, read-only attribute
Returns
-------
bool
True if repo environments are initialized, False otherwise
"""
ret = True if isinstance(self.refenv, lmdb.Environment) else False
return ret
def _startup(self) -> bool:
"""When first access to the Repo starts, attempt to open the db envs.
This function is designed to fail if a repository does not exist at the
:py:attribute:`repo_path` which is specified, so the user can
explicitly choose to initialize the repo. Once opened, the lmdb
environments should not be closed until the program terminates.
Returns
-------
bool False if no repository exists at the given path, otherwise True
Warns
-----
UserWarning Should the repository not exist at the provided repo path.
Raises
------
RuntimeError If the repository version is not compatible with the
current software.
"""
if not os.path.isfile(pjoin(self.repo_path, c.LMDB_BRANCH_NAME)):
msg = f'No repository exists at {self.repo_path}, please use `repo.init()` method'
warnings.warn(msg, UserWarning)
return False
repo_ver = vcompat.startup_check_repo_version(self.repo_path)
curr_ver = parsing.repo_version_raw_spec_from_raw_string(v_str=__version__)
if not vcompat.is_repo_software_version_compatible(repo_ver, curr_ver):
msg = f'repository written version: {repo_ver} is not comatible '\
f'with the current Hangar software version: {curr_ver}'
raise RuntimeError(msg)
self._open_environments()
return True
def _init_repo(self,
user_name: str,
user_email: str,
description: str = None,
remove_old: bool = False) -> os.PathLike:
"""Create a new hangar repositiory at the specified environment path.
Parameters
----------
user_name : str
Name of the repository user.
user_email : str
Email address of the respository user.
remove_old : bool, optional(default value = False)
DEVELOPER USE ONLY --- Remove all data and records stored in the
repository if this opetion is enabled, defaults to False.
Returns
-------
os.PathLike
The path to the newly created repository on disk.
Raises
------
OSError
If a hangar repository exists at the specified path, and `remove_old`
was not set to ``True``.
"""
if os.path.isfile(pjoin(self.repo_path, c.LMDB_BRANCH_NAME)):
if remove_old is True:
shutil.rmtree(self.repo_path)
else:
raise OSError(f'Hangar Directory: {self.repo_path} already exists')
os.makedirs(pjoin(self.repo_path, c.DIR_DATA_STORE))
os.makedirs(pjoin(self.repo_path, c.DIR_DATA_STAGE))
os.makedirs(pjoin(self.repo_path, c.DIR_DATA_REMOTE))
os.makedirs(pjoin(self.repo_path, c.DIR_DATA))
print(f'Hangar Repo initialized at: {self.repo_path}')
if description:
userConf = {'name': user_name, 'email': user_email, 'description': description}
else:
userConf = {'name': user_name, 'email': user_email}
CFG = configparser.ConfigParser()
CFG.read_dict(userConf)
with open(pjoin(self.repo_path, c.CONFIG_USER_NAME), 'w') as f:
CFG.write(f)
readmeTxt = readme_contents(user_name, user_email, description)
with open(pjoin(self.repo_path, c.README_FILE_NAME), 'w') as f:
f.write(readmeTxt.getvalue())
self._open_environments()
vcompat.set_repository_software_version(branchenv=self.branchenv, ver_str=__version__)
heads.create_branch(self.branchenv, 'master', '')
heads.set_staging_branch_head(self.branchenv, 'master')
return self.repo_path
def checkout_commit(self, branch_name: str = '', commit: str = '') -> str:
    """Set up db environment with unpacked commit ref records.

    Exactly one commit env is registered in ``self.cmtenv`` per call,
    keyed by the resolved commit hash.

    Parameters
    ----------
    branch_name : str, optional
        name of the branch to read, defaults to ''
    commit : str, optional
        name of the commit to read, defaults to ''

    Returns
    -------
    str
        commit hash which was checked out
    """
    # Resolution priority: explicit commit > branch head > staging head.
    if commit != '':
        commit_hash = commit
        txt = f' * Checking out COMMIT: {commit_hash}'
    elif branch_name != '':
        commit_hash = heads.get_branch_head_commit(self.branchenv, branch_name)
        txt = f' * Checking out BRANCH: {branch_name} with current HEAD: {commit_hash}'
    else:
        head_branch = heads.get_staging_branch_head(self.branchenv)
        commit_hash = heads.get_branch_head_commit(self.branchenv, head_branch)
        txt = f'\n Neither BRANCH or COMMIT specified.'\
              f'\n * Checking out writing HEAD BRANCH: {head_branch}'
    print(txt)

    # On UNIX-like system, an open process still retains ability to
    # interact with disk space allocated to a file when it is removed from
    # disk. Windows does not, and will not allow file to be removed if a
    # process is interacting with it. While the CM form is cleaner, this
    # hack allows similar usage on Windows platforms (the dir is removed
    # later, at close time -- see _close_environments).
    if platform.system() != 'Windows':
        with tempfile.TemporaryDirectory() as tempD:
            self._unpack_commit_env(tempD, commit_hash)
    else:
        self._unpack_commit_env(tempfile.mkdtemp(), commit_hash)
    return commit_hash

def _unpack_commit_env(self, tempD: str, commit_hash: str) -> None:
    """Open a scratch lmdb env under `tempD`, unpack the commit refs into
    it, and register it in ``self.cmtenv[commit_hash]``.
    """
    tmpDF = os.path.join(tempD, f'{commit_hash}.lmdb')
    tmpDB = lmdb.open(path=tmpDF, **c.LMDB_SETTINGS)
    commiting.unpack_commit_ref(self.refenv, tmpDB, commit_hash)
    self.cmtenv[commit_hash] = tmpDB
def _open_environments(self):
    """Open the standard lmdb databases at the repo path.

    If any commits are checked out (in an unpacked state), read those in as
    well.
    """
    # (instance attribute, lmdb database file name) for every standard env.
    env_specs = (
        ('refenv', c.LMDB_REF_NAME),
        ('hashenv', c.LMDB_HASH_NAME),
        ('stageenv', c.LMDB_STAGE_REF_NAME),
        ('branchenv', c.LMDB_BRANCH_NAME),
        ('labelenv', c.LMDB_META_NAME),
        ('stagehashenv', c.LMDB_STAGE_HASH_NAME),
    )
    for attr_name, db_file_name in env_specs:
        db_pth = pjoin(self.repo_path, db_file_name)
        setattr(self, attr_name, lmdb.open(path=db_pth, **c.LMDB_SETTINGS))
def _close_environments(self):
    """Close every open lmdb environment handle held by this instance.

    On Windows, the unpacked commit env files are explicitly removed from
    disk after closing, since the OS refuses to delete a file while any
    process has it open. On UNIX-like systems the enclosing temporary
    directory was already scheduled for removal at checkout time, so a
    plain close suffices.
    """
    self.refenv.close()
    self.hashenv.close()
    self.stageenv.close()
    self.branchenv.close()
    self.labelenv.close()
    self.stagehashenv.close()
    for env in self.cmtenv.values():
        if platform.system() == 'Windows':
            envpth = env.path()
            env.close()
            os.remove(envpth)
        else:
            # BUG FIX: the original line carried stray appended text
            # (`env.close() | Starcoderdata...`) which would raise
            # NameError at runtime; a bare close() is the clear intent.
            env.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.