code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from __future__ import print_function
import argparse
import collections
import copy
import io
import json
import logging
import os
import sys
DEFAULT_LOG_LEVEL = logging.DEBUG
LOG_LEVELS = collections.defaultdict(
lambda: DEFAULT_LOG_LEVEL,
{
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
},
)
# Lambda initializes a root logger that needs to be removed in order to set a
# different logging config
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
logging.basicConfig(
filename="iam_handler.log",
format="%(asctime)s.%(msecs)03dZ [%(name)s][%(levelname)-5s]: %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
level=LOG_LEVELS[os.environ.get("LOG_LEVEL", "").lower()],
)
log = logging.getLogger(__name__)
def _join_paths(*paths):
return "/".join(*paths).replace("//", "/")
def _read(path, encoding="utf8", **kwargs):
"""Read a file."""
with io.open(path, encoding=encoding, **kwargs) as fh_:
return fh_.read()
def _merge_iam_policy_doc(doc1, doc2):
# adopt doc2's Id
if doc2.get("Id"):
doc1["Id"] = doc2["Id"]
# let doc2 upgrade our Version
if doc2.get("Version", "") > doc1.get("Version", ""):
doc1["Version"] = doc2["Version"]
# merge in doc2's statements, overwriting any existing Sids
for doc2_statement in doc2["Statement"]:
if not doc2_statement.get("Sid"):
doc1["Statement"].append(doc2_statement)
continue
seen = False
for index, doc1_statement in enumerate(doc1["Statement"]):
if doc1_statement.get("Sid") == doc2_statement.get("Sid"):
doc1["Statement"][index] = doc2_statement
seen = True
break
if not seen:
doc1["Statement"].append(doc2_statement)
return doc1
def main(name, template_paths, template, **kwargs):
"""Merge policy documents for all template paths."""
iam_policy_doc = {"Statement": []}
ret = {
"name": name,
"policy": copy.deepcopy(iam_policy_doc),
}
log.info("=" * 40)
log.info("name = %s", name)
log.info("template = %s", template)
log.info("template_paths = %s", template_paths)
for path in template_paths:
policy_path = _join_paths([path, template])
if os.path.isfile(policy_path):
ret["policy"] = _merge_iam_policy_doc(
ret["policy"], json.loads(_read(policy_path))
)
ret["policy"] = json.dumps(ret["policy"])
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"json",
type=argparse.FileType("r"),
default=sys.stdin,
help="Parses input from a json file or stdin",
)
args = parser.parse_args()
json_args = {}
with args.json as fp_:
json_args = json.load(fp_)
for arg, val in json_args.items():
json_args[arg] = json.loads(val)
sys.exit(print(json.dumps(main(**json_args)))) | modules/policy_documents/policy_document_handler.py | from __future__ import print_function
import argparse
import collections
import copy
import io
import json
import logging
import os
import sys
DEFAULT_LOG_LEVEL = logging.DEBUG
LOG_LEVELS = collections.defaultdict(
lambda: DEFAULT_LOG_LEVEL,
{
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
},
)
# Lambda initializes a root logger that needs to be removed in order to set a
# different logging config
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
logging.basicConfig(
filename="iam_handler.log",
format="%(asctime)s.%(msecs)03dZ [%(name)s][%(levelname)-5s]: %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
level=LOG_LEVELS[os.environ.get("LOG_LEVEL", "").lower()],
)
log = logging.getLogger(__name__)
def _join_paths(*paths):
return "/".join(*paths).replace("//", "/")
def _read(path, encoding="utf8", **kwargs):
"""Read a file."""
with io.open(path, encoding=encoding, **kwargs) as fh_:
return fh_.read()
def _merge_iam_policy_doc(doc1, doc2):
# adopt doc2's Id
if doc2.get("Id"):
doc1["Id"] = doc2["Id"]
# let doc2 upgrade our Version
if doc2.get("Version", "") > doc1.get("Version", ""):
doc1["Version"] = doc2["Version"]
# merge in doc2's statements, overwriting any existing Sids
for doc2_statement in doc2["Statement"]:
if not doc2_statement.get("Sid"):
doc1["Statement"].append(doc2_statement)
continue
seen = False
for index, doc1_statement in enumerate(doc1["Statement"]):
if doc1_statement.get("Sid") == doc2_statement.get("Sid"):
doc1["Statement"][index] = doc2_statement
seen = True
break
if not seen:
doc1["Statement"].append(doc2_statement)
return doc1
def main(name, template_paths, template, **kwargs):
"""Merge policy documents for all template paths."""
iam_policy_doc = {"Statement": []}
ret = {
"name": name,
"policy": copy.deepcopy(iam_policy_doc),
}
log.info("=" * 40)
log.info("name = %s", name)
log.info("template = %s", template)
log.info("template_paths = %s", template_paths)
for path in template_paths:
policy_path = _join_paths([path, template])
if os.path.isfile(policy_path):
ret["policy"] = _merge_iam_policy_doc(
ret["policy"], json.loads(_read(policy_path))
)
ret["policy"] = json.dumps(ret["policy"])
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"json",
type=argparse.FileType("r"),
default=sys.stdin,
help="Parses input from a json file or stdin",
)
args = parser.parse_args()
json_args = {}
with args.json as fp_:
json_args = json.load(fp_)
for arg, val in json_args.items():
json_args[arg] = json.loads(val)
sys.exit(print(json.dumps(main(**json_args)))) | 0.371707 | 0.077378 |
"""Compute the RNA features."""
from __future__ import print_function
import argparse
import sys
from eden.converter.fasta import fasta_to_sequence
from eden.converter.rna.rnaplfold import rnaplfold_to_eden
from eden.graph import Vectorizer
from eden.util import vectorize as eden_vectorize
import pandas as pd
from rnacommender import fasta_utils
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class RNAVectorizer():
"""Compute the RNA features."""
def __init__(self, fasta, output, window_size=150, max_bp_span=40,
avg_bp_prob_cutoff=0.4, complexity=2, nbits=10,
njobs=-1, verbose=True):
"""
Constructor.
Parameters
----------
fasta : str
Fasta file containing the RNA sequences.
output : str
Name of the output file. The output file is an HDF5 containing a
pandas DataFrame, in which the columns are the RNA names and the
rows are the EDeN features.
window_size : int (default : 150)
Window size of RNAplfold. Average the pair
probabilities over windows of given size.
max_bp_span : int (default : 40)
Maximum allowed separation of a base pair to span.
I.e. no pairs (i,j) with j-i > span will be allowed.
avg_bp_prob_cutoff : float (default : 0.4)
Report only base pairs with an average probability > cutoff.
complexity : int (default : 2)
Complexity of the features extracted. Equivalent to
define EDeN parameters d = r = complexity.
nbits : int (default : 10)
Number of bits that defines the feature space size:
|feature space|=2^nbits.
njobs : int (default : -1)
Number of parallel jobs (default: all CPUs).
verbose : bool (default : True)
Print information to STDOUT.
"""
self.fasta = fasta
self.output = output
self.window_size = window_size
self.max_bp_span = max_bp_span
self.avg_bp_prob_cutoff = avg_bp_prob_cutoff
self.complexity = complexity
self.nbits = nbits
self.njobs = njobs
self.verbose = verbose
def _fold_sequences(self):
"""Fold the RNA sequences using RNAplfold."""
if self.verbose:
print("Folding sequences using RNAplfold -W %i -L %i -c %f \
--noLP..." % (self.window_size, self.max_bp_span,
self.avg_bp_prob_cutoff), end=' ')
sys.stdout.flush()
seqs = fasta_to_sequence(self.fasta)
graphs = rnaplfold_to_eden(seqs,
window_size=self.window_size,
max_bp_span=self.max_bp_span,
avg_bp_prob_cutoff=self.avg_bp_prob_cutoff,
max_num_edges=1)
if self.verbose:
print("Done.\n")
sys.stdout.flush()
return graphs
def _vectorize_graphs(self, graphs):
"""Vectorize the RNAplfold graphs using EDeN."""
if self.verbose:
print("Vectorizing (complexity: %i, hashing: %i bits)..." %
(self.complexity, self.nbits), end=' ')
sys.stdout.flush()
vec = Vectorizer(complexity=self.complexity, nbits=self.nbits)
x_sparse = eden_vectorize(graphs, vectorizer=vec, n_jobs=self.njobs)
if self.verbose:
print("Done.\n")
sys.stdout.flush()
return x_sparse.todense()
def vectorize(self):
"""Produce the RNAfeatures."""
names = fasta_utils.seq_names(self.fasta)
graphs = self._fold_sequences()
x = self._vectorize_graphs(graphs)
df = pd.DataFrame(x.T[1:], columns=names)
store = pd.io.pytables.HDFStore(self.output)
store['features'] = df
store.close()
if self.verbose:
print("Done.\n")
print("RNA features saved to %s" % self.output)
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fasta', metavar='fasta', type=str,
help="""Fasta file containing the RNA sequences.""")
parser.add_argument('output', metavar='output', type=str,
help="""File name of the HDF Store to save the RNA \
features.""")
# RNAplfold parameters
parser.add_argument('--window-size', metavar='window_size', type=int,
default=150, help="""Window size of RNAplfold.""")
parser.add_argument('--max-bp-span', metavar='max_bp_span', type=int,
default=40, help="""Maximum allowed separation of a \
base pair to span.""")
parser.add_argument('--avg-bp-prob-cutoff', metavar='avg_bp_prob_cutoff',
type=float, default=0.4, help="""Report only base \
pairs with an average probability > cutoff.""")
# EDeN parameters
parser.add_argument('--complexity', metavar='complexity', type=int,
default=2, help="""Complexity of the features \
extracted.""")
parser.add_argument('--nbits', metavar='nbits', type=int, default=10,
help="""Number of bits that defines the feature space \
size: |feature space|=2^nbits.""")
# Other paramentes
parser.add_argument('--njobs', metavar='njobs', type=int, default=-1,
help="""Number of parallel jobs (-1 means all \
CPUs).""")
parser.add_argument('--quiet', dest='quiet', action='store_true',
default=False, help="""Do not print information at \
STDOUT.""")
args = parser.parse_args()
v = RNAVectorizer(fasta=args.fasta,
output=args.output,
window_size=args.window_size,
max_bp_span=args.max_bp_span,
avg_bp_prob_cutoff=args.avg_bp_prob_cutoff,
complexity=args.complexity,
nbits=args.nbits,
njobs=args.njobs,
verbose=(not args.quiet))
v.vectorize() | rnacommender/rnafeatures.py | """Compute the RNA features."""
from __future__ import print_function
import argparse
import sys
from eden.converter.fasta import fasta_to_sequence
from eden.converter.rna.rnaplfold import rnaplfold_to_eden
from eden.graph import Vectorizer
from eden.util import vectorize as eden_vectorize
import pandas as pd
from rnacommender import fasta_utils
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class RNAVectorizer():
"""Compute the RNA features."""
def __init__(self, fasta, output, window_size=150, max_bp_span=40,
avg_bp_prob_cutoff=0.4, complexity=2, nbits=10,
njobs=-1, verbose=True):
"""
Constructor.
Parameters
----------
fasta : str
Fasta file containing the RNA sequences.
output : str
Name of the output file. The output file is an HDF5 containing a
pandas DataFrame, in which the columns are the RNA names and the
rows are the EDeN features.
window_size : int (default : 150)
Window size of RNAplfold. Average the pair
probabilities over windows of given size.
max_bp_span : int (default : 40)
Maximum allowed separation of a base pair to span.
I.e. no pairs (i,j) with j-i > span will be allowed.
avg_bp_prob_cutoff : float (default : 0.4)
Report only base pairs with an average probability > cutoff.
complexity : int (default : 2)
Complexity of the features extracted. Equivalent to
define EDeN parameters d = r = complexity.
nbits : int (default : 10)
Number of bits that defines the feature space size:
|feature space|=2^nbits.
njobs : int (default : -1)
Number of parallel jobs (default: all CPUs).
verbose : bool (default : True)
Print information to STDOUT.
"""
self.fasta = fasta
self.output = output
self.window_size = window_size
self.max_bp_span = max_bp_span
self.avg_bp_prob_cutoff = avg_bp_prob_cutoff
self.complexity = complexity
self.nbits = nbits
self.njobs = njobs
self.verbose = verbose
def _fold_sequences(self):
"""Fold the RNA sequences using RNAplfold."""
if self.verbose:
print("Folding sequences using RNAplfold -W %i -L %i -c %f \
--noLP..." % (self.window_size, self.max_bp_span,
self.avg_bp_prob_cutoff), end=' ')
sys.stdout.flush()
seqs = fasta_to_sequence(self.fasta)
graphs = rnaplfold_to_eden(seqs,
window_size=self.window_size,
max_bp_span=self.max_bp_span,
avg_bp_prob_cutoff=self.avg_bp_prob_cutoff,
max_num_edges=1)
if self.verbose:
print("Done.\n")
sys.stdout.flush()
return graphs
def _vectorize_graphs(self, graphs):
"""Vectorize the RNAplfold graphs using EDeN."""
if self.verbose:
print("Vectorizing (complexity: %i, hashing: %i bits)..." %
(self.complexity, self.nbits), end=' ')
sys.stdout.flush()
vec = Vectorizer(complexity=self.complexity, nbits=self.nbits)
x_sparse = eden_vectorize(graphs, vectorizer=vec, n_jobs=self.njobs)
if self.verbose:
print("Done.\n")
sys.stdout.flush()
return x_sparse.todense()
def vectorize(self):
"""Produce the RNAfeatures."""
names = fasta_utils.seq_names(self.fasta)
graphs = self._fold_sequences()
x = self._vectorize_graphs(graphs)
df = pd.DataFrame(x.T[1:], columns=names)
store = pd.io.pytables.HDFStore(self.output)
store['features'] = df
store.close()
if self.verbose:
print("Done.\n")
print("RNA features saved to %s" % self.output)
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fasta', metavar='fasta', type=str,
help="""Fasta file containing the RNA sequences.""")
parser.add_argument('output', metavar='output', type=str,
help="""File name of the HDF Store to save the RNA \
features.""")
# RNAplfold parameters
parser.add_argument('--window-size', metavar='window_size', type=int,
default=150, help="""Window size of RNAplfold.""")
parser.add_argument('--max-bp-span', metavar='max_bp_span', type=int,
default=40, help="""Maximum allowed separation of a \
base pair to span.""")
parser.add_argument('--avg-bp-prob-cutoff', metavar='avg_bp_prob_cutoff',
type=float, default=0.4, help="""Report only base \
pairs with an average probability > cutoff.""")
# EDeN parameters
parser.add_argument('--complexity', metavar='complexity', type=int,
default=2, help="""Complexity of the features \
extracted.""")
parser.add_argument('--nbits', metavar='nbits', type=int, default=10,
help="""Number of bits that defines the feature space \
size: |feature space|=2^nbits.""")
# Other paramentes
parser.add_argument('--njobs', metavar='njobs', type=int, default=-1,
help="""Number of parallel jobs (-1 means all \
CPUs).""")
parser.add_argument('--quiet', dest='quiet', action='store_true',
default=False, help="""Do not print information at \
STDOUT.""")
args = parser.parse_args()
v = RNAVectorizer(fasta=args.fasta,
output=args.output,
window_size=args.window_size,
max_bp_span=args.max_bp_span,
avg_bp_prob_cutoff=args.avg_bp_prob_cutoff,
complexity=args.complexity,
nbits=args.nbits,
njobs=args.njobs,
verbose=(not args.quiet))
v.vectorize() | 0.803714 | 0.444565 |
from files.launcher import Launcher
Launcher.set('ulauncher')
import os
from files.service import FilesService
from files.icon import IconRegistry
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, PreferencesEvent, PreferencesUpdateEvent, SystemExitEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.OpenAction import OpenAction
home = os.path.expanduser('~')
replace_home = lambda path: path.replace(home, '~', 1)
class FilesExtension(Extension):
def __init__(self):
super(FilesExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(PreferencesEvent, PreferencesEventListener())
self.subscribe(PreferencesUpdateEvent, PreferencesUpdateEventListener())
self.subscribe(SystemExitEvent, SystemExitEventListener)
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
query_str = event.get_argument() or ''
results = FilesService().search(query_str)
items = []
for result in results:
icon = IconRegistry().get_icon(result)
name = result.name
path = result.path
on_enter = OpenAction(path)
items.append(ExtensionResultItem(
icon=icon,
name=name,
description=replace_home(path),
highlightable=True,
on_enter=on_enter
))
return RenderResultListAction(items)
class PreferencesEventListener(EventListener):
def on_event(self, event, extension):
super().on_event(event, extension)
scan_every_minutes = event.preferences['scan_every_minutes']
directories = event.preferences['directories']
search_after_characters = event.preferences['search_after_characters']
search_max_results = event.preferences['search_max_results']
search_threshold = event.preferences['search_threshold']
ignore_filename = event.preferences['ignore_filename']
icon_theme = event.preferences['icon_theme']
use_built_in_folder_theme = event.preferences['use_built_in_folder_theme']
FilesService().load_settings_ulauncher(scan_every_minutes, directories,
search_after_characters, search_max_results,
search_threshold, ignore_filename,
icon_theme, use_built_in_folder_theme)
FilesService().run()
class PreferencesUpdateEventListener(EventListener):
def on_event(self, event, extension):
super().on_event(event, extension)
service = FilesService()
if event.id == 'scan_every_minutes':
service.set_scan_every_minutes(event.new_value)
elif event.id == 'directories':
service.set_directories(event.new_value)
elif event.id == 'search_after_characters':
service.set_search_after_characters(event.new_value)
elif event.id == 'search_max_results':
service.set_search_max_results(event.new_value)
elif event.id == 'search_threshold':
service.set_search_threshold(event.new_value)
elif event.id == 'ignore_filename':
service.set_ignore_filename(event.new_value)
elif event.id == 'icon_theme':
service.set_icon_theme(event.new_value)
elif event.id == 'use_built_in_folder_theme':
service.set_use_built_in_folder_theme(event.new_value)
class SystemExitEventListener(EventListener):
def on_event(event, extension):
FilesService().stop()
if __name__ == '__main__':
FilesExtension().run() | main.py | from files.launcher import Launcher
Launcher.set('ulauncher')
import os
from files.service import FilesService
from files.icon import IconRegistry
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, PreferencesEvent, PreferencesUpdateEvent, SystemExitEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.OpenAction import OpenAction
home = os.path.expanduser('~')
replace_home = lambda path: path.replace(home, '~', 1)
class FilesExtension(Extension):
def __init__(self):
super(FilesExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(PreferencesEvent, PreferencesEventListener())
self.subscribe(PreferencesUpdateEvent, PreferencesUpdateEventListener())
self.subscribe(SystemExitEvent, SystemExitEventListener)
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
query_str = event.get_argument() or ''
results = FilesService().search(query_str)
items = []
for result in results:
icon = IconRegistry().get_icon(result)
name = result.name
path = result.path
on_enter = OpenAction(path)
items.append(ExtensionResultItem(
icon=icon,
name=name,
description=replace_home(path),
highlightable=True,
on_enter=on_enter
))
return RenderResultListAction(items)
class PreferencesEventListener(EventListener):
def on_event(self, event, extension):
super().on_event(event, extension)
scan_every_minutes = event.preferences['scan_every_minutes']
directories = event.preferences['directories']
search_after_characters = event.preferences['search_after_characters']
search_max_results = event.preferences['search_max_results']
search_threshold = event.preferences['search_threshold']
ignore_filename = event.preferences['ignore_filename']
icon_theme = event.preferences['icon_theme']
use_built_in_folder_theme = event.preferences['use_built_in_folder_theme']
FilesService().load_settings_ulauncher(scan_every_minutes, directories,
search_after_characters, search_max_results,
search_threshold, ignore_filename,
icon_theme, use_built_in_folder_theme)
FilesService().run()
class PreferencesUpdateEventListener(EventListener):
def on_event(self, event, extension):
super().on_event(event, extension)
service = FilesService()
if event.id == 'scan_every_minutes':
service.set_scan_every_minutes(event.new_value)
elif event.id == 'directories':
service.set_directories(event.new_value)
elif event.id == 'search_after_characters':
service.set_search_after_characters(event.new_value)
elif event.id == 'search_max_results':
service.set_search_max_results(event.new_value)
elif event.id == 'search_threshold':
service.set_search_threshold(event.new_value)
elif event.id == 'ignore_filename':
service.set_ignore_filename(event.new_value)
elif event.id == 'icon_theme':
service.set_icon_theme(event.new_value)
elif event.id == 'use_built_in_folder_theme':
service.set_use_built_in_folder_theme(event.new_value)
class SystemExitEventListener(EventListener):
def on_event(event, extension):
FilesService().stop()
if __name__ == '__main__':
FilesExtension().run() | 0.267217 | 0.039379 |
import unittest
import sys
import rosunit
from mock import patch
from parameterized import parameterized, param
from fiware_ros_bridge.logging import getLogger
class TestGetLogger(unittest.TestCase):
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_wo_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
log_message = '[{name}:{caller}] {message}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_w_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
arg0 = 'arg0'
arg1 = 'arg1'
kwargs0 = 'kwargs0'
kwargs1 = 'kwargs1'
log_message = '[{name}:{caller}] {message}, {arg1}, {kwargs0}, {arg0}, {kwargs1}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
arg0=arg0,
arg1=arg1,
kwargs0=kwargs0,
kwargs1=kwargs1,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message + ', {1}, {kwargs0}, {0}, {kwargs1}', arg0, arg1, kwargs1=kwargs1, kwargs0=kwargs0)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
if __name__ == '__main__':
rosunit.unitrun('fiware_ros_bridge', 'test_logging', TestGetLogger) | tests/test_logging.py | import unittest
import sys
import rosunit
from mock import patch
from parameterized import parameterized, param
from fiware_ros_bridge.logging import getLogger
class TestGetLogger(unittest.TestCase):
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_wo_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
log_message = '[{name}:{caller}] {message}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
@parameterized.expand([
param(logm='debugf', rosm='logdebug'),
param(logm='infof', rosm='loginfo'),
param(logm='warnf', rosm='logwarn'),
param(logm='errorf', rosm='logerr'),
param(logm='fatalf', rosm='logfatal'),
])
@patch('fiware_ros_bridge.logging.rospy')
def test_log_w_params(self, mocked_rospy, logm, rosm):
name = 'foo'
message = 'test message'
arg0 = 'arg0'
arg1 = 'arg1'
kwargs0 = 'kwargs0'
kwargs1 = 'kwargs1'
log_message = '[{name}:{caller}] {message}, {arg1}, {kwargs0}, {arg0}, {kwargs1}'.format(
name=name,
caller=self.__class__.__name__ + '.' + sys._getframe().f_code.co_name,
message=message,
arg0=arg0,
arg1=arg1,
kwargs0=kwargs0,
kwargs1=kwargs1,
)
logger = getLogger(name)
assert logger.name == name
getattr(logger, logm)(message + ', {1}, {kwargs0}, {0}, {kwargs1}', arg0, arg1, kwargs1=kwargs1, kwargs0=kwargs0)
getattr(mocked_rospy, rosm).assert_called_once_with(log_message)
if __name__ == '__main__':
rosunit.unitrun('fiware_ros_bridge', 'test_logging', TestGetLogger) | 0.310172 | 0.170819 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from collections import Counter
import sys
import pickle
# Parameters
params = { 'N_sub': 300,
'B_tags': 5,
'reg_C': 10.0 }
# Set random seed for reproducible output
np.random.seed(137)
# Load pickled data
source_dir = sys.argv[1]
input = open('%s/parse.pkl' % source_dir, 'rb')
data = pickle.load(input)
tags, scp_tags, links = Counter(data['tags']), data['scp_tags'], data['links']
# Restrict to subset of most common tags
tags = tags.most_common(params['B_tags'])
print 'Tags used: ' + ' '.join([t for (t,c) in tags])
# Define mapping from covariates and parameters alpha, beta, kappa to
# edge probabilties
def edge_probabilities(alpha, beta, kappa, x):
N = x.shape[0]
logit_P = np.zeros((N,N))
for i in range(N):
logit_P[i,:] += alpha[0,i]
for j in range(N):
logit_P[:,j] += alpha[0,j]
logit_P += np.dot(x, beta)
logit_P += kappa
return 1.0 / (np.exp(-logit_P) + 1.0)
# Define negative log-likelihood
def neg_log_likelihood(alpha, beta, kappa, A, x):
P = edge_probabilities(alpha, beta, kappa, x)
return -(np.sum(np.log(P ** A) + np.log((1.0 - P) ** (1.0 - A))) -
np.sum(np.log(np.diag(P) ** np.diag(A)) +
np.log((1.0 - np.diag(P)) ** (1.0 - np.diag(A)))))
# Procedure to find MLE via logistic regression
def infer(A, x, fit_alpha = False):
N = A.shape[0]
B = x.shape[2]
offdiagonal = -np.diag([True] * N).reshape((N*N,))
lr = LogisticRegression(fit_intercept = True,
C = params['reg_C'], penalty = 'l2')
y = A.reshape((N*N,))[offdiagonal]
if fit_alpha:
Phi = np.zeros((N*N,(B + 2*N)))
else:
Phi = np.zeros((N*N,B))
for b in range(B):
Phi[:,b] = x[:,:,b].reshape((N*N,))
if fit_alpha:
for i in range(N):
phi_row = np.zeros((N,N))
phi_row[i,:] = 1.0
Phi[:,B + i] = phi_row.reshape((N*N,))
for j in range(N):
phi_col = np.zeros((N,N))
phi_col[:,j] = 1.0
Phi[:,B + N + j] = phi_col.reshape((N*N,))
Phi = Phi[offdiagonal]
lr.fit(Phi, y)
coefs = lr.coef_[0]
intercept = lr.intercept_[0]
alpha = np.zeros((2,N))
out = {'alpha': alpha, 'beta': coefs[0:B], 'kappa': intercept}
if fit_alpha:
out['alpha'][0] = coefs[B:(B + N)]
out['alpha'][1] = coefs[(B + N):(B + 2*N)]
return out
# Procedure for extracting random subnetwork
def subnetwork(n):
inds = np.arange(N)
np.random.shuffle(inds)
sub = inds[0:n]
A_sub = A[sub][:,sub]
x_sub = x[sub][:,sub]
return A_sub, x_sub
# Construct connectivity matrix and covariate design matrix
nodes = scp_tags.keys()
N = len(nodes)
A = np.zeros((N,N), dtype = np.bool)
for i in range(N):
i_links = links[nodes[i]]
for j in range(N):
if nodes[j] in i_links:
A[i,j] = True
B = 2 * params['B_tags']
x = np.zeros((N,N,B))
for b in range(params['B_tags']):
tag, c = tags[b]
for i in range(N):
if tag in scp_tags[nodes[i]]:
x[i,:,b] = True
for b in range(params['B_tags']):
tag, c = tags[b]
for j in range(N):
if tag in scp_tags[nodes[j]]:
x[:,j,b+params['B_tags']] = True
# Fit model to random subnetwork
A_sub, x_sub = subnetwork(params['N_sub'])
fit = infer(A_sub, x_sub, fit_alpha = True)
print fit['beta'][0:params['B_tags']]
print fit['beta'][params['B_tags']:B]
plt.figure()
plt.hist(fit['alpha'][0], bins = 50)
plt.show() | analyze.py |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from collections import Counter
import sys
import pickle
# Parameters
params = { 'N_sub': 300,
'B_tags': 5,
'reg_C': 10.0 }
# Set random seed for reproducible output
np.random.seed(137)
# Load pickled data
source_dir = sys.argv[1]
input = open('%s/parse.pkl' % source_dir, 'rb')
data = pickle.load(input)
tags, scp_tags, links = Counter(data['tags']), data['scp_tags'], data['links']
# Restrict to subset of most common tags
tags = tags.most_common(params['B_tags'])
print 'Tags used: ' + ' '.join([t for (t,c) in tags])
# Define mapping from covariates and parameters alpha, beta, kappa to
# edge probabilties
def edge_probabilities(alpha, beta, kappa, x):
N = x.shape[0]
logit_P = np.zeros((N,N))
for i in range(N):
logit_P[i,:] += alpha[0,i]
for j in range(N):
logit_P[:,j] += alpha[0,j]
logit_P += np.dot(x, beta)
logit_P += kappa
return 1.0 / (np.exp(-logit_P) + 1.0)
# Define negative log-likelihood
def neg_log_likelihood(alpha, beta, kappa, A, x):
P = edge_probabilities(alpha, beta, kappa, x)
return -(np.sum(np.log(P ** A) + np.log((1.0 - P) ** (1.0 - A))) -
np.sum(np.log(np.diag(P) ** np.diag(A)) +
np.log((1.0 - np.diag(P)) ** (1.0 - np.diag(A)))))
# Procedure to find MLE via logistic regression
def infer(A, x, fit_alpha = False):
N = A.shape[0]
B = x.shape[2]
offdiagonal = -np.diag([True] * N).reshape((N*N,))
lr = LogisticRegression(fit_intercept = True,
C = params['reg_C'], penalty = 'l2')
y = A.reshape((N*N,))[offdiagonal]
if fit_alpha:
Phi = np.zeros((N*N,(B + 2*N)))
else:
Phi = np.zeros((N*N,B))
for b in range(B):
Phi[:,b] = x[:,:,b].reshape((N*N,))
if fit_alpha:
for i in range(N):
phi_row = np.zeros((N,N))
phi_row[i,:] = 1.0
Phi[:,B + i] = phi_row.reshape((N*N,))
for j in range(N):
phi_col = np.zeros((N,N))
phi_col[:,j] = 1.0
Phi[:,B + N + j] = phi_col.reshape((N*N,))
Phi = Phi[offdiagonal]
lr.fit(Phi, y)
coefs = lr.coef_[0]
intercept = lr.intercept_[0]
alpha = np.zeros((2,N))
out = {'alpha': alpha, 'beta': coefs[0:B], 'kappa': intercept}
if fit_alpha:
out['alpha'][0] = coefs[B:(B + N)]
out['alpha'][1] = coefs[(B + N):(B + 2*N)]
return out
# Procedure for extracting random subnetwork
def subnetwork(n):
inds = np.arange(N)
np.random.shuffle(inds)
sub = inds[0:n]
A_sub = A[sub][:,sub]
x_sub = x[sub][:,sub]
return A_sub, x_sub
# Construct connectivity matrix and covariate design matrix
nodes = scp_tags.keys()
N = len(nodes)
A = np.zeros((N,N), dtype = np.bool)
for i in range(N):
i_links = links[nodes[i]]
for j in range(N):
if nodes[j] in i_links:
A[i,j] = True
B = 2 * params['B_tags']
x = np.zeros((N,N,B))
for b in range(params['B_tags']):
tag, c = tags[b]
for i in range(N):
if tag in scp_tags[nodes[i]]:
x[i,:,b] = True
for b in range(params['B_tags']):
tag, c = tags[b]
for j in range(N):
if tag in scp_tags[nodes[j]]:
x[:,j,b+params['B_tags']] = True
# Fit model to random subnetwork
A_sub, x_sub = subnetwork(params['N_sub'])
fit = infer(A_sub, x_sub, fit_alpha = True)
print fit['beta'][0:params['B_tags']]
print fit['beta'][params['B_tags']:B]
plt.figure()
plt.hist(fit['alpha'][0], bins = 50)
plt.show() | 0.599954 | 0.541166 |
from ast import literal_eval as make_tuple
from tqdm import tqdm
from utils.steiner_tree_te import *
import networkx as nx
import itertools
import copy
def relabel_nodes_str_to_tuple(G):
    """Return a copy of G whose string node labels are parsed into tuples.

    Each node label is expected to be the repr of a tuple, e.g. "(1, 2)";
    it is converted with ast.literal_eval (imported as make_tuple).
    """
    mapping = {node: make_tuple(node) for node in G.nodes}
    return nx.relabel.relabel_nodes(G, mapping)
# This dummy node gets connected to all other nodes.
# For directed graphs, direction is from dummy node to all other nodes
def add_dummy_node(G, r, node_prob, gamma):
    """Insert dummy root r into G (in place).

    r gets attribute prob=node_prob and a weight-gamma edge from r to
    every other node already in G.
    """
    G.add_nodes_from([(r, {"prob": node_prob})])
    new_edges = [(r, v, gamma) for v in G.nodes if v != r]
    G.add_weighted_edges_from(ebunch_to_add=new_edges)
# This SP computation can handle negative weights (but no negative cycles)
def compute_SP_r_to_all_and_all_to_X(G, r, X):
    """Shortest paths from r to every node and from every node to each t in X.

    Returns SP where SP[u][v] is the node list of a shortest u->v path.
    Bellman-Ford is used, so negative edge weights are allowed as long
    as there are no negative cycles.
    """
    SP = dict()
    # (1) Shortest paths from the root r to all reachable nodes.
    SP[r] = copy.deepcopy(
        nx.single_source_bellman_ford_path(G, source=r, weight="weight"))
    # (2) Shortest paths from every node to each terminal t:
    # run the single-source search from t on the reversed graph, then
    # flip each resulting path back into original edge direction.
    G_reversed = G.reverse()
    for t in tqdm(X):
        paths_into_t = nx.single_source_bellman_ford_path(
            G_reversed, source=t, weight="weight")
        for src, path in paths_into_t.items():
            SP.setdefault(src, dict())[t] = copy.deepcopy(path[::-1])
    return SP
# From the input graph, generate metric graph
# Input is G and set of nodes to include in the metric graph.
# The distance between nodes in the metric graph is the actual distance between two nodes in the real graph
# We need shortest path btw root to all terminals, and all terminals to all other terminals.
def gen_metric_graph(G, SP, metric_node_set):
    """Build the metric closure of G over metric_node_set.

    Every ordered pair (u, v) with a precomputed path in SP becomes a
    direct edge weighted by the real path length in G; pairs without a
    precomputed path are omitted.
    """
    G_metric = nx.DiGraph()
    edgelist = []
    for u, v in itertools.permutations(metric_node_set, 2):
        # Narrow exception handling: only a missing SP entry means
        # "no path"; the old bare `except:` also swallowed real errors
        # (including KeyboardInterrupt).
        try:
            sp = SP[u][v]
        except KeyError:
            continue
        edgelist.append((u, v, get_path_length(G, sp)))
    G_metric.add_weighted_edges_from(ebunch_to_add=edgelist)
    return G_metric
# This algorithm assumes that G do not have cycles
def get_min_cost_arborescence(G_metric, r):
    """Select zero-reduced-cost incoming edges to form an arborescence rooted at r.

    For every node except the root, the minimum incoming edge weight is
    subtracted from all of its incoming edges, and the edges whose
    reduced weight is (numerically) zero are kept.

    WARNING: mutates the edge weights of G_metric in place.
    """
    edgelist = []
    for v in G_metric.nodes():
        if v == r:
            continue
        # line1: find the cheapest incoming edge weight for v.
        # float("inf") replaces the old 99999999.0 sentinel, which broke
        # for graphs whose weights exceed it.
        min_in_weight = float("inf")
        for edge in G_metric.in_edges(v):
            min_in_weight = min(min_in_weight, G_metric.edges[edge]["weight"])
        # line2: reduce every incoming weight by that minimum (in place).
        for edge in G_metric.in_edges(v):
            G_metric.edges[edge]["weight"] -= min_in_weight
        # line3: keep all (now) zero-weight incoming edges, with tolerance.
        for edge in G_metric.in_edges(v):
            if -0.00001 < G_metric.edges[edge]["weight"] < 0.00001:
                edgelist.append((edge[0], edge[1], 0.0))
    MCA = nx.DiGraph()
    MCA.add_weighted_edges_from(ebunch_to_add=edgelist)
    return MCA
# This method ensures the solution is tree
def metric_to_original_ensure_tree(G, MCA_metric, SP, X):
    """Expand metric-graph edges into real paths while keeping T a tree.

    For each metric edge (u, v), the real shortest path SP[u][v] is
    walked backwards (from v towards u); edges are collected until a
    node that is already in T is reached, so no node ever gains a
    second parent.
    """
    T = nx.DiGraph()
    for u, v in MCA_metric.edges():
        edgelist = []
        sp = SP[u][v]
        # Walk up the path, and stop adding nodes once a node is already in the tree
        for idx in range(len(sp)-2, -1, -1):
            s = sp[idx]
            t = sp[idx+1]
            weight = G.edges[s,t]["weight"]
            edgelist.append((s, t, weight))
            if s in T:
                # The rest of the path is already connected through T.
                break
        T.add_weighted_edges_from(ebunch_to_add=edgelist)
    return T
# This method makes the tree disconnected
def metric_to_original(G, MCA_metric, SP, X):
    """Expand each metric edge into its real path and merge it into T via TUT_best."""
    T = nx.DiGraph()
    for u, v in MCA_metric.edges():
        path = SP[u][v]
        # Rebuild the real path as a weighted path graph.
        path_edges = [(s, t, G.edges[s, t]["weight"])
                      for s, t in zip(path, path[1:])]
        T_path = nx.DiGraph()
        T_path.add_weighted_edges_from(ebunch_to_add=path_edges)
        TUT_best(T, T_path, X)
    return T
# This method simply adds paths
def metric_to_original_v2(G, MCA_metric, SP, X):
    """Expand every metric edge into its real path and union the paths into T.

    Unlike metric_to_original_ensure_tree, whole paths are added, so the
    result can contain nodes with several parents (not necessarily a tree).
    """
    T = nx.DiGraph()
    for u, v in MCA_metric.edges():
        edgelist = []
        sp = SP[u][v]
        for idx in range(len(sp)-1):
            s = sp[idx]
            t = sp[idx+1]
            weight = G.edges[s,t]["weight"]
            edgelist.append((s, t, weight))
        T_path = nx.DiGraph()
        T_path.add_weighted_edges_from(ebunch_to_add=edgelist)
        # TUT_best(T, T_path, X)
        # NOTE(review): `deepcopy` is not defined by the visible imports
        # (only `import copy` appears); presumably it comes from the star
        # import of utils.steiner_tree_te -- confirm.
        T.update(deepcopy(T_path))
return T | utils/networkx_operations.py | from ast import literal_eval as make_tuple
from tqdm import tqdm
from utils.steiner_tree_te import *
import networkx as nx
import itertools
import copy
def relabel_nodes_str_to_tuple(G):
node_list = list(G.nodes)
relable_node_list = []
for node in node_list:
relable_node_list.append(make_tuple(node))
mapping_dict = {}
for node, relable_node in zip(node_list, relable_node_list):
mapping_dict[node] = relable_node
G_relabelled = nx.relabel.relabel_nodes(G, mapping_dict)
return G_relabelled
# This dummy node gets connected to all other nodes.
# For directed graphs, direction is from dummy node to all other nodes
def add_dummy_node(G, r, node_prob, gamma):
G.add_nodes_from([(r, {"prob":node_prob})])
edges_to_add = []
for v in G.nodes:
if r != v:
edges_to_add.append((r, v, gamma))
G.add_weighted_edges_from(ebunch_to_add=edges_to_add)
# This SP computation can handle negative weights (but no negative cycles)
def compute_SP_r_to_all_and_all_to_X(G, r, X):
SP = dict()
# (1) Generate shortest path from r to all nodes
# p = nx.shortest_path(G, source=r, weight="weight")
# This SP algorithm can handle negative weights
p = nx.single_source_bellman_ford_path(G, source=r, weight="weight")
SP[r] = copy.deepcopy(p)
# (2) Generate shortest path from all nodes to all terminal nodes
# IDEA: reverse all edges, then compute shortest path from t to all nodes. Then, reverse the direction of final solution
G_reversed = G.reverse()
for t in tqdm(X):
# p = nx.shortest_path(G, target=t, weight="weight")
# This SP algorithm can handle negative weights
p = nx.single_source_bellman_ford_path(G_reversed, source=t, weight="weight")
for src in p.keys():
if src in SP:
SP[src][t] = copy.deepcopy(p[src][::-1])
else:
SP[src] = dict()
SP[src][t] = copy.deepcopy(p[src][::-1])
return SP
# From the input graph, generate metric graph
# Input is G and set of nodes to include in the metric graph.
# The distance between nodes in the metric graph is the actual distance between two nodes in the real graph
# We need shortest path btw root to all terminals, and all terminals to all other terminals.
def gen_metric_graph(G, SP, metric_node_set):
    """Build the metric closure of G over metric_node_set.

    Every ordered pair (u, v) with a precomputed path in SP becomes a
    direct edge weighted by the real path length in G; pairs without a
    precomputed path are omitted.
    """
    G_metric = nx.DiGraph()
    edgelist = []
    for u, v in itertools.permutations(metric_node_set, 2):
        # Narrow exception handling: only a missing SP entry means
        # "no path"; the old bare `except:` also swallowed real errors
        # (including KeyboardInterrupt).
        try:
            sp = SP[u][v]
        except KeyError:
            continue
        edgelist.append((u, v, get_path_length(G, sp)))
    G_metric.add_weighted_edges_from(ebunch_to_add=edgelist)
    return G_metric
# This algorithm assumes that G do not have cycles
def get_min_cost_arborescence(G_metric, r):
edgelist = []
for v in G_metric.nodes():
if v == r:
continue
# line1
min_cost_edge = 99999999.0
for edge in G_metric.in_edges(v):
if G_metric.edges[edge]["weight"] < min_cost_edge:
min_cost_edge = G_metric.edges[edge]["weight"]
# line2
for edge in G_metric.in_edges(v):
G_metric.edges[edge]["weight"] -= min_cost_edge
# line3
for edge in G_metric.in_edges(v):
if -0.00001< G_metric.edges[edge]["weight"] < 0.00001:
edgelist.append((edge[0], edge[1], 0.0))
MCA = nx.DiGraph()
MCA.add_weighted_edges_from(ebunch_to_add=edgelist)
return MCA
# This method ensures the solution is tree
def metric_to_original_ensure_tree(G, MCA_metric, SP, X):
T = nx.DiGraph()
for u, v in MCA_metric.edges():
edgelist = []
sp = SP[u][v]
# Walk up the path, and stop adding nodes once a node is already in the tree
for idx in range(len(sp)-2, -1, -1):
s = sp[idx]
t = sp[idx+1]
weight = G.edges[s,t]["weight"]
edgelist.append((s, t, weight))
if s in T:
break
T.add_weighted_edges_from(ebunch_to_add=edgelist)
return T
# This method makes the tree disconnected
def metric_to_original(G, MCA_metric, SP, X):
T = nx.DiGraph()
for u, v in MCA_metric.edges():
edgelist = []
sp = SP[u][v]
for idx in range(len(sp)-1):
s = sp[idx]
t = sp[idx+1]
weight = G.edges[s,t]["weight"]
edgelist.append((s, t, weight))
T_path = nx.DiGraph()
T_path.add_weighted_edges_from(ebunch_to_add=edgelist)
TUT_best(T, T_path, X)
return T
# This method simply adds paths
def metric_to_original_v2(G, MCA_metric, SP, X):
T = nx.DiGraph()
for u, v in MCA_metric.edges():
edgelist = []
sp = SP[u][v]
for idx in range(len(sp)-1):
s = sp[idx]
t = sp[idx+1]
weight = G.edges[s,t]["weight"]
edgelist.append((s, t, weight))
T_path = nx.DiGraph()
T_path.add_weighted_edges_from(ebunch_to_add=edgelist)
# TUT_best(T, T_path, X)
T.update(deepcopy(T_path))
return T | 0.487307 | 0.467271 |
import itertools
import numpy as np
import pandas as pd
import tensorflow as tf
MODEL_DIR = "model_checkpoints"
def pandas_input_fn(
    df, y_col=None, batch_size=128, num_epochs=1, shuffle=False, seed=None
):
    """Pandas input function for TensorFlow high-level API Estimator.

    Returns a zero-argument callable producing a tf.data.Dataset.
    Unlike tf.estimator.inputs.pandas_input_fn, this handles columns
    whose cells are lists/arrays by stacking them into a float32 matrix.

    Args:
        df (pd.DataFrame): Data containing features (not modified).
        y_col (str): Label column name if df has it.
        batch_size (int): Batch size for the input function.
        num_epochs (int): Number of epochs to iterate over data. None runs forever.
        shuffle (bool): If True, shuffles the data queue.
        seed (int): Random seed for shuffle.

    Returns:
        callable: () -> tf.data.Dataset
    """
    features = df.copy()  # keep the caller's DataFrame intact
    labels = features.pop(y_col).values if y_col is not None else None
    feature_arrays = {}
    for col in features.columns:
        col_values = features[col].values
        if isinstance(col_values[0], (list, np.ndarray)):
            # Object column of lists/arrays -> dense float32 matrix.
            col_values = np.array(list(col_values), dtype=np.float32)
        feature_arrays[col] = col_values
    return lambda: _dataset(
        x=feature_arrays,
        y=labels,
        batch_size=batch_size,
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
    )
def _dataset(x, y=None, batch_size=128, num_epochs=1, shuffle=False, seed=None):
    """Build the tf.data.Dataset consumed by pandas_input_fn's callable."""
    tensors = x if y is None else (x, y)
    dataset = tf.data.Dataset.from_tensor_slices(tensors)
    if shuffle:
        # Fixed 1000-element shuffle buffer, reshuffled every epoch.
        dataset = dataset.shuffle(1000, seed=seed, reshuffle_each_iteration=True)
    elif seed is not None:
        import warnings

        warnings.warn("Seed was set but `shuffle=False`. Seed will be ignored.")
    return dataset.repeat(num_epochs).batch(batch_size)
def build_optimizer(name, lr=0.001, **kwargs):
    """Get an optimizer for TensorFlow high-level API Estimator.

    Args:
        name (str): Case-insensitive optimizer name (e.g. 'adam', 'sgd').
        lr (float): Learning rate, passed as learning_rate.
        kwargs: Extra keyword arguments forwarded to the optimizer
            constructor; 'momentum' defaults momentum to 0.9.

    Returns:
        tf.train.Optimizer

    Raises:
        KeyError: If name is not one of the supported optimizers.
    """
    registry = dict(
        adadelta=tf.train.AdadeltaOptimizer,
        adagrad=tf.train.AdagradOptimizer,
        adam=tf.train.AdamOptimizer,
        ftrl=tf.train.FtrlOptimizer,
        momentum=tf.train.MomentumOptimizer,
        rmsprop=tf.train.RMSPropOptimizer,
        sgd=tf.train.GradientDescentOptimizer,
    )
    key = name.lower()
    try:
        optimizer_class = registry[key]
    except KeyError:
        raise KeyError(
            "Optimizer name should be one of: [{}]".format(", ".join(registry.keys()))
        )
    # Momentum requires its coefficient; supply the default when omitted.
    if key == "momentum":
        kwargs.setdefault("momentum", 0.9)
    return optimizer_class(learning_rate=lr, **kwargs)
def evaluation_log_hook(
    estimator,
    logger,
    true_df,
    y_col,
    eval_df,
    every_n_iter=10000,
    model_dir=None,
    batch_size=256,
    eval_fns=None,
    **eval_kwargs
):
    """Evaluation log hook for TensorFlow high-level API Estimator.
    Note, to evaluate the model in the middle of training (by using this hook),
    the model checkpointing steps should be equal or larger than the hook's since
    TensorFlow Estimator uses the last checkpoint for evaluation or prediction.
    Checkpoint frequency can be set via Estimator's run config.
    Args:
        estimator (tf.estimator.Estimator): Model to evaluate.
        logger (Logger): Custom logger to log the results. E.g., define a subclass of Logger for AzureML logging.
        true_df (pd.DataFrame): Ground-truth data.
        y_col (str): Label column name in true_df
        eval_df (pd.DataFrame): Evaluation data. May not include the label column as
            some evaluation functions do not allow.
        every_n_iter (int): Evaluation frequency (steps). Should be equal or larger than checkpointing steps.
        model_dir (str): Model directory to save the summaries to. If None, does not record.
        batch_size (int): Number of samples fed into the model at a time.
            Note, the batch size doesn't affect on evaluation results.
        eval_fns (iterable of functions): List of evaluation functions that have signature of
            (true_df, prediction_df, **eval_kwargs)->(float). If None, loss is calculated on true_df.
        **eval_kwargs: Evaluation function's keyword arguments. Note, prediction column name should be 'prediction'
    Returns:
        tf.train.SessionRunHook: Session run hook to evaluate the model while training.
    """
    # Thin convenience wrapper: all the work happens in _TrainLogHook.
    return _TrainLogHook(
        estimator,
        logger,
        true_df,
        y_col,
        eval_df,
        every_n_iter,
        model_dir,
        batch_size,
        eval_fns,
        **eval_kwargs
    )
class MetricsLogger:
    """Accumulate metric values; each metric name maps to a list of logged values."""

    def __init__(self):
        """Log metrics. Each metric's log will be stored in the corresponding list."""
        self._log = {}

    def log(self, metric, value):
        """Append value to the history list for metric (creating it if new)."""
        self._log.setdefault(metric, []).append(value)

    def get_log(self):
        """Return the dict mapping metric name -> list of logged values."""
        return self._log
class _TrainLogHook(tf.train.SessionRunHook):
    """SessionRunHook that periodically evaluates the model during training.

    Every `every_n_iter` steps it either computes the estimator's
    'average_loss' on `true_df` (when no eval_fns are given) or runs
    each function in `eval_fns` on predictions for `eval_df`, logging
    results through `logger` and, when `model_dir` is set, as
    TensorBoard summaries.
    """

    def __init__(
        self,
        estimator,
        logger,
        true_df,
        y_col,
        eval_df,
        every_n_iter=1000,
        model_dir=None,
        batch_size=256,
        eval_fns=None,
        **eval_kwargs
    ):
        """Evaluation log hook class"""
        self.model = estimator
        self.logger = logger
        self.true_df = true_df
        self.y_col = y_col
        self.eval_df = eval_df
        self.every_n_iter = every_n_iter
        self.model_dir = model_dir
        self.batch_size = batch_size
        self.eval_fns = eval_fns
        self.eval_kwargs = eval_kwargs
        # Set lazily in begin(); summaries are only written when a
        # model_dir was provided.
        self.summary_writer = None
        self.global_step_tensor = None
        self.step = 0

    def begin(self):
        # Called once before the session is created.
        if self.model_dir is not None:
            self.summary_writer = tf.summary.FileWriterCache.get(self.model_dir)
            self.global_step_tensor = tf.train.get_or_create_global_step()
        else:
            # No summaries: fall back to counting run() calls ourselves.
            self.step = 0

    def before_run(self, run_context):
        # Request the global step so after_run() sees the real training
        # step when summaries are enabled.
        if self.global_step_tensor is not None:
            requests = {"global_step": self.global_step_tensor}
            return tf.train.SessionRunArgs(requests)
        else:
            return None

    def after_run(self, run_context, run_values):
        if self.global_step_tensor is not None:
            self.step = run_values.results["global_step"]
        else:
            self.step += 1
        if self.step > 1 and self.step % self.every_n_iter == 0:
            # Silence TF logging while evaluating; restored below.
            _prev_log_level = tf.logging.get_verbosity()
            tf.logging.set_verbosity(tf.logging.ERROR)
            if self.eval_fns is None:
                result = self.model.evaluate(
                    input_fn=pandas_input_fn(
                        df=self.true_df, y_col=self.y_col, batch_size=self.batch_size
                    )
                )["average_loss"]
                self._log("validation_loss", result)
            else:
                # islice caps the prediction stream at len(eval_df).
                predictions = list(
                    itertools.islice(
                        self.model.predict(
                            input_fn=pandas_input_fn(
                                df=self.eval_df, batch_size=self.batch_size
                            )
                        ),
                        len(self.eval_df),
                    )
                )
                prediction_df = self.eval_df.copy()
                prediction_df["prediction"] = [p["predictions"][0] for p in predictions]
                for fn in self.eval_fns:
                    result = fn(self.true_df, prediction_df, **self.eval_kwargs)
                    self._log(fn.__name__, result)
            tf.logging.set_verbosity(_prev_log_level)

    def end(self, session):
        # Flush any pending summaries when the session finishes.
        if self.summary_writer is not None:
            self.summary_writer.flush()

    def _log(self, tag, value):
        # Log via the user-supplied logger and, when enabled, TensorBoard.
        self.logger.log(tag, value)
        if self.summary_writer is not None:
            summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.summary_writer.add_summary(summary, self.step) | reco_utils/common/tf_utils.py |
import itertools
import numpy as np
import pandas as pd
import tensorflow as tf
MODEL_DIR = "model_checkpoints"
def pandas_input_fn(
df, y_col=None, batch_size=128, num_epochs=1, shuffle=False, seed=None
):
"""Pandas input function for TensorFlow high-level API Estimator.
This function returns tf.data.Dataset function.
Note. tf.estimator.inputs.pandas_input_fn cannot handle array/list column properly.
For more information, see (https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn)
Args:
df (pd.DataFrame): Data containing features.
y_col (str): Label column name if df has it.
batch_size (int): Batch size for the input function.
num_epochs (int): Number of epochs to iterate over data. If None will run forever.
shuffle (bool): If True, shuffles the data queue.
seed (int): Random seed for shuffle.
Returns:
tf.data.Dataset function
"""
X_df = df.copy()
if y_col is not None:
y = X_df.pop(y_col).values
else:
y = None
X = {}
for col in X_df.columns:
values = X_df[col].values
if isinstance(values[0], (list, np.ndarray)):
values = np.array([l for l in values], dtype=np.float32)
X[col] = values
return lambda: _dataset(
x=X,
y=y,
batch_size=batch_size,
num_epochs=num_epochs,
shuffle=shuffle,
seed=seed,
)
def _dataset(x, y=None, batch_size=128, num_epochs=1, shuffle=False, seed=None):
if y is None:
dataset = tf.data.Dataset.from_tensor_slices(x)
else:
dataset = tf.data.Dataset.from_tensor_slices((x, y))
if shuffle:
dataset = dataset.shuffle(
1000, seed=seed, reshuffle_each_iteration=True # buffer size = 1000
)
elif seed is not None:
import warnings
warnings.warn("Seed was set but `shuffle=False`. Seed will be ignored.")
return dataset.repeat(num_epochs).batch(batch_size)
def build_optimizer(name, lr=0.001, **kwargs):
"""Get an optimizer for TensorFlow high-level API Estimator.
Args:
name (str): Optimizer name. Note, to use 'Momentum', should specify
lr (float): Learning rate
kwargs: Optimizer arguments as key-value pairs
Returns:
tf.train.Optimizer
"""
optimizers = dict(
adadelta=tf.train.AdadeltaOptimizer,
adagrad=tf.train.AdagradOptimizer,
adam=tf.train.AdamOptimizer,
ftrl=tf.train.FtrlOptimizer,
momentum=tf.train.MomentumOptimizer,
rmsprop=tf.train.RMSPropOptimizer,
sgd=tf.train.GradientDescentOptimizer,
)
try:
optimizer_class = optimizers[name.lower()]
except KeyError:
raise KeyError(
"Optimizer name should be one of: [{}]".format(", ".join(optimizers.keys()))
)
# assign default values
if name.lower() == "momentum" and "momentum" not in kwargs:
kwargs["momentum"] = 0.9
return optimizer_class(learning_rate=lr, **kwargs)
def evaluation_log_hook(
    estimator,
    logger,
    true_df,
    y_col,
    eval_df,
    every_n_iter=10000,
    model_dir=None,
    batch_size=256,
    eval_fns=None,
    **eval_kwargs
):
    """Evaluation log hook for TensorFlow high-level API Estimator.
    Note, to evaluate the model in the middle of training (by using this hook),
    the model checkpointing steps should be equal or larger than the hook's since
    TensorFlow Estimator uses the last checkpoint for evaluation or prediction.
    Checkpoint frequency can be set via Estimator's run config.
    Args:
        estimator (tf.estimator.Estimator): Model to evaluate.
        logger (Logger): Custom logger to log the results. E.g., define a subclass of Logger for AzureML logging.
        true_df (pd.DataFrame): Ground-truth data.
        y_col (str): Label column name in true_df
        eval_df (pd.DataFrame): Evaluation data. May not include the label column as
            some evaluation functions do not allow.
        every_n_iter (int): Evaluation frequency (steps). Should be equal or larger than checkpointing steps.
        model_dir (str): Model directory to save the summaries to. If None, does not record.
        batch_size (int): Number of samples fed into the model at a time.
            Note, the batch size doesn't affect on evaluation results.
        eval_fns (iterable of functions): List of evaluation functions that have signature of
            (true_df, prediction_df, **eval_kwargs)->(float). If None, loss is calculated on true_df.
        **eval_kwargs: Evaluation function's keyword arguments. Note, prediction column name should be 'prediction'
    Returns:
        tf.train.SessionRunHook: Session run hook to evaluate the model while training.
    """
    # Thin convenience wrapper: all the work happens in _TrainLogHook.
    return _TrainLogHook(
        estimator,
        logger,
        true_df,
        y_col,
        eval_df,
        every_n_iter,
        model_dir,
        batch_size,
        eval_fns,
        **eval_kwargs
    )
class MetricsLogger:
def __init__(self):
"""Log metrics. Each metric's log will be stored in the corresponding list."""
self._log = {}
def log(self, metric, value):
if metric not in self._log:
self._log[metric] = []
self._log[metric].append(value)
def get_log(self):
return self._log
class _TrainLogHook(tf.train.SessionRunHook):
def __init__(
self,
estimator,
logger,
true_df,
y_col,
eval_df,
every_n_iter=1000,
model_dir=None,
batch_size=256,
eval_fns=None,
**eval_kwargs
):
"""Evaluation log hook class"""
self.model = estimator
self.logger = logger
self.true_df = true_df
self.y_col = y_col
self.eval_df = eval_df
self.every_n_iter = every_n_iter
self.model_dir = model_dir
self.batch_size = batch_size
self.eval_fns = eval_fns
self.eval_kwargs = eval_kwargs
self.summary_writer = None
self.global_step_tensor = None
self.step = 0
def begin(self):
if self.model_dir is not None:
self.summary_writer = tf.summary.FileWriterCache.get(self.model_dir)
self.global_step_tensor = tf.train.get_or_create_global_step()
else:
self.step = 0
def before_run(self, run_context):
if self.global_step_tensor is not None:
requests = {"global_step": self.global_step_tensor}
return tf.train.SessionRunArgs(requests)
else:
return None
def after_run(self, run_context, run_values):
if self.global_step_tensor is not None:
self.step = run_values.results["global_step"]
else:
self.step += 1
if self.step > 1 and self.step % self.every_n_iter == 0:
_prev_log_level = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
if self.eval_fns is None:
result = self.model.evaluate(
input_fn=pandas_input_fn(
df=self.true_df, y_col=self.y_col, batch_size=self.batch_size
)
)["average_loss"]
self._log("validation_loss", result)
else:
predictions = list(
itertools.islice(
self.model.predict(
input_fn=pandas_input_fn(
df=self.eval_df, batch_size=self.batch_size
)
),
len(self.eval_df),
)
)
prediction_df = self.eval_df.copy()
prediction_df["prediction"] = [p["predictions"][0] for p in predictions]
for fn in self.eval_fns:
result = fn(self.true_df, prediction_df, **self.eval_kwargs)
self._log(fn.__name__, result)
tf.logging.set_verbosity(_prev_log_level)
def end(self, session):
if self.summary_writer is not None:
self.summary_writer.flush()
def _log(self, tag, value):
self.logger.log(tag, value)
if self.summary_writer is not None:
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.summary_writer.add_summary(summary, self.step) | 0.894709 | 0.470372 |
# Copyright (c) 2011 <NAME>
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
import sys
import codecs
from optparse import OptionParser
from screenplain.parsers import fountain
output_formats = (
'fdx', 'html', 'pdf'
)
usage = """Usage: %prog [options] [input-file [output-file]]
If a file name parameter is missing or a dash (-), input will be read
from standard input and output will be written to standard output.
Screenplain will try to auto-detect the output format if
an output-file is given. Otherwise use the --format option."""
def invalid_format(parser, message):
    """Abort option parsing with *message* plus the list of supported formats."""
    hint = 'Use --format with one of the following formats: ' + ' '.join(output_formats)
    parser.error(message + '\n' + hint)
def main(args):
    """Entry point: parse options, read a Fountain screenplay and export it.

    `args` is the argument vector without the program name.
    """
    parser = OptionParser(usage=usage)
    parser.add_option(
        '-f', '--format', dest='output_format',
        metavar='FORMAT',
        help=(
            'Set what kind of file to create. FORMAT can be one of ' +
            ', '.join(output_formats)
        )
    )
    parser.add_option(
        '--bare',
        action='store_true',
        dest='bare',
        help=(
            'For HTML output, only output the actual screenplay, '
            'not a complete HTML document.'
        )
    )
    parser.add_option(
        '--css',
        metavar='FILE',
        help=(
            'For HTML output, inline the given CSS file in the HTML document '
            'instead of the default.'
        )
    )
    parser.add_option(
        '--strong',
        action='store_true',
        dest='strong',
        help=(
            'For PDF output, scene headings will appear '
            'Bold and Underlined.'
        )
    )
    options, args = parser.parse_args(args)
    if len(args) >= 3:
        parser.error('Too many arguments')
    # A missing positional or '-' means stdin/stdout respectively.
    # NOTE(review): `input` and `format` below shadow Python builtins.
    input_file = (len(args) > 0 and args[0] != '-') and args[0] or None
    output_file = (len(args) > 1 and args[1] != '-') and args[1] or None
    format = options.output_format
    if format is None and output_file:
        # Auto-detect the output format from the file extension.
        if output_file.endswith('.fdx'):
            format = 'fdx'
        elif output_file.endswith('.html'):
            format = 'html'
        elif output_file.endswith('.pdf'):
            format = 'pdf'
        else:
            invalid_format(
                parser,
                'Could not detect output format from file name ' + output_file
            )
    if format not in output_formats:
        # parser.error() inside invalid_format exits the program.
        invalid_format(
            parser, 'Unsupported output format: "%s".' % format
        )
    # utf-8-sig transparently strips a BOM when the file has one.
    if input_file:
        input = codecs.open(input_file, 'r', 'utf-8-sig')
    else:
        input = codecs.getreader('utf-8')(sys.stdin.buffer)
    screenplay = fountain.parse(input)
    # PDF output is binary; every other format is utf-8 text.
    if format == 'pdf':
        output_encoding = None
    else:
        output_encoding = 'utf-8'
    if output_file:
        if output_encoding:
            output = codecs.open(output_file, 'w', output_encoding)
        else:
            output = open(output_file, 'wb')
    else:
        if output_encoding:
            output = codecs.getwriter(output_encoding)(sys.stdout.buffer)
        else:
            output = sys.stdout.buffer
    try:
        # NOTE(review): 'text' is not in output_formats, so the format
        # validation above always rejects it and this branch is
        # unreachable -- confirm whether 'text' should be supported.
        if format == 'text':
            from screenplain.export.text import to_text
            to_text(screenplay, output)
        elif format == 'fdx':
            from screenplain.export.fdx import to_fdx
            to_fdx(screenplay, output)
        elif format == 'html':
            from screenplain.export.html import convert
            convert(
                screenplay, output,
                css_file=options.css, bare=options.bare
            )
        elif format == 'pdf':
            from screenplain.export.pdf import to_pdf
            to_pdf(screenplay, output, is_strong=options.strong)
    finally:
        # Only close handles this function opened; never close stdout.
        if output_file:
            output.close()
def cli():
    """setup.py entry point for console scripts."""
    # Strip argv[0] (the program name) and delegate to main().
    main(sys.argv[1:])
if __name__ == '__main__':
main(sys.argv[1:]) | screenplain/main.py |
# Copyright (c) 2011 <NAME>
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
import sys
import codecs
from optparse import OptionParser
from screenplain.parsers import fountain
output_formats = (
'fdx', 'html', 'pdf'
)
usage = """Usage: %prog [options] [input-file [output-file]]
If a file name parameter is missing or a dash (-), input will be read
from standard input and output will be written to standard output.
Screenplain will try to auto-detect the output format if
an output-file is given. Otherwise use the --format option."""
def invalid_format(parser, message):
parser.error(
'%s\nUse --format with one of the following formats: %s' %
(message, ' '.join(output_formats))
)
def main(args):
parser = OptionParser(usage=usage)
parser.add_option(
'-f', '--format', dest='output_format',
metavar='FORMAT',
help=(
'Set what kind of file to create. FORMAT can be one of ' +
', '.join(output_formats)
)
)
parser.add_option(
'--bare',
action='store_true',
dest='bare',
help=(
'For HTML output, only output the actual screenplay, '
'not a complete HTML document.'
)
)
parser.add_option(
'--css',
metavar='FILE',
help=(
'For HTML output, inline the given CSS file in the HTML document '
'instead of the default.'
)
)
parser.add_option(
'--strong',
action='store_true',
dest='strong',
help=(
'For PDF output, scene headings will appear '
'Bold and Underlined.'
)
)
options, args = parser.parse_args(args)
if len(args) >= 3:
parser.error('Too many arguments')
input_file = (len(args) > 0 and args[0] != '-') and args[0] or None
output_file = (len(args) > 1 and args[1] != '-') and args[1] or None
format = options.output_format
if format is None and output_file:
if output_file.endswith('.fdx'):
format = 'fdx'
elif output_file.endswith('.html'):
format = 'html'
elif output_file.endswith('.pdf'):
format = 'pdf'
else:
invalid_format(
parser,
'Could not detect output format from file name ' + output_file
)
if format not in output_formats:
invalid_format(
parser, 'Unsupported output format: "%s".' % format
)
if input_file:
input = codecs.open(input_file, 'r', 'utf-8-sig')
else:
input = codecs.getreader('utf-8')(sys.stdin.buffer)
screenplay = fountain.parse(input)
if format == 'pdf':
output_encoding = None
else:
output_encoding = 'utf-8'
if output_file:
if output_encoding:
output = codecs.open(output_file, 'w', output_encoding)
else:
output = open(output_file, 'wb')
else:
if output_encoding:
output = codecs.getwriter(output_encoding)(sys.stdout.buffer)
else:
output = sys.stdout.buffer
try:
if format == 'text':
from screenplain.export.text import to_text
to_text(screenplay, output)
elif format == 'fdx':
from screenplain.export.fdx import to_fdx
to_fdx(screenplay, output)
elif format == 'html':
from screenplain.export.html import convert
convert(
screenplay, output,
css_file=options.css, bare=options.bare
)
elif format == 'pdf':
from screenplain.export.pdf import to_pdf
to_pdf(screenplay, output, is_strong=options.strong)
finally:
if output_file:
output.close()
def cli():
"""setup.py entry point for console scripts."""
main(sys.argv[1:])
if __name__ == '__main__':
main(sys.argv[1:]) | 0.591487 | 0.250683 |
import unittest
from pypika import (
Array,
Bracket,
Dialects,
PostgreSQLQuery,
Query,
Tables,
Tuple,
)
class TupleTests(unittest.TestCase):
    """SQL-generation tests for pypika Tuple in equality, IN and JOIN clauses."""

    # Shared fixture tables used by every test.
    table_abc, table_efg = Tables('abc', 'efg')

    def test_tuple_equality_tuple_on_both(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar) == Tuple(1, 2))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar")=(1,2)', str(q))

    def test_tuple_equality_tuple_on_left(self):
        # A plain Python tuple on the right is coerced by Tuple.__eq__.
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar) == (1, 2))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar")=(1,2)', str(q))

    def test_tuple_equality_tuple_on_right(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where((self.table_abc.foo, self.table_abc.bar) == Tuple(1, 2))
        # Order is reversed due to lack of right equals method
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE (1,2)=("foo","bar")', str(q))

    def test_tuple_in_using_python_tuples(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar).isin([(1, 1), (2, 2), (3, 3)]))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar") IN ((1,1),(2,2),(3,3))', str(q))

    def test_tuple_in_using_pypika_tuples(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar).isin([Tuple(1, 1), Tuple(2, 2), Tuple(3, 3)]))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar") IN ((1,1),(2,2),(3,3))', str(q))

    def test_tuple_in_using_mixed_tuples(self):
        # Python tuples and pypika Tuples may be mixed inside isin().
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar).isin([(1, 1), Tuple(2, 2), (3, 3)]))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar") IN ((1,1),(2,2),(3,3))', str(q))

    def test_tuples_in_join(self):
        query = Query.from_(self.table_abc) \
            .join(self.table_efg) \
            .on(self.table_abc.foo == self.table_efg.bar) \
            .select('*') \
            .where(Tuple(self.table_abc.foo, self.table_efg.bar).isin([(1, 1), Tuple(2, 2), (3, 3)]))
        self.assertEqual('SELECT * FROM "abc" JOIN "efg" ON "abc"."foo"="efg"."bar" '
                         'WHERE ("abc"."foo","efg"."bar") IN ((1,1),(2,2),(3,3))', str(query))
class ArrayTests(unittest.TestCase):
    """SQL-generation tests for Array literals in generic and PostgreSQL dialects."""

    table_abc, table_efg = Tables('abc', 'efg')

    def test_array_general(self):
        # Generic dialect renders nested arrays with bare brackets.
        query = Query.from_(self.table_abc) \
            .select(Array(1, 'a', ['b', 2, 3]))
        self.assertEqual('SELECT [1,\'a\',[\'b\',2,3]] FROM "abc"', str(query))
        self.assertEqual('SELECT [1,\'a\',[\'b\',2,3]] FROM "abc"', str(query.get_sql()))

    def test_array_postgresql(self):
        # PostgreSQL uses the ARRAY[...] constructor, including nested levels.
        query = PostgreSQLQuery.from_(self.table_abc) \
            .select(Array(1, 'a', ['b', 2, 3]))
        self.assertEqual('SELECT ARRAY[1,\'a\',ARRAY[\'b\',2,3]] FROM "abc"', str(query))
        self.assertEqual('SELECT ARRAY[1,\'a\',ARRAY[\'b\',2,3]] FROM "abc"', query.get_sql())
class BracketTests(unittest.TestCase):
    """SQL-generation tests for explicit parenthesization via Bracket."""

    table_abc, table_efg = Tables('abc', 'efg')

    def test_arithmetic_with_brackets(self):
        q = Query \
            .from_(self.table_abc) \
            .select(Bracket(self.table_abc.foo / 2) / 2)
        self.assertEqual('SELECT ("foo"/2)/2 FROM "abc"', str(q))

    def test_arithmetic_with_brackets_and_alias(self):
        # Bracket supports aliasing like any other term.
        q = Query \
            .from_(self.table_abc) \
            .select(Bracket(self.table_abc.foo / 2).as_('alias'))
self.assertEqual('SELECT ("foo"/2) "alias" FROM "abc"', str(q)) | pypika/tests/test_tuples.py | import unittest
from pypika import (
Array,
Bracket,
Dialects,
PostgreSQLQuery,
Query,
Tables,
Tuple,
)
class TupleTests(unittest.TestCase):
    """SQL rendering of pypika ``Tuple`` in equality and IN predicates."""

    table_abc, table_efg = Tables('abc', 'efg')

    def test_tuple_equality_tuple_on_both(self):
        # Tuple objects on both sides of ==.
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar) == Tuple(1, 2))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar")=(1,2)', str(q))

    def test_tuple_equality_tuple_on_left(self):
        # Plain Python tuple on the right-hand side.
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar) == (1, 2))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar")=(1,2)', str(q))

    def test_tuple_equality_tuple_on_right(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where((self.table_abc.foo, self.table_abc.bar) == Tuple(1, 2))
        # Order is reversed due to lack of right equals method
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE (1,2)=("foo","bar")', str(q))

    def test_tuple_in_using_python_tuples(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar).isin([(1, 1), (2, 2), (3, 3)]))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar") IN ((1,1),(2,2),(3,3))', str(q))

    def test_tuple_in_using_pypika_tuples(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar).isin([Tuple(1, 1), Tuple(2, 2), Tuple(3, 3)]))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar") IN ((1,1),(2,2),(3,3))', str(q))

    def test_tuple_in_using_mixed_tuples(self):
        q = Query.from_(self.table_abc) \
            .select(self.table_abc.foo, self.table_abc.bar) \
            .where(Tuple(self.table_abc.foo, self.table_abc.bar).isin([(1, 1), Tuple(2, 2), (3, 3)]))
        self.assertEqual('SELECT "foo","bar" FROM "abc" '
                         'WHERE ("foo","bar") IN ((1,1),(2,2),(3,3))', str(q))

    def test_tuples_in_join(self):
        # Columns must be table-qualified when two tables are involved.
        query = Query.from_(self.table_abc) \
            .join(self.table_efg) \
            .on(self.table_abc.foo == self.table_efg.bar) \
            .select('*') \
            .where(Tuple(self.table_abc.foo, self.table_efg.bar).isin([(1, 1), Tuple(2, 2), (3, 3)]))
        self.assertEqual('SELECT * FROM "abc" JOIN "efg" ON "abc"."foo"="efg"."bar" '
                         'WHERE ("abc"."foo","efg"."bar") IN ((1,1),(2,2),(3,3))', str(query))
class ArrayTests(unittest.TestCase):
    """Array-literal rendering for the generic and PostgreSQL dialects."""

    table_abc, table_efg = Tables('abc', 'efg')

    def test_array_general(self):
        # Generic dialect renders arrays as bare bracketed lists.
        query = Query.from_(self.table_abc) \
            .select(Array(1, 'a', ['b', 2, 3]))
        self.assertEqual('SELECT [1,\'a\',[\'b\',2,3]] FROM "abc"', str(query))
        self.assertEqual('SELECT [1,\'a\',[\'b\',2,3]] FROM "abc"', str(query.get_sql()))

    def test_array_postgresql(self):
        # PostgreSQL prefixes array literals with the ARRAY keyword.
        query = PostgreSQLQuery.from_(self.table_abc) \
            .select(Array(1, 'a', ['b', 2, 3]))
        self.assertEqual('SELECT ARRAY[1,\'a\',ARRAY[\'b\',2,3]] FROM "abc"', str(query))
        self.assertEqual('SELECT ARRAY[1,\'a\',ARRAY[\'b\',2,3]] FROM "abc"', query.get_sql())
class BracketTests(unittest.TestCase):
    """Rendering of explicitly bracketed arithmetic terms."""

    table_abc, table_efg = Tables('abc', 'efg')

    def test_arithmetic_with_brackets(self):
        q = Query \
            .from_(self.table_abc) \
            .select(Bracket(self.table_abc.foo / 2) / 2)
        self.assertEqual('SELECT ("foo"/2)/2 FROM "abc"', str(q))

    def test_arithmetic_with_brackets_and_alias(self):
        q = Query \
            .from_(self.table_abc) \
            .select(Bracket(self.table_abc.foo / 2).as_('alias'))
        # Fix: dataset metadata that was fused onto this line has been removed.
        self.assertEqual('SELECT ("foo"/2) "alias" FROM "abc"', str(q))
from collections import defaultdict
class graph:
    """Tiny directed-graph container: a vertex set plus adjacency lists.

    ``edges`` is a defaultdict so looking up a node with no outgoing edges
    yields an empty list rather than raising.
    """

    def __init__(self):
        self.nodes = set()
        self.edges = defaultdict(list)

    def add_node(self, value):
        """Register *value* as a vertex."""
        self.nodes |= {value}

    def add_edge(self, from_node, to_node):
        """Record a directed edge from_node -> to_node."""
        self.edges[from_node] += [to_node]
def dijkstra(initial, graph):
    """Dijkstra-style accumulation of node-weight sums from *initial*.

    Nodes act as their own weights (they are numbers): ``path[n]`` ends up as
    the smallest sum of node values along any route from *initial* to ``n``,
    inclusive of both endpoints.  Returns the ``path`` mapping.

    Fixes vs. the original: removed the unused loop counter ``i`` and the
    dead ``else: pass`` branch; flattened the nested relax condition into
    ``elif``.  Behavior is unchanged.
    """
    visited = []
    valid = [initial]
    path = {initial: initial}
    while valid:
        # Settle the frontier node with the cheapest accumulated cost.
        current = min(valid, key=lambda x: path[x])
        visited.append(current)
        valid.remove(current)
        for node in graph.edges[current]:
            if node not in valid and node not in visited:
                valid.append(node)
            # Relax: a node's cost is its predecessor's cost plus its own value.
            if node not in path:
                path[node] = path[current] + node
            elif path[node] > path[current] + node:
                path[node] = path[current] + node
    return path
# --- Problem 83 driver -------------------------------------------------------
# Pass 1: read the 80x80 grid and count how often each value occurs more than
# once.  Pass 2 re-reads the grid and adds a tiny unique fraction to duplicate
# values so every node value is distinct (the graph keys nodes by value).
# Fixes: both file handles are now closed via `with` (the second was leaked),
# and the dataset metadata fused onto the final print line was removed.
matrix = []
with open('input.txt', 'r') as fi:
    for line in fi:
        matrix.append(list(map(int, line.split(','))))

d = {}
for _ in range(80):
    for node in range(80):
        jup = 0
        for k in matrix:
            jup += k.count(matrix[_][node])
        if jup > 1:
            d[matrix[_][node]] = jup

matrix2 = []
with open('input.txt', 'r') as f2:
    for lin in f2:
        lin = list(map(int, lin.split(',')))
        for p in range(len(lin)):
            if lin[p] in d:
                d[lin[p]] -= 1
                # Unique fractional offset; vanishes under int() at the end.
                lin[p] = lin[p] + ((d[lin[p]] + 1) / 10000)
        matrix2.append(lin)

G = graph()
# Interior nodes: connect to all four orthogonal neighbours.
for _ in range(1, len(matrix2) - 1):
    for node in range(1, len(matrix2[_]) - 1):
        G.add_node(matrix2[_][node])
        G.add_edge(matrix2[_][node], matrix2[_][node + 1])
        G.add_edge(matrix2[_][node], matrix2[_][node - 1])
        G.add_edge(matrix2[_][node], matrix2[_ + 1][node])
        G.add_edge(matrix2[_][node], matrix2[_ - 1][node])
# Border rows/columns (corners excluded): three neighbours each.
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[0][node])
    G.add_edge(matrix2[0][node], matrix2[0][node - 1])
    G.add_edge(matrix2[0][node], matrix2[0][node + 1])
    G.add_edge(matrix2[0][node], matrix2[1][node])
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[node][0])
    G.add_edge(matrix2[node][0], matrix2[node - 1][0])
    G.add_edge(matrix2[node][0], matrix2[node + 1][0])
    G.add_edge(matrix2[node][0], matrix2[node][1])
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[79][node])
    G.add_edge(matrix2[79][node], matrix2[79][node - 1])
    G.add_edge(matrix2[79][node], matrix2[79][node + 1])
    G.add_edge(matrix2[79][node], matrix2[78][node])
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[node][79])
    G.add_edge(matrix2[node][79], matrix2[node - 1][79])
    G.add_edge(matrix2[node][79], matrix2[node + 1][79])
    G.add_edge(matrix2[node][79], matrix2[node][78])
# Corner nodes: two neighbours each.
G.add_node(matrix2[0][0])
G.add_edge(matrix2[0][0], matrix2[0][1])
G.add_edge(matrix2[0][0], matrix2[1][0])
G.add_node(matrix2[79][0])
G.add_edge(matrix2[79][0], matrix2[79][1])
G.add_edge(matrix2[79][0], matrix2[78][0])
G.add_node(matrix2[79][79])
G.add_edge(matrix2[79][79], matrix2[79][78])
G.add_edge(matrix2[79][79], matrix2[78][79])
G.add_node(matrix2[0][79])
G.add_edge(matrix2[0][79], matrix2[0][78])
G.add_edge(matrix2[0][79], matrix2[1][79])

a = dijkstra(matrix2[0][0], G)
print(int(a[matrix2[-1][-1]]))
class graph:
    """Tiny directed-graph container: vertex set + adjacency lists by source."""

    def __init__(self):
        self.nodes = set()
        # defaultdict so nodes with no outgoing edges yield [] on lookup.
        self.edges = defaultdict(list)

    def add_node(self, value):
        self.nodes.add(value)

    def add_edge(self, from_node, to_node):
        self.edges[from_node].append(to_node)
def dijkstra(initial, graph):
    """Dijkstra-style accumulation of node-weight sums from *initial*.

    Nodes act as their own weights (numbers): path[n] becomes the smallest
    sum of node values along any discovered route from *initial* to n,
    inclusive of both endpoints.  Returns the path mapping.
    """
    visited = []
    valid = [initial]
    i = 0  # NOTE(review): counter is incremented but never used.
    path = {initial: initial}
    while len(valid) > 0:
        i += 1
        # Settle the frontier node with the cheapest accumulated cost.
        current = min(valid, key=lambda x: path[x])
        visited.append(current)
        valid.remove(current)
        for node in graph.edges[current]:
            if node not in valid and node not in visited:
                valid.append(node)
            else:
                pass  # NOTE(review): dead branch.
            # Relax: node cost = predecessor cost + the node's own value.
            if node not in path:
                path[node] = path[current] + node
            else:
                if path[node] > (path[current] + node):
                    path[node] = path[current] + node
    return path
# --- Problem 83 driver -------------------------------------------------------
# Pass 1 counts duplicated grid values; pass 2 re-reads the grid and adds a
# tiny unique fraction to duplicates so every node value is distinct (the
# graph keys nodes by value).  Fixes: file handles closed via `with` (the
# second was leaked) and fused dataset metadata removed from the final line.
matrix = []
with open('input.txt', 'r') as fi:
    for line in fi:
        matrix.append(list(map(int, line.split(','))))

d = {}
for _ in range(80):
    for node in range(80):
        jup = 0
        for k in matrix:
            jup += k.count(matrix[_][node])
        if jup > 1:
            d[matrix[_][node]] = jup

matrix2 = []
with open('input.txt', 'r') as f2:
    for lin in f2:
        lin = list(map(int, lin.split(',')))
        for p in range(len(lin)):
            if lin[p] in d:
                d[lin[p]] -= 1
                # Unique fractional offset; vanishes under int() at the end.
                lin[p] = lin[p] + ((d[lin[p]] + 1) / 10000)
        matrix2.append(lin)

G = graph()
# Interior nodes: four orthogonal neighbours.
for _ in range(1, len(matrix2) - 1):
    for node in range(1, len(matrix2[_]) - 1):
        G.add_node(matrix2[_][node])
        G.add_edge(matrix2[_][node], matrix2[_][node + 1])
        G.add_edge(matrix2[_][node], matrix2[_][node - 1])
        G.add_edge(matrix2[_][node], matrix2[_ + 1][node])
        G.add_edge(matrix2[_][node], matrix2[_ - 1][node])
# Border rows/columns (corners excluded): three neighbours each.
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[0][node])
    G.add_edge(matrix2[0][node], matrix2[0][node - 1])
    G.add_edge(matrix2[0][node], matrix2[0][node + 1])
    G.add_edge(matrix2[0][node], matrix2[1][node])
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[node][0])
    G.add_edge(matrix2[node][0], matrix2[node - 1][0])
    G.add_edge(matrix2[node][0], matrix2[node + 1][0])
    G.add_edge(matrix2[node][0], matrix2[node][1])
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[79][node])
    G.add_edge(matrix2[79][node], matrix2[79][node - 1])
    G.add_edge(matrix2[79][node], matrix2[79][node + 1])
    G.add_edge(matrix2[79][node], matrix2[78][node])
for node in range(1, len(matrix2) - 1):
    G.add_node(matrix2[node][79])
    G.add_edge(matrix2[node][79], matrix2[node - 1][79])
    G.add_edge(matrix2[node][79], matrix2[node + 1][79])
    G.add_edge(matrix2[node][79], matrix2[node][78])
# Corner nodes: two neighbours each.
G.add_node(matrix2[0][0])
G.add_edge(matrix2[0][0], matrix2[0][1])
G.add_edge(matrix2[0][0], matrix2[1][0])
G.add_node(matrix2[79][0])
G.add_edge(matrix2[79][0], matrix2[79][1])
G.add_edge(matrix2[79][0], matrix2[78][0])
G.add_node(matrix2[79][79])
G.add_edge(matrix2[79][79], matrix2[79][78])
G.add_edge(matrix2[79][79], matrix2[78][79])
G.add_node(matrix2[0][79])
G.add_edge(matrix2[0][79], matrix2[0][78])
G.add_edge(matrix2[0][79], matrix2[1][79])

a = dijkstra(matrix2[0][0], G)
print(int(a[matrix2[-1][-1]]))
import datetime
class Currency:
    """Currency code constants used by operations and positions."""
    RUB = 'RUB'
    USD = 'USD'
    EUR = 'EUR'
class Operation:
    """A single account operation parsed from an API payload dict."""

    class Type:
        """Operation type discriminators as sent by the API."""
        PAY_IN = 'PayIn'
        PAY_OUT = 'PayOut'
        BUY = 'Buy'
        BUY_CARD = 'BuyCard'  # direct buy from the debit card
        SELL = 'Sell'
        DIVIDEND = 'Dividend'
        SERVICE_COMMISION = 'ServiceCommission'
        BROKER_COMMISION = 'BrokerCommission'
        MARGIN_COMMISION = 'MarginCommission'

    def __init__(self, payload):
        # payload keys used: 'operationType', 'payment', 'currency', 'date'.
        self.type = payload['operationType']
        self.payment = payload['payment']
        self.currency = payload['currency']
        self.dtm = _create_dtm_from_tcs_iso_dtm(payload['date'])
class Portfolio:
    """Collection of positions parsed from an API payload dict."""

    def __init__(self, payload):
        self.positions = [Position(data) for data in payload['positions']]

    def get_currency_to_value(self):
        """Return {currency: summed position value} over all positions."""
        totals = {}
        for position in self.positions:
            totals.setdefault(position.currency, 0)
            totals[position.currency] += position.value
        return totals
class Position:
    """One portfolio position valued at its average acquisition price."""

    def __init__(self, payload):
        avg_price = payload['averagePositionPrice']
        self.ticker = payload['ticker']
        self.balance = payload['balance']
        self.currency = avg_price['currency']
        # Total value = average price per unit times the held quantity.
        self.value = avg_price['value'] * self.balance
class Ticker:
    """Instrument metadata parsed from an API payload dict."""

    class Type:
        """Instrument type discriminators."""
        STOCK = 'Stock'
        CURRENCY = 'Currency'

    def __init__(self, payload):
        # payload keys used: 'figi', 'ticker', 'type', 'name'.
        self.figi = payload['figi']
        self.ticker = payload['ticker']
        self.type = payload['type']
        self.name = payload['name']
class Candle:
    """OHLC candle for one instrument/interval, parsed from an API payload."""

    class Interval:
        """Supported candle interval identifiers."""
        MIN1 = '1min'
        MIN2 = '2min'
        MIN3 = '3min'
        MIN5 = '5min'
        MIN10 = '10min'
        MIN15 = '15min'
        MIN30 = '30min'
        HOUR = 'hour'
        DAY = 'day'
        WEEK = 'week'
        MONTH = 'month'

        @staticmethod
        def to_timedelta(interval):
            """Convert an interval identifier to a ``datetime.timedelta``.

            Raises NotImplementedError for 'month' (no fixed length) and for
            unknown identifiers.
            """
            if 'min' in interval:
                # Cut off the literal 'min' suffix.  The previous
                # `interval.strip('min')` stripped a *character set*, which
                # only worked by accident for the digits used here.
                return datetime.timedelta(minutes=int(interval[:-3]))
            elif interval == 'hour':
                return datetime.timedelta(hours=1)
            elif interval == 'day':
                return datetime.timedelta(days=1)
            elif interval == 'week':
                return datetime.timedelta(weeks=1)
            else:
                raise NotImplementedError

    def __init__(self, payload):
        self.figi = payload['figi']
        self.interval = payload['interval']
        self.max = payload['h']  # high
        self.min = payload['l']  # low
        self.open = payload['o']
        self.close = payload['c']
        self.dtm = _create_dtm_from_tcs_iso_dtm(payload['time'])
def _create_dtm_from_tcs_iso_dtm(dtm_str): # tcs jokes
try:
dtm = datetime.datetime.strptime(dtm_str, '%Y-%m-%dT%H:%M:%S.%f%z')
except:
dtm = datetime.datetime.strptime(dtm_str, '%Y-%m-%dT%H:%M:%S%z')
return dtm | investments/models.py | import datetime
class Currency:
    """Currency code constants used by operations and positions."""
    RUB = 'RUB'
    USD = 'USD'
    EUR = 'EUR'
class Operation:
    """A single account operation parsed from an API payload dict."""

    class Type:
        """Operation type discriminators as sent by the API."""
        PAY_IN = 'PayIn'
        PAY_OUT = 'PayOut'
        BUY = 'Buy'
        BUY_CARD = 'BuyCard'  # direct buy from the debit card
        SELL = 'Sell'
        DIVIDEND = 'Dividend'
        SERVICE_COMMISION = 'ServiceCommission'
        BROKER_COMMISION = 'BrokerCommission'
        MARGIN_COMMISION = 'MarginCommission'

    def __init__(self, payload):
        # payload keys used: 'operationType', 'payment', 'currency', 'date'.
        self.type = payload['operationType']
        self.payment = payload['payment']
        self.currency = payload['currency']
        self.dtm = _create_dtm_from_tcs_iso_dtm(payload['date'])
class Portfolio:
    """Collection of positions parsed from an API payload dict."""

    def __init__(self, payload):
        self.positions = [Position(data) for data in payload['positions']]

    def get_currency_to_value(self):
        """Return {currency: summed position value} over all positions."""
        result = {}
        for p in self.positions:
            result[p.currency] = result.get(p.currency, 0) + p.value
        return result
class Position:
    """One portfolio position valued at its average acquisition price."""

    def __init__(self, payload):
        self.ticker = payload['ticker']
        self.balance = payload['balance']
        self.currency = payload['averagePositionPrice']['currency']
        # Total value = average price per unit times the held quantity.
        self.value = payload['averagePositionPrice']['value']*self.balance
class Ticker:
    """Instrument metadata parsed from an API payload dict."""

    class Type:
        """Instrument type discriminators."""
        STOCK = 'Stock'
        CURRENCY = 'Currency'

    def __init__(self, payload):
        self.figi = payload['figi']
        self.ticker = payload['ticker']
        self.type = payload['type']
        self.name = payload['name']
class Candle:
    """OHLC candle for one instrument/interval, parsed from an API payload."""

    class Interval:
        """Supported candle interval identifiers."""
        MIN1 = '1min'
        MIN2 = '2min'
        MIN3 = '3min'
        MIN5 = '5min'
        MIN10 = '10min'
        MIN15 = '15min'
        MIN30 = '30min'
        HOUR = 'hour'
        DAY = 'day'
        WEEK = 'week'
        MONTH = 'month'

        @staticmethod
        def to_timedelta(interval):
            """Convert an interval identifier to a datetime.timedelta.

            Raises NotImplementedError for 'month' and unknown identifiers.
            """
            if 'min' in interval:
                # NOTE(review): str.strip('min') strips a character *set*;
                # it only works here because the minute counts contain no
                # 'm'/'i'/'n' digits-adjacent characters.
                val = int(interval.strip('min'))
                return datetime.timedelta(minutes=val)
            elif interval == 'hour':
                return datetime.timedelta(hours=1)
            elif interval == 'day':
                return datetime.timedelta(days=1)
            elif interval == 'week':
                return datetime.timedelta(weeks=1)
            else:
                raise NotImplementedError

    def __init__(self, payload):
        self.figi = payload['figi']
        self.interval = payload['interval']
        self.max = payload['h']  # high
        self.min = payload['l']  # low
        self.open = payload['o']
        self.close = payload['c']
        self.dtm = _create_dtm_from_tcs_iso_dtm(payload['time'])
def _create_dtm_from_tcs_iso_dtm(dtm_str): # tcs jokes
try:
dtm = datetime.datetime.strptime(dtm_str, '%Y-%m-%dT%H:%M:%S.%f%z')
except:
dtm = datetime.datetime.strptime(dtm_str, '%Y-%m-%dT%H:%M:%S%z')
return dtm | 0.522689 | 0.151435 |
import re
from functools import wraps
from twisted.web.resource import Resource, NoResource
class _FakeResource(Resource):
    """Leaf resource that renders a pre-computed result verbatim."""
    _result = ''
    isLeaf = True

    def __init__(self, result):
        Resource.__init__(self)
        self._result = result

    def render(self, request):
        # The canned payload is returned regardless of the request.
        return self._result
def maybeResource(f):
    """Wrap *f* so non-Resource return values are boxed in a _FakeResource."""
    @wraps(f)
    def inner(*args, **kwargs):
        rendered = f(*args, **kwargs)
        if isinstance(rendered, Resource):
            return rendered
        return _FakeResource(rendered)
    return inner
class APIResource(Resource):
    """Twisted Resource that dispatches requests via (method, regex) routes.

    Routes are harvested from methods carrying a ``__txrestapi__`` attribute
    (set by decorators elsewhere in this package) and can also be registered
    or removed programmatically.  Only change vs. the original: the dataset
    metadata fused onto the final line was removed; logic is untouched.
    """

    _registry = None

    def __new__(cls, *args, **kwds):
        instance = super().__new__(cls, *args, **kwds)
        instance._registry = []
        # Harvest routes declared via decorators on the subclass's methods.
        for name in dir(instance):
            attribute = getattr(instance, name)
            annotations = getattr(attribute, "__txrestapi__", [])
            for annotation in annotations:
                method, regex = annotation
                instance.register(method, regex, attribute)
        return instance

    def __init__(self, *args, **kwargs):
        Resource.__init__(self, *args, **kwargs)

    def _get_callback(self, request):
        """Return (callback, named-groups) for the first matching route."""
        path_to_check = getattr(request, '_remaining_path', request.path)
        if isinstance(path_to_check, bytes):
            path_to_check = path_to_check.decode()
        for m, r, cb in filter(lambda t: t[0] in (request.method, b'ALL'),
                               self._registry):
            result = r.search(path_to_check)
            if result:
                # Remember the unmatched tail so nested resources can route it.
                request._remaining_path = path_to_check[result.span()[1]:]
                return cb, result.groupdict()
        return None, None

    def register(self, method, regex, callback):
        """Add a route; *regex* is a bytes pattern."""
        self._registry.append((method, re.compile(regex.decode()), callback))

    def unregister(self, method=None, regex=None, callback=None):
        """Remove every route matching all of the supplied criteria."""
        if regex is not None:
            regex = re.compile(regex.decode())
        for m, r, cb in self._registry[:]:
            if not method or (method and m == method):
                if not regex or (regex and r == regex):
                    if not callback or (callback and cb == callback):
                        self._registry.remove((m, r, cb))

    def getChild(self, name, request):
        # Static children win; otherwise fall back to regex dispatch.
        r = self.children.get(name, None)
        if r is None:
            callback, args = self._get_callback(request)
            if callback is None:
                return NoResource(message="Resource /'{}' not found".format(name))
            else:
                return maybeResource(callback)(request, **args)
        else:
            return r
from functools import wraps
from twisted.web.resource import Resource, NoResource
class _FakeResource(Resource):
    """Leaf resource that renders a pre-computed result verbatim."""
    _result = ''
    isLeaf = True

    def __init__(self, result):
        Resource.__init__(self)
        self._result = result

    def render(self, request):
        # The canned payload is returned regardless of the request.
        return self._result
def maybeResource(f):
    """Wrap *f* so non-Resource return values are boxed in a _FakeResource."""
    @wraps(f)
    def inner(*args, **kwargs):
        result = f(*args, **kwargs)
        if not isinstance(result, Resource):
            result = _FakeResource(result)
        return result
    return inner
class APIResource(Resource):
    """Twisted Resource that dispatches requests via (method, regex) routes.

    Only change vs. the original: the dataset metadata fused onto the final
    line was removed; logic is untouched.
    """

    _registry = None

    def __new__(cls, *args, **kwds):
        instance = super().__new__(cls, *args, **kwds)
        instance._registry = []
        # Harvest routes declared via decorators on the subclass's methods.
        for name in dir(instance):
            attribute = getattr(instance, name)
            annotations = getattr(attribute, "__txrestapi__", [])
            for annotation in annotations:
                method, regex = annotation
                instance.register(method, regex, attribute)
        return instance

    def __init__(self, *args, **kwargs):
        Resource.__init__(self, *args, **kwargs)

    def _get_callback(self, request):
        """Return (callback, named-groups) for the first matching route."""
        path_to_check = getattr(request, '_remaining_path', request.path)
        if isinstance(path_to_check, bytes):
            path_to_check = path_to_check.decode()
        for m, r, cb in filter(lambda t: t[0] in (request.method, b'ALL'),
                               self._registry):
            result = r.search(path_to_check)
            if result:
                # Remember the unmatched tail so nested resources can route it.
                request._remaining_path = path_to_check[result.span()[1]:]
                return cb, result.groupdict()
        return None, None

    def register(self, method, regex, callback):
        """Add a route; *regex* is a bytes pattern."""
        self._registry.append((method, re.compile(regex.decode()), callback))

    def unregister(self, method=None, regex=None, callback=None):
        """Remove every route matching all of the supplied criteria."""
        if regex is not None:
            regex = re.compile(regex.decode())
        for m, r, cb in self._registry[:]:
            if not method or (method and m == method):
                if not regex or (regex and r == regex):
                    if not callback or (callback and cb == callback):
                        self._registry.remove((m, r, cb))

    def getChild(self, name, request):
        # Static children win; otherwise fall back to regex dispatch.
        r = self.children.get(name, None)
        if r is None:
            callback, args = self._get_callback(request)
            if callback is None:
                return NoResource(message="Resource /'{}' not found".format(name))
            else:
                return maybeResource(callback)(request, **args)
        else:
            return r
import pulumi
import pulumi.runtime
class Connection(pulumi.CustomResource):
    """
    Provides a Connection of Direct Connect.
    """
    def __init__(__self__, __name__, __opts__=None, bandwidth=None, location=None, name=None, tags=None):
        """Create a Connection resource with the given unique name, props, and options."""
        # NOTE(review): `basestring` is Python 2 only; this generated SDK file
        # predates Python 3 support — confirm the targeted interpreter before
        # modernizing.
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, basestring):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        __props__ = dict()
        if not bandwidth:
            raise TypeError('Missing required property bandwidth')
        elif not isinstance(bandwidth, basestring):
            raise TypeError('Expected property bandwidth to be a basestring')
        __self__.bandwidth = bandwidth
        """
        The bandwidth of the connection. Available values: 1Gbps, 10Gbps. Case sensitive.
        """
        __props__['bandwidth'] = bandwidth
        if not location:
            raise TypeError('Missing required property location')
        elif not isinstance(location, basestring):
            raise TypeError('Expected property location to be a basestring')
        __self__.location = location
        """
        The AWS Direct Connect location where the connection is located. See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`.
        """
        __props__['location'] = location
        if name and not isinstance(name, basestring):
            raise TypeError('Expected property name to be a basestring')
        __self__.name = name
        """
        The name of the connection.
        """
        __props__['name'] = name
        if tags and not isinstance(tags, dict):
            raise TypeError('Expected property tags to be a dict')
        __self__.tags = tags
        """
        A mapping of tags to assign to the resource.
        """
        __props__['tags'] = tags
        __self__.arn = pulumi.runtime.UNKNOWN
        """
        The ARN of the connection.
        """
        super(Connection, __self__).__init__(
            'aws:directconnect/connection:Connection',
            __name__,
            __props__,
            __opts__)

    def set_outputs(self, outs):
        """Copy known engine output values from *outs* onto this resource."""
        if 'arn' in outs:
            self.arn = outs['arn']
        if 'bandwidth' in outs:
            self.bandwidth = outs['bandwidth']
        if 'location' in outs:
            self.location = outs['location']
        if 'name' in outs:
            self.name = outs['name']
        # Fix: dataset metadata fused onto this final line was removed.
        if 'tags' in outs:
            self.tags = outs['tags']
import pulumi
import pulumi.runtime
class Connection(pulumi.CustomResource):
    """
    Provides a Connection of Direct Connect.
    """
    def __init__(__self__, __name__, __opts__=None, bandwidth=None, location=None, name=None, tags=None):
        """Create a Connection resource with the given unique name, props, and options."""
        # NOTE(review): `basestring` is Python 2 only; confirm the targeted
        # interpreter before modernizing this generated file.
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, basestring):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        __props__ = dict()
        if not bandwidth:
            raise TypeError('Missing required property bandwidth')
        elif not isinstance(bandwidth, basestring):
            raise TypeError('Expected property bandwidth to be a basestring')
        __self__.bandwidth = bandwidth
        """
        The bandwidth of the connection. Available values: 1Gbps, 10Gbps. Case sensitive.
        """
        __props__['bandwidth'] = bandwidth
        if not location:
            raise TypeError('Missing required property location')
        elif not isinstance(location, basestring):
            raise TypeError('Expected property location to be a basestring')
        __self__.location = location
        """
        The AWS Direct Connect location where the connection is located. See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`.
        """
        __props__['location'] = location
        if name and not isinstance(name, basestring):
            raise TypeError('Expected property name to be a basestring')
        __self__.name = name
        """
        The name of the connection.
        """
        __props__['name'] = name
        if tags and not isinstance(tags, dict):
            raise TypeError('Expected property tags to be a dict')
        __self__.tags = tags
        """
        A mapping of tags to assign to the resource.
        """
        __props__['tags'] = tags
        __self__.arn = pulumi.runtime.UNKNOWN
        """
        The ARN of the connection.
        """
        super(Connection, __self__).__init__(
            'aws:directconnect/connection:Connection',
            __name__,
            __props__,
            __opts__)

    def set_outputs(self, outs):
        """Copy known engine output values from *outs* onto this resource."""
        if 'arn' in outs:
            self.arn = outs['arn']
        if 'bandwidth' in outs:
            self.bandwidth = outs['bandwidth']
        if 'location' in outs:
            self.location = outs['location']
        if 'name' in outs:
            self.name = outs['name']
        # Fix: dataset metadata fused onto this final line was removed.
        if 'tags' in outs:
            self.tags = outs['tags']
from minibench import Benchmark
from faker import Faker
from flask import Flask
from flask_restplus import fields, Api, Resource
from flask_restplus.swagger import Swagger
fake = Faker()
api = Api()

# Swagger models exposed by the benchmark endpoints below.
person = api.model('Person', {
    'name': fields.String,
    'age': fields.Integer
})
family = api.model('Family', {
    'name': fields.String,
    'father': fields.Nested(person),
    'mother': fields.Nested(person),
    'children': fields.List(fields.Nested(person))
})
@api.route('/families', endpoint='families')
class Families(Resource):
    # Handler bodies are intentionally empty: only swagger spec generation is
    # benchmarked, never the handlers themselves.
    @api.marshal_with(family)
    def get(self):
        '''List all families'''
        pass

    @api.marshal_with(family)
    @api.response(201, 'Family created')
    def post(self):
        '''Create a new family'''
        pass
@api.route('/families/<name>/', endpoint='family')
@api.response(404, 'Family not found')
class Family(Resource):
    # Empty handlers; only spec generation is benchmarked.
    @api.marshal_with(family)
    def get(self):
        '''Get a family given its name'''
        pass

    @api.marshal_with(family)
    def put(self):
        '''Update a family given its name'''
        pass
@api.route('/persons', endpoint='persons')
class Persons(Resource):
    # Empty handlers; only spec generation is benchmarked.
    @api.marshal_with(person)
    def get(self):
        '''List all persons'''
        pass

    @api.marshal_with(person)
    @api.response(201, 'Person created')
    def post(self):
        '''Create a new person'''
        pass
@api.route('/persons/<name>/', endpoint='person')
@api.response(404, 'Person not found')
class Person(Resource):
    # Empty handlers; only spec generation is benchmarked.
    @api.marshal_with(person)
    def get(self):
        '''Get a person given its name'''
        pass

    @api.marshal_with(person)
    def put(self):
        '''Update a person given its name'''
        pass
class SwaggerBenchmark(Benchmark):
    '''Swagger serialization benchmark for a full API'''
    times = 1000

    def before_class(self):
        # One Flask app for the whole class; bench methods open a fresh
        # request context per call.
        self.app = Flask(__name__)
        api.init_app(self.app)

    def bench_swagger_specs(self):
        # Full spec regeneration on every call.
        with self.app.test_request_context('/'):
            return Swagger(api).as_dict()

    def bench_swagger_specs_cached(self):
        # Fix: dataset metadata fused onto the final line was removed.
        with self.app.test_request_context('/'):
            return api.__schema__
from faker import Faker
from flask import Flask
from flask_restplus import fields, Api, Resource
from flask_restplus.swagger import Swagger
fake = Faker()
api = Api()

# Swagger models exposed by the benchmark endpoints below.
person = api.model('Person', {
    'name': fields.String,
    'age': fields.Integer
})
family = api.model('Family', {
    'name': fields.String,
    'father': fields.Nested(person),
    'mother': fields.Nested(person),
    'children': fields.List(fields.Nested(person))
})
@api.route('/families', endpoint='families')
class Families(Resource):
    # Empty handlers; only swagger spec generation is benchmarked.
    @api.marshal_with(family)
    def get(self):
        '''List all families'''
        pass

    @api.marshal_with(family)
    @api.response(201, 'Family created')
    def post(self):
        '''Create a new family'''
        pass
@api.route('/families/<name>/', endpoint='family')
@api.response(404, 'Family not found')
class Family(Resource):
    # Empty handlers; only spec generation is benchmarked.
    @api.marshal_with(family)
    def get(self):
        '''Get a family given its name'''
        pass

    @api.marshal_with(family)
    def put(self):
        '''Update a family given its name'''
        pass
@api.route('/persons', endpoint='persons')
class Persons(Resource):
    # Empty handlers; only spec generation is benchmarked.
    @api.marshal_with(person)
    def get(self):
        '''List all persons'''
        pass

    @api.marshal_with(person)
    @api.response(201, 'Person created')
    def post(self):
        '''Create a new person'''
        pass
@api.route('/persons/<name>/', endpoint='person')
@api.response(404, 'Person not found')
class Person(Resource):
    # Empty handlers; only spec generation is benchmarked.
    @api.marshal_with(person)
    def get(self):
        '''Get a person given its name'''
        pass

    @api.marshal_with(person)
    def put(self):
        '''Update a person given its name'''
        pass
class SwaggerBenchmark(Benchmark):
    '''Swagger serialization benchmark for a full API'''
    times = 1000

    def before_class(self):
        # One Flask app for the whole class; bench methods open a fresh
        # request context per call.
        self.app = Flask(__name__)
        api.init_app(self.app)

    def bench_swagger_specs(self):
        # Full spec regeneration on every call.
        with self.app.test_request_context('/'):
            return Swagger(api).as_dict()

    def bench_swagger_specs_cached(self):
        # Fix: dataset metadata fused onto the final line was removed.
        with self.app.test_request_context('/'):
            return api.__schema__
import numpy as np
import skimage
import skimage.transform as trans
"""
Some lines borrowed from: https://www.kaggle.com/sashakorekov/end-to-end-resnet50-with-tta-lb-0-93
"""
def rotate_clk_img_and_msk(img, msk):
    """Rotate image and mask together by one random angle from {4,6,...,20}."""
    angle = np.random.choice((4, 6, 8, 10, 12, 14, 16, 18, 20))
    kwargs = dict(resize=False, preserve_range=True, mode='symmetric')
    return (trans.rotate(img, angle, **kwargs),
            trans.rotate(msk, angle, **kwargs))
def rotate_cclk_img_and_msk(img, msk):
    """Rotate image and mask together by one random angle from {-20,...,-4}."""
    angle = np.random.choice((-20, -18, -16, -14, -12, -10, -8, -6, -4))
    kwargs = dict(resize=False, preserve_range=True, mode='symmetric')
    return (trans.rotate(img, angle, **kwargs),
            trans.rotate(msk, angle, **kwargs))
def flipping_img_and_msk(img, msk):
    """Mirror image and mask horizontally (flip along axis 1)."""
    return np.flip(img, axis=1), np.flip(msk, axis=1)
def zoom_img_and_msk(img, msk):
    """Randomly zoom into the image/mask pair and crop back to the input size.

    A zoom factor > 1 is drawn, both arrays are upsampled, then one of five
    regions (4 corners or the centre) is cropped and resized back to (h, w).
    Fixes vs. the original: the non-exclusive ``if region == ...`` chain is
    made an explicit elif chain (same behavior, clearer), and dataset
    metadata fused onto the final line was removed.
    """
    zoom_factor = np.random.choice((1.2, 1.5, 1.8, 2, 2.2, 2.5))  # currently doesn't have zoom out!
    h, w = img.shape[:2]
    # width and height of the zoomed image
    zh = int(np.round(zoom_factor * h))
    zw = int(np.round(zoom_factor * w))
    img = trans.resize(img, (zh, zw), preserve_range=True, mode='symmetric')
    msk = trans.resize(msk, (zh, zw), preserve_range=True, mode='symmetric')
    region = np.random.choice((0, 1, 2, 3, 4))
    if zoom_factor <= 1:
        # Dead branch with the current factor choices; kept defensively.
        outimg = img
        outmsk = msk
    elif region == 0:  # top-left corner
        outimg = img[0:h, 0:w]
        outmsk = msk[0:h, 0:w]
    elif region == 1:  # top-right corner
        outimg = img[0:h, zw - w:zw]
        outmsk = msk[0:h, zw - w:zw]
    elif region == 2:  # bottom-left corner
        outimg = img[zh - h:zh, 0:w]
        outmsk = msk[zh - h:zh, 0:w]
    elif region == 3:  # bottom-right corner
        outimg = img[zh - h:zh, zw - w:zw]
        outmsk = msk[zh - h:zh, zw - w:zw]
    else:  # region == 4: centre crop
        marh = h // 2
        marw = w // 2
        outimg = img[(zh // 2 - marh):(zh // 2 + marh), (zw // 2 - marw):(zw // 2 + marw)]
        outmsk = msk[(zh // 2 - marh):(zh // 2 + marh), (zw // 2 - marw):(zw // 2 + marw)]
    # to make sure the output is in the same size of the input
    img_o = trans.resize(outimg, (h, w), preserve_range=True, mode='symmetric')
    msk_o = trans.resize(outmsk, (h, w), preserve_range=True, mode='symmetric')
    return img_o, msk_o
import skimage
import skimage.transform as trans
"""
Some lines borrowed from: https://www.kaggle.com/sashakorekov/end-to-end-resnet50-with-tta-lb-0-93
"""
def rotate_clk_img_and_msk(img, msk):
    """Rotate image and mask together by one random angle from {4,6,...,20}."""
    angle = np.random.choice((4, 6, 8, 10, 12, 14, 16, 18, 20))
    img_o = trans.rotate(img, angle, resize=False, preserve_range=True, mode='symmetric')
    msk_o = trans.rotate(msk, angle, resize=False, preserve_range=True, mode='symmetric')
    return img_o, msk_o
def rotate_cclk_img_and_msk(img, msk):
    """Rotate image and mask together by one random angle from {-20,...,-4}."""
    angle = np.random.choice((-20, -18, -16, -14, -12, -10, -8, -6, -4))
    img_o = trans.rotate(img, angle, resize=False, preserve_range=True, mode='symmetric')
    msk_o = trans.rotate(msk, angle, resize=False, preserve_range=True, mode='symmetric')
    return img_o, msk_o
def flipping_img_and_msk(img, msk):
    """Mirror image and mask horizontally (flip along axis 1)."""
    img_o = np.flip(img, axis=1)
    msk_o = np.flip(msk, axis=1)
    return img_o, msk_o
def zoom_img_and_msk(img, msk):
    """Randomly zoom into the image/mask pair and crop back to the input size.

    Fixes vs. the original: explicit elif chain (same behavior) and dataset
    metadata fused onto the final line removed.
    """
    zoom_factor = np.random.choice((1.2, 1.5, 1.8, 2, 2.2, 2.5))  # currently doesn't have zoom out!
    h, w = img.shape[:2]
    # width and height of the zoomed image
    zh = int(np.round(zoom_factor * h))
    zw = int(np.round(zoom_factor * w))
    img = trans.resize(img, (zh, zw), preserve_range=True, mode='symmetric')
    msk = trans.resize(msk, (zh, zw), preserve_range=True, mode='symmetric')
    region = np.random.choice((0, 1, 2, 3, 4))
    if zoom_factor <= 1:
        # Dead branch with the current factor choices; kept defensively.
        outimg = img
        outmsk = msk
    elif region == 0:  # top-left corner
        outimg = img[0:h, 0:w]
        outmsk = msk[0:h, 0:w]
    elif region == 1:  # top-right corner
        outimg = img[0:h, zw - w:zw]
        outmsk = msk[0:h, zw - w:zw]
    elif region == 2:  # bottom-left corner
        outimg = img[zh - h:zh, 0:w]
        outmsk = msk[zh - h:zh, 0:w]
    elif region == 3:  # bottom-right corner
        outimg = img[zh - h:zh, zw - w:zw]
        outmsk = msk[zh - h:zh, zw - w:zw]
    else:  # region == 4: centre crop
        marh = h // 2
        marw = w // 2
        outimg = img[(zh // 2 - marh):(zh // 2 + marh), (zw // 2 - marw):(zw // 2 + marw)]
        outmsk = msk[(zh // 2 - marh):(zh // 2 + marh), (zw // 2 - marw):(zw // 2 + marw)]
    # to make sure the output is in the same size of the input
    img_o = trans.resize(outimg, (h, w), preserve_range=True, mode='symmetric')
    msk_o = trans.resize(outmsk, (h, w), preserve_range=True, mode='symmetric')
    return img_o, msk_o
__author__ = '<NAME> <<EMAIL>>, <NAME>'
import functools
import sys
import threading
import traceback
from gevent import event as gevent_event
from pyon.core import bootstrap, MSG_HEADER_ACTOR
from pyon.core.bootstrap import CFG
from pyon.core.exception import BadRequest, IonException, StreamException
from pyon.datastore.datastore import DataStore
from pyon.datastore.datastore_query import QUERY_EXP_KEY, DatastoreQueryBuilder, DQ
from pyon.ion.identifier import create_unique_event_id, create_simple_unique_id
from pyon.net.endpoint import Publisher, Subscriber, BaseEndpoint
from pyon.net.transport import XOTransport, NameTrio
from pyon.util.async import spawn
from pyon.util.containers import get_ion_ts_millis, is_valid_ts
from pyon.util.log import log
from interface.objects import Event
#The event will be ignored if older than this time period
VALID_EVENT_TIME_PERIOD = 365 * 24 * 60 * 60 * 1000 # one year
DEFAULT_SYSTEM_XS = "system"
DEFAULT_EVENTS_XP = "events"
# Alternative way to set process context
event_context = threading.local()
class EventError(IonException):
    """Generic error in the event subsystem (internal server error code)."""
    status_code = 500
class EventPublisher(Publisher):
    """
    Publishes events to the system events exchange point.
    Use publish_event() with keyword fields, or publish_event_object() with a
    prebuilt Event object.
    """

    @classmethod
    def get_events_exchange_point(cls):
        """Return the fully qualified events exchange point name.

        Format: sysname.system_xs.events_xp (matches default output of XOs).
        """
        # match with default output of XOs
        root_xs = CFG.get_safe("exchange.core.system_xs", DEFAULT_SYSTEM_XS)
        events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        return "%s.%s.%s" % (bootstrap.get_sys_name(), root_xs, events_xp)

    def __init__(self, event_type=None, xp=None, process=None, **kwargs):
        """
        Constructs a publisher of events for a specific type.
        @param event_type The name of the event type object
        @param xp Exchange (AMQP) name, can be None, will use events default.
        @param process Optional process used to determine the current actor id
        """
        self.event_type = event_type
        self.process = process
        self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        if bootstrap.container_instance and getattr(bootstrap.container_instance, 'event_repository', None):
            self.event_repo = bootstrap.container_instance.event_repository
        else:
            self.event_repo = None
        # generate an exchange name to publish events to
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container and container.has_capability(container.CCAP.EXCHANGE_MANAGER):  # might be too early in chain
            xp = xp or container.create_xp(self._events_xp)
            to_name = xp
        else:
            # No exchange manager yet: fall back to the plain exchange point name
            xp = xp or self.get_events_exchange_point()
            to_name = (xp, None)
        Publisher.__init__(self, to_name=to_name, **kwargs)

    def _topic(self, event_object):
        """
        Builds the routing key this event should be published with:
        base_types(reversed).type.sub_type.origin_type.origin
        """
        assert event_object
        base_types = event_object.base_types or []
        base_str = ".".join(reversed(base_types))
        # "_" placeholders keep the routing key segment count fixed
        sub_type = event_object.sub_type or "_"
        origin_type = event_object.origin_type or "_"
        routing_key = "%s.%s.%s.%s.%s" % (base_str, event_object._get_type(), sub_type, origin_type, event_object.origin)
        return routing_key

    def publish_event_object(self, event_object):
        """
        Publishes the given prebuilt event object. Fills in base_types,
        ts_created (if absent), actor_id (if absent) and a unique _id
        before publishing.
        @param event_object the event object to be published
        @retval event_object the event object which was published
        @raises BadRequest if event_object is missing, already has an _id,
                or carries an invalid/out-of-range ts_created
        """
        if not event_object:
            raise BadRequest("Must provide event_object")
        event_object.base_types = event_object._get_extends()
        topic = self._topic(event_object)  # Routing key generated using type_, base_types, origin, origin_type, sub_type
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container and container.has_capability(container.CCAP.EXCHANGE_MANAGER):
            # make sure we are an xp, if not, upgrade
            if not isinstance(self._send_name, XOTransport):
                default_nt = NameTrio(self.get_events_exchange_point())
                if isinstance(self._send_name, NameTrio) \
                   and self._send_name.exchange == default_nt.exchange \
                   and self._send_name.queue == default_nt.queue \
                   and self._send_name.binding == default_nt.binding:
                    self._send_name = container.create_xp(self._events_xp)
                else:
                    self._send_name = container.create_xp(self._send_name)
            xp = self._send_name
            to_name = xp.create_route(topic)
        else:
            to_name = (self._send_name.exchange, topic)
        current_time = get_ion_ts_millis()
        # Ensure valid created timestamp if supplied
        if event_object.ts_created:
            if not is_valid_ts(event_object.ts_created):
                raise BadRequest("The ts_created value is not a valid timestamp: '%s'" % (event_object.ts_created))
            # Reject events that are too far in the future
            if int(event_object.ts_created) > (current_time + VALID_EVENT_TIME_PERIOD):
                raise BadRequest("This ts_created value is too far in the future:'%s'" % (event_object.ts_created))
            # Reject events that are older than the valid time period
            if int(event_object.ts_created) < (current_time - VALID_EVENT_TIME_PERIOD):
                raise BadRequest("This ts_created value is too old:'%s'" % (event_object.ts_created))
        else:
            event_object.ts_created = str(current_time)
        # Set the actor id from process context / event_context if not already set
        if not event_object.actor_id:
            event_object.actor_id = self._get_actor_id()
        # Validate this object - ideally the validator should pass on problems, but for now just log
        # any errors and keep going, since seeing invalid situations are better than skipping validation.
        try:
            event_object._validate()
        except Exception as e:
            log.exception(e)
        # Ensure the event object has a unique id
        if '_id' in event_object:
            raise BadRequest("The event object cannot contain a _id field '%s'" % (event_object))
        # Generate a unique ID for this event
        event_object._id = create_unique_event_id()
        try:
            self.publish(event_object, to_name=to_name)
        except Exception as ex:
            # BUGFIX: ex.message was removed in Python 3 (deprecated since 2.6);
            # use str(ex) which works on both Python 2 and 3.
            log.exception("Failed to publish event (%s): '%s'" % (str(ex), event_object))
            raise
        return event_object

    def publish_event(self, origin=None, event_type=None, **kwargs):
        """
        Publishes an event of given type for the given origin. Event_type defaults to an
        event_type set when initializing the EventPublisher. Other kwargs fill out the fields
        of the event. This operation will fail with an exception.
        @param origin the origin field value
        @param event_type the event type (defaults to the EventPublisher's event_type if set)
        @param kwargs additional event fields
        @retval event_object the event object which was published
        """
        event_type = event_type or self.event_type
        if not event_type:
            raise BadRequest("No event_type provided")
        event_object = bootstrap.IonObject(event_type, origin=origin, **kwargs)
        ret_val = self.publish_event_object(event_object)
        return ret_val

    def _get_actor_id(self):
        """Returns the current ion-actor-id from incoming process headers,
        falling back to the thread-local event_context, else ""."""
        actor_id = ""
        try:
            if self.process:
                ctx = self.process.get_context()
                actor_id = ctx.get(MSG_HEADER_ACTOR, None) or ""
        except Exception:
            # Best-effort: any failure reading the process context falls through
            pass
        actor_id = actor_id or getattr(event_context, "actor_id", None) or ""
        return actor_id
# Helper for bootstrap purposes: names of event queues declared locally
# (appended only by subscribers created when no container was available)
local_event_queues = []
class BaseEventSubscriberMixin(object):
    """
    A mixin class for Event subscribers to facilitate inheritance.
    EventSubscribers must come in both standard and process level versions, which
    rely on common base code. It is difficult to multiple inherit due to both of
    them sharing a base class, so this mixin is preferred.
    """
    # AMQP topic wildcard matching zero or more routing key segments
    ALL_EVENTS = "#"

    @staticmethod
    def _topic(event_type, origin, sub_type=None, origin_type=None):
        """
        Builds the topic that this event should be published to.
        If either side of the event_id.origin pair are missing, will subscribe to anything.
        """
        if event_type == "Event":
            # Base event type: match everything below Event
            event_type = "Event.#"
        elif event_type:
            # Specific type: match it anywhere in the type-hierarchy prefix
            event_type = "#.%s.#" % event_type
        else:
            event_type = "#"
        # Wildcards for any filter dimension left unspecified
        sub_type = sub_type or "*.#"
        origin_type = origin_type or "*"
        origin = origin or "*"
        return "%s.%s.%s.%s" % (event_type, sub_type, origin_type, origin)

    def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None,
                 sub_type=None, origin_type=None, pattern=None, auto_delete=None):
        """
        @param xp_name      exchange point name; defaults to the configured events XP
        @param event_type   event type filter (None matches all)
        @param origin       event origin filter (None matches all)
        @param queue_name   explicit queue name; autogenerated if None
        @param sub_type     event sub_type filter
        @param origin_type  event origin_type filter
        @param pattern      explicit binding pattern, overrides the filters above
        @param auto_delete  queue auto_delete flag; defaults to True for events
        """
        self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin
        # Default for auto_delete is True for events, unless otherwise specified
        if auto_delete is None:
            auto_delete = True
        self._auto_delete = auto_delete
        xp_name = xp_name or self._events_xp
        if pattern:
            binding = pattern
        else:
            binding = self._topic(event_type, origin, sub_type, origin_type)
        # create queue_name if none passed in
        if queue_name is None:
            queue_name = "subsc_" + create_simple_unique_id()
        # prepend proc name to queue name if we have one
        if hasattr(self, "_process") and self._process:
            queue_name = "%s_%s" % (self._process._proc_name, queue_name)
        # do we have a container/ex_manager?
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container:
            xp = container.create_xp(xp_name)
            xne = container.create_event_xn(queue_name,
                                            pattern=binding,
                                            xp=xp,
                                            auto_delete=auto_delete)
            self._ev_recv_name = xne
            self.binding = None
        else:
            # Remove this case. No container??
            self.binding = binding
            # prefix the queue_name, if specified, with the sysname
            queue_name = "%s.system.%s" % (bootstrap.get_sys_name(), queue_name)
            # set this name to be picked up by inherited folks
            self._ev_recv_name = (xp_name, queue_name)
            # track locally declared queues for bootstrap cleanup
            local_event_queues.append(queue_name)

    def add_event_subscription(self, event_type=None, origin=None, sub_type=None, origin_type=None):
        """ Add another event subscription based on given characteristics. """
        binding = self._topic(event_type, origin, sub_type, origin_type)
        if isinstance(self._ev_recv_name, XOTransport):
            self._ev_recv_name.bind(binding)
        else:
            raise BadRequest("Non XO event subscriber not supported")

    def remove_event_subscription(self, event_type=None, origin=None, sub_type=None, origin_type=None):
        """ Remove an event subscription based on given characteristics. """
        binding = self._topic(event_type, origin, sub_type, origin_type)
        if isinstance(self._ev_recv_name, XOTransport):
            self._ev_recv_name.unbind(binding)
        else:
            raise BadRequest("Non XO event subscriber not supported")
class EventSubscriber(Subscriber, BaseEventSubscriberMixin):
    """Manages a subscription to an event queue for a select set of event types or
    event origins or other specialized binding.
    """

    def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None, callback=None,
                 sub_type=None, origin_type=None, pattern=None, auto_delete=None, *args, **kwargs):
        """
        Initializer.
        If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
        named queues are not namespaces to their exchanges, so two different systems on the same broker
        can cross-pollute messages if a named queue is used.
        Note: an EventSubscriber needs to be closed to free broker resources
        """
        self._cbthread = None
        # sets self._ev_recv_name, self.binding
        BaseEventSubscriberMixin.__init__(self, xp_name=xp_name, event_type=event_type, origin=origin,
                                          queue_name=queue_name, sub_type=sub_type, origin_type=origin_type,
                                          pattern=pattern, auto_delete=auto_delete)
        # BUGFIX: message previously said "EventPublisher" (copy-paste error)
        log.debug("EventSubscriber events pattern %s", self.binding)
        from_name = self._get_from_name()
        binding = self._get_binding()
        Subscriber.__init__(self, from_name=from_name, binding=binding, callback=callback,
                            auto_delete=self._auto_delete, **kwargs)

    def _get_from_name(self):
        """
        Returns the from_name that the base Subscriber should listen on.
        This is overridden in the process level.
        """
        return self._ev_recv_name

    def _get_binding(self):
        """
        Returns the binding that the base Subscriber should use.
        This is overridden in the process level.
        """
        return self.binding

    def start(self):
        """
        Starts listening in a background greenlet.
        @retval the spawned greenlet
        """
        assert not self._cbthread, "start called twice on EventSubscriber"
        gl = spawn(self.listen)
        gl._glname = "EventSubscriber"
        self._cbthread = gl
        if not self._ready_event.wait(timeout=5):
            log.warning('EventSubscriber start timed out.')
        log.debug("EventSubscriber started. Event pattern=%s", self.binding)
        return gl

    def stop(self):
        """Close the subscriber and terminate the background listen greenlet."""
        self.close()
        # BUGFIX: guard against stop() before start() (or a double stop),
        # which previously raised AttributeError on the None greenlet.
        if self._cbthread is not None:
            self._cbthread.join(timeout=5)
            self._cbthread.kill()
            self._cbthread = None
        log.debug("EventSubscriber stopped. Event pattern=%s", self.binding)

    def __str__(self):
        return "EventSubscriber at %s:\n\trecv_name: %s\n\tcb: %s" % (hex(id(self)), str(self._recv_name), str(self._callback))
class EventRepository(object):
    """
    Front-end to a persistent repository of events.
    """
    def __init__(self, datastore_manager=None, container=None):
        """
        @param datastore_manager  datastore provider; defaults to the container's
        @param container          owning container; defaults to bootstrap.container_instance
        """
        self.container = container or bootstrap.container_instance
        # Get an instance of datastore configured as directory.
        # May be persistent or mock, forced clean, with indexes
        datastore_manager = datastore_manager or self.container.datastore_manager
        self.event_store = datastore_manager.get_datastore("events", DataStore.DS_PROFILE.EVENTS)

    def start(self):
        # Nothing to start beyond __init__
        pass

    def stop(self):
        self.close()

    def close(self):
        """
        Pass-through method to close the underlying datastore.
        """
        self.event_store.close()

    def put_event(self, event):
        """
        Places an event object into the event repository. Retains event_ids if existing.
        Returns event_id of new event.
        @raises BadRequest if event is not an Event instance
        """
        log.trace("Store event persistently %s", event)
        if not isinstance(event, Event):
            raise BadRequest("event must be type Event, not %s" % type(event))
        # Pop any preset _id off the object so the datastore can assign it explicitly
        event_id = event.__dict__.pop("_id", None)
        new_event_id, _ = self.event_store.create(event, event_id)
        return new_event_id

    def put_events(self, events):
        """
        Place given list of event objects into the event repository. Retains event_ids if existing
        and otherwise creates event_ids.
        Returns list of event_ids in same order and index as original list of events objects,
        or None if the given list is empty.
        @raises BadRequest if events is not a list of Event instances
        """
        log.debug("Store %s events persistently", len(events))
        if type(events) is not list:
            raise BadRequest("events must be type list, not %s" % type(events))
        if not all([isinstance(event, Event) for event in events]):
            raise BadRequest("events must all be type Event")
        if events:
            event_res = self.event_store.create_mult(events, allow_ids=True)
            return [eid for success, eid, eobj in event_res]
        else:
            return None

    def get_event(self, event_id):
        """
        Returns the event object for given event_id or raises NotFound
        """
        log.trace("Retrieving persistent event for id=%s", event_id)
        event_obj = self.event_store.read(event_id)
        return event_obj

    def find_events(self, event_type=None, origin=None, start_ts=None, end_ts=None, id_only=False, **kwargs):
        """
        Returns an ordered list of event objects for given query arguments.
        Return format is list of (event_id, event_key, event object) tuples
        """
        log.trace("Retrieving persistent event for event_type=%s, origin=%s, start_ts=%s, end_ts=%s, descending=%s, limit=%s",
                  event_type, origin, start_ts, end_ts, kwargs.get("descending", None), kwargs.get("limit", None))
        events = None
        design_name = "event"
        view_name = None
        start_key = []
        end_key = []
        # Pick the most selective datastore view for the given filters
        if origin and event_type:
            view_name = "by_origintype"
            start_key = [origin, event_type]
            end_key = [origin, event_type]
        elif origin:
            view_name = "by_origin"
            start_key = [origin]
            end_key = [origin]
        elif event_type:
            view_name = "by_type"
            start_key = [event_type]
            end_key = [event_type]
        elif start_ts or end_ts:
            view_name = "by_time"
            start_key = []
            end_key = []
        else:
            # No filters at all: cap the result set to avoid an unbounded query
            view_name = "by_time"
            if kwargs.get("limit", 0) < 1:
                kwargs["limit"] = 100
                log.warn("Querying all events, no limit given. Set limit to 100")
        # A time range extends the chosen view's key ranges
        if start_ts:
            start_key.append(start_ts)
        if end_ts:
            end_key.append(end_ts)
        events = self.event_store.find_by_view(design_name, view_name, start_key=start_key, end_key=end_key,
                                               id_only=id_only, **kwargs)
        return events

    def find_events_query(self, query, id_only=False):
        """
        Find events or event ids by using a standard datastore query. This function fills in datastore and
        profile entries, so these can be omitted from the datastore query.
        @raises BadRequest if query is not a dict containing the query expression key
        """
        if not query or not isinstance(query, dict) or not QUERY_EXP_KEY in query:
            raise BadRequest("Illegal events query")
        qargs = query["query_args"]
        qargs["datastore"] = DataStore.DS_EVENTS
        qargs["profile"] = DataStore.DS_PROFILE.EVENTS
        qargs["id_only"] = id_only
        events = self.event_store.find_by_query(query)
        log.debug("find_events_query() found %s events", len(events))
        return events
class EventGate(EventSubscriber):
    """
    One-shot subscriber: await() blocks until the first matching event arrives.

    NOTE(review): the method name `await` is a reserved keyword in Python 3.7+,
    making this class Python-2-only as written — confirm before porting.
    """
    def __init__(self, *args, **kwargs):
        # Install trigger_cb as the subscriber callback
        EventSubscriber.__init__(self, *args, callback=self.trigger_cb, **kwargs)
    def trigger_cb(self, event):
        # First matching event: stop listening and release the gate
        self.stop()
        self.gate.set()
    def await(self, timeout=None):
        """Start listening and block until an event arrives or timeout elapses.
        Return value is that of gevent Event.wait (truthy if the gate fired)."""
        self.gate = gevent_event.Event()
        self.start()
        return self.gate.wait(timeout)
    def check_or_await(self):
        # Placeholder - not implemented
        pass
def handle_stream_exception(iorigin="stream_exception"):
    """
    Decorator factory that publishes an ExceptionEvent when the wrapped
    function raises a StreamException (the exception is consumed, not
    re-raised, after publishing).

    @param iorigin  origin value to use for the published ExceptionEvent
    @retval decorator wrapping a function; the wrapper returns the wrapped
            function's return value (None when a StreamException was caught)
    """
    def real_decorator(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            try:
                # BUGFIX: previously the return value of fn was discarded;
                # pass it through to the caller.
                return fn(*args, **kwargs)
            except StreamException as e:
                info = "".join(traceback.format_tb(sys.exc_info()[2]))
                pub = EventPublisher(event_type="ExceptionEvent")
                pub.publish_event(origin=iorigin, description="stream exception event", exception_type=str(type(e)), exception_message=info)
        return wrapped
    return real_decorator
class EventQuery(DatastoreQueryBuilder):
"""
Helper class to build datastore queries for the event repository.
Based on the DatastoreQueryBuilder
"""
def __init__(self, order_by=None, limit=0, skip=0):
super(EventQuery, self).__init__(datastore=DataStore.DS_EVENTS, profile=DataStore.DS_PROFILE.EVENTS,
order_by=order_by, limit=limit, skip=skip)
def filter_type(self, type_expr, cmpop=None):
return self.txt_cmp(DQ.ATT_TYPE, type_expr, cmpop)
def filter_origin(self, origin_expr, cmpop=None):
return self.txt_cmp(DQ.EA_ORIGIN, origin_expr, cmpop)
def filter_origin_type(self, origin_expr, cmpop=None):
return self.txt_cmp(DQ.EA_ORIGIN_TYPE, origin_expr, cmpop)
def filter_sub_type(self, type_expr, cmpop=None):
return self.txt_cmp(DQ.EA_SUB_TYPE, type_expr, cmpop)
def filter_ts_created(self, from_expr=None, to_expr=None):
from_expr = self._make_ion_ts(from_expr)
to_expr = self._make_ion_ts(to_expr)
if from_expr and to_expr:
return self.and_(self.gte(DQ.EA_TS_CREATED, from_expr),
self.lte(DQ.EA_TS_CREATED, to_expr))
elif from_expr:
return self.gte(DQ.EA_TS_CREATED, from_expr)
elif to_expr:
return self.lte(DQ.EA_TS_CREATED, to_expr) | src/pyon/ion/event.py | __author__ = '<NAME> <<EMAIL>>, <NAME>'
import functools
import sys
import threading
import traceback
from gevent import event as gevent_event
from pyon.core import bootstrap, MSG_HEADER_ACTOR
from pyon.core.bootstrap import CFG
from pyon.core.exception import BadRequest, IonException, StreamException
from pyon.datastore.datastore import DataStore
from pyon.datastore.datastore_query import QUERY_EXP_KEY, DatastoreQueryBuilder, DQ
from pyon.ion.identifier import create_unique_event_id, create_simple_unique_id
from pyon.net.endpoint import Publisher, Subscriber, BaseEndpoint
from pyon.net.transport import XOTransport, NameTrio
from pyon.util.async import spawn
from pyon.util.containers import get_ion_ts_millis, is_valid_ts
from pyon.util.log import log
from interface.objects import Event
#The event will be ignored if older than this time period
VALID_EVENT_TIME_PERIOD = 365 * 24 * 60 * 60 * 1000 # one year
DEFAULT_SYSTEM_XS = "system"
DEFAULT_EVENTS_XP = "events"
# Alternative way to set process context
event_context = threading.local()
class EventError(IonException):
status_code = 500
class EventPublisher(Publisher):
@classmethod
def get_events_exchange_point(cls):
# match with default output of XOs
root_xs = CFG.get_safe("exchange.core.system_xs", DEFAULT_SYSTEM_XS)
events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
return "%s.%s.%s" % (bootstrap.get_sys_name(), root_xs, events_xp)
def __init__(self, event_type=None, xp=None, process=None, **kwargs):
"""
Constructs a publisher of events for a specific type.
@param event_type The name of the event type object
@param xp Exchange (AMQP) name, can be none, will use events default.
"""
self.event_type = event_type
self.process = process
self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
if bootstrap.container_instance and getattr(bootstrap.container_instance, 'event_repository', None):
self.event_repo = bootstrap.container_instance.event_repository
else:
self.event_repo = None
# generate an exchange name to publish events to
container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
if container and container.has_capability(container.CCAP.EXCHANGE_MANAGER): # might be too early in chain
xp = xp or container.create_xp(self._events_xp)
to_name = xp
else:
xp = xp or self.get_events_exchange_point()
to_name = (xp, None)
Publisher.__init__(self, to_name=to_name, **kwargs)
def _topic(self, event_object):
"""
Builds the topic that this event should be published to.
"""
assert event_object
base_types = event_object.base_types or []
base_str = ".".join(reversed(base_types))
sub_type = event_object.sub_type or "_"
origin_type = event_object.origin_type or "_"
routing_key = "%s.%s.%s.%s.%s" % (base_str, event_object._get_type(), sub_type, origin_type, event_object.origin)
return routing_key
def publish_event_object(self, event_object):
"""
Publishes an event of given type for the given origin. Event_type defaults to an
event_type set when initializing the EventPublisher. Other kwargs fill out the fields
of the event. This operation will fail with an exception.
@param event_object the event object to be published
@retval event_object the event object which was published
"""
if not event_object:
raise BadRequest("Must provide event_object")
event_object.base_types = event_object._get_extends()
topic = self._topic(event_object) # Routing key generated using type_, base_types, origin, origin_type, sub_type
container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
if container and container.has_capability(container.CCAP.EXCHANGE_MANAGER):
# make sure we are an xp, if not, upgrade
if not isinstance(self._send_name, XOTransport):
default_nt = NameTrio(self.get_events_exchange_point())
if isinstance(self._send_name, NameTrio) \
and self._send_name.exchange == default_nt.exchange \
and self._send_name.queue == default_nt.queue \
and self._send_name.binding == default_nt.binding:
self._send_name = container.create_xp(self._events_xp)
else:
self._send_name = container.create_xp(self._send_name)
xp = self._send_name
to_name = xp.create_route(topic)
else:
to_name = (self._send_name.exchange, topic)
current_time = get_ion_ts_millis()
# Ensure valid created timestamp if supplied
if event_object.ts_created:
if not is_valid_ts(event_object.ts_created):
raise BadRequest("The ts_created value is not a valid timestamp: '%s'" % (event_object.ts_created))
# Reject events that are older than specified time
if int(event_object.ts_created) > ( current_time + VALID_EVENT_TIME_PERIOD ):
raise BadRequest("This ts_created value is too far in the future:'%s'" % (event_object.ts_created))
# Reject events that are older than specified time
if int(event_object.ts_created) < (current_time - VALID_EVENT_TIME_PERIOD) :
raise BadRequest("This ts_created value is too old:'%s'" % (event_object.ts_created))
else:
event_object.ts_created = str(current_time)
# Set the actor id based on
if not event_object.actor_id:
event_object.actor_id = self._get_actor_id()
#Validate this object - ideally the validator should pass on problems, but for now just log
#any errors and keep going, since seeing invalid situations are better than skipping validation.
try:
event_object._validate()
except Exception as e:
log.exception(e)
#Ensure the event object has a unique id
if '_id' in event_object:
raise BadRequest("The event object cannot contain a _id field '%s'" % (event_object))
#Generate a unique ID for this event
event_object._id = create_unique_event_id()
try:
self.publish(event_object, to_name=to_name)
except Exception as ex:
log.exception("Failed to publish event (%s): '%s'" % (ex.message, event_object))
raise
return event_object
def publish_event(self, origin=None, event_type=None, **kwargs):
"""
Publishes an event of given type for the given origin. Event_type defaults to an
event_type set when initializing the EventPublisher. Other kwargs fill out the fields
of the event. This operation will fail with an exception.
@param origin the origin field value
@param event_type the event type (defaults to the EventPublisher's event_type if set)
@param kwargs additional event fields
@retval event_object the event object which was published
"""
event_type = event_type or self.event_type
if not event_type:
raise BadRequest("No event_type provided")
event_object = bootstrap.IonObject(event_type, origin=origin, **kwargs)
ret_val = self.publish_event_object(event_object)
return ret_val
def _get_actor_id(self):
"""Returns the current ion-actor-id from incoming process headers"""
actor_id = ""
try:
if self.process:
ctx = self.process.get_context()
actor_id = ctx.get(MSG_HEADER_ACTOR, None) or ""
except Exception as ex:
pass
actor_id = actor_id or getattr(event_context, "actor_id", None) or ""
return actor_id
# Helper for bootstrap purposes
local_event_queues = []
class BaseEventSubscriberMixin(object):
"""
A mixin class for Event subscribers to facilitate inheritance.
EventSubscribers must come in both standard and process level versions, which
rely on common base code. It is difficult to multiple inherit due to both of
them sharing a base class, so this mixin is preferred.
"""
ALL_EVENTS = "#"
@staticmethod
def _topic(event_type, origin, sub_type=None, origin_type=None):
"""
Builds the topic that this event should be published to.
If either side of the event_id.origin pair are missing, will subscribe to anything.
"""
if event_type == "Event":
event_type = "Event.#"
elif event_type:
event_type = "#.%s.#" % event_type
else:
event_type = "#"
sub_type = sub_type or "*.#"
origin_type = origin_type or "*"
origin = origin or "*"
return "%s.%s.%s.%s" % (event_type, sub_type, origin_type, origin)
def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None,
sub_type=None, origin_type=None, pattern=None, auto_delete=None):
self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)
self.event_type = event_type
self.sub_type = sub_type
self.origin_type = origin_type
self.origin = origin
# Default for auto_delete is True for events, unless otherwise specified
if auto_delete is None:
auto_delete = True
self._auto_delete = auto_delete
xp_name = xp_name or self._events_xp
if pattern:
binding = pattern
else:
binding = self._topic(event_type, origin, sub_type, origin_type)
# create queue_name if none passed in
if queue_name is None:
queue_name = "subsc_" + create_simple_unique_id()
# prepend proc name to queue name if we have one
if hasattr(self, "_process") and self._process:
queue_name = "%s_%s" % (self._process._proc_name, queue_name)
# do we have a container/ex_manager?
container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
if container:
xp = container.create_xp(xp_name)
xne = container.create_event_xn(queue_name,
pattern=binding,
xp=xp,
auto_delete=auto_delete)
self._ev_recv_name = xne
self.binding = None
else:
# Remove this case. No container??
self.binding = binding
# prefix the queue_name, if specified, with the sysname
queue_name = "%s.system.%s" % (bootstrap.get_sys_name(), queue_name)
# set this name to be picked up by inherited folks
self._ev_recv_name = (xp_name, queue_name)
local_event_queues.append(queue_name)
def add_event_subscription(self, event_type=None, origin=None, sub_type=None, origin_type=None):
""" An another event subscription based on given characteristics. """
binding = self._topic(event_type, origin, sub_type, origin_type)
if isinstance(self._ev_recv_name, XOTransport):
self._ev_recv_name.bind(binding)
else:
raise BadRequest("Non XO event subscriber not supported")
def remove_event_subscription(self, event_type=None, origin=None, sub_type=None, origin_type=None):
""" Remove an event subscription based on given characteristics. """
binding = self._topic(event_type, origin, sub_type, origin_type)
if isinstance(self._ev_recv_name, XOTransport):
self._ev_recv_name.unbind(binding)
else:
raise BadRequest("Non XO event subscriber not supported")
class EventSubscriber(Subscriber, BaseEventSubscriberMixin):
"""Manages a subscription to an event queue for a select set of event types or
event origins or other specialized binding.
"""
def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None, callback=None,
sub_type=None, origin_type=None, pattern=None, auto_delete=None, *args, **kwargs):
"""
Initializer.
If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
named queues are not namespaces to their exchanges, so two different systems on the same broker
can cross-pollute messages if a named queue is used.
Note: an EventSubscriber needs to be closed to free broker resources
"""
self._cbthread = None
# sets self._ev_recv_name, self.binding
BaseEventSubscriberMixin.__init__(self, xp_name=xp_name, event_type=event_type, origin=origin,
queue_name=queue_name, sub_type=sub_type, origin_type=origin_type,
pattern=pattern, auto_delete=auto_delete)
log.debug("EventPublisher events pattern %s", self.binding)
from_name = self._get_from_name()
binding = self._get_binding()
Subscriber.__init__(self, from_name=from_name, binding=binding, callback=callback,
auto_delete=self._auto_delete, **kwargs)
def _get_from_name(self):
"""
Returns the from_name that the base Subscriber should listen on.
This is overridden in the process level.
"""
return self._ev_recv_name
def _get_binding(self):
"""
Returns the binding that the base Subscriber should use.
This is overridden in the process level.
"""
return self.binding
def start(self):
"""
Pass in a subscriber here, this will make it listen in a background greenlet.
"""
assert not self._cbthread, "start called twice on EventSubscriber"
gl = spawn(self.listen)
gl._glname = "EventSubscriber"
self._cbthread = gl
if not self._ready_event.wait(timeout=5):
log.warning('EventSubscriber start timed out.')
log.debug("EventSubscriber started. Event pattern=%s", self.binding)
return gl
def stop(self):
self.close()
self._cbthread.join(timeout=5)
self._cbthread.kill()
self._cbthread = None
log.debug("EventSubscriber stopped. Event pattern=%s", self.binding)
def __str__(self):
return "EventSubscriber at %s:\n\trecv_name: %s\n\tcb: %s" % (hex(id(self)), str(self._recv_name), str(self._callback))
class EventRepository(object):
"""
Front-end to a persistent persistent repository of events.
"""
def __init__(self, datastore_manager=None, container=None):
self.container = container or bootstrap.container_instance
# Get an instance of datastore configured as directory.
# May be persistent or mock, forced clean, with indexes
datastore_manager = datastore_manager or self.container.datastore_manager
self.event_store = datastore_manager.get_datastore("events", DataStore.DS_PROFILE.EVENTS)
def start(self):
pass
def stop(self):
self.close()
def close(self):
"""
Pass-through method to close the underlying datastore.
"""
self.event_store.close()
def put_event(self, event):
"""
Places an event object into the event repository. Retains event_ids if existing.
Returns event_id of new event.
"""
log.trace("Store event persistently %s", event)
if not isinstance(event, Event):
raise BadRequest("event must be type Event, not %s" % type(event))
event_id = event.__dict__.pop("_id", None)
new_event_id, _ = self.event_store.create(event, event_id)
return new_event_id
def put_events(self, events):
"""
Place given list of event objects into the event repository. Retains event_ids if existing
and otherwise creates event_ids.
Returns list of event_ids in same order and index as original list of events objects.
"""
log.debug("Store %s events persistently", len(events))
if type(events) is not list:
raise BadRequest("events must be type list, not %s" % type(events))
if not all([isinstance(event, Event) for event in events]):
raise BadRequest("events must all be type Event")
if events:
event_res = self.event_store.create_mult(events, allow_ids=True)
return [eid for success, eid, eobj in event_res]
else:
return None
def get_event(self, event_id):
"""
Returns the event object for given event_id or raises NotFound
"""
log.trace("Retrieving persistent event for id=%s", event_id)
event_obj = self.event_store.read(event_id)
return event_obj
def find_events(self, event_type=None, origin=None, start_ts=None, end_ts=None, id_only=False, **kwargs):
"""
Returns an ordered list of event objects for given query arguments.
Return format is list of (event_id, event_key, event object) tuples
"""
log.trace("Retrieving persistent event for event_type=%s, origin=%s, start_ts=%s, end_ts=%s, descending=%s, limit=%s",
event_type, origin, start_ts, end_ts, kwargs.get("descending", None), kwargs.get("limit", None))
events = None
design_name = "event"
view_name = None
start_key = []
end_key = []
if origin and event_type:
view_name = "by_origintype"
start_key = [origin, event_type]
end_key = [origin, event_type]
elif origin:
view_name = "by_origin"
start_key = [origin]
end_key = [origin]
elif event_type:
view_name = "by_type"
start_key = [event_type]
end_key = [event_type]
elif start_ts or end_ts:
view_name = "by_time"
start_key = []
end_key = []
else:
view_name = "by_time"
if kwargs.get("limit", 0) < 1:
kwargs["limit"] = 100
log.warn("Querying all events, no limit given. Set limit to 100")
if start_ts:
start_key.append(start_ts)
if end_ts:
end_key.append(end_ts)
events = self.event_store.find_by_view(design_name, view_name, start_key=start_key, end_key=end_key,
id_only=id_only, **kwargs)
return events
def find_events_query(self, query, id_only=False):
"""
Find events or event ids by using a standard datastore query. This function fills in datastore and
profile entries, so these can be omitted from the datastore query.
"""
if not query or not isinstance(query, dict) or not QUERY_EXP_KEY in query:
raise BadRequest("Illegal events query")
qargs = query["query_args"]
qargs["datastore"] = DataStore.DS_EVENTS
qargs["profile"] = DataStore.DS_PROFILE.EVENTS
qargs["id_only"] = id_only
events = self.event_store.find_by_query(query)
log.debug("find_events_query() found %s events", len(events))
return events
class EventGate(EventSubscriber):
def __init__(self, *args, **kwargs):
EventSubscriber.__init__(self, *args, callback=self.trigger_cb, **kwargs)
def trigger_cb(self, event):
self.stop()
self.gate.set()
def await(self, timeout=None):
self.gate = gevent_event.Event()
self.start()
return self.gate.wait(timeout)
def check_or_await(self):
pass
def handle_stream_exception(iorigin="stream_exception"):
"""
decorator for stream exceptions
"""
def real_decorator(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
fn(*args, **kwargs)
except StreamException as e:
info = "".join(traceback.format_tb(sys.exc_info()[2]))
pub = EventPublisher(event_type="ExceptionEvent")
pub.publish_event(origin=iorigin, description="stream exception event", exception_type=str(type(e)), exception_message=info)
return wrapped
return real_decorator
class EventQuery(DatastoreQueryBuilder):
"""
Helper class to build datastore queries for the event repository.
Based on the DatastoreQueryBuilder
"""
def __init__(self, order_by=None, limit=0, skip=0):
super(EventQuery, self).__init__(datastore=DataStore.DS_EVENTS, profile=DataStore.DS_PROFILE.EVENTS,
order_by=order_by, limit=limit, skip=skip)
def filter_type(self, type_expr, cmpop=None):
return self.txt_cmp(DQ.ATT_TYPE, type_expr, cmpop)
def filter_origin(self, origin_expr, cmpop=None):
return self.txt_cmp(DQ.EA_ORIGIN, origin_expr, cmpop)
def filter_origin_type(self, origin_expr, cmpop=None):
return self.txt_cmp(DQ.EA_ORIGIN_TYPE, origin_expr, cmpop)
def filter_sub_type(self, type_expr, cmpop=None):
return self.txt_cmp(DQ.EA_SUB_TYPE, type_expr, cmpop)
def filter_ts_created(self, from_expr=None, to_expr=None):
from_expr = self._make_ion_ts(from_expr)
to_expr = self._make_ion_ts(to_expr)
if from_expr and to_expr:
return self.and_(self.gte(DQ.EA_TS_CREATED, from_expr),
self.lte(DQ.EA_TS_CREATED, to_expr))
elif from_expr:
return self.gte(DQ.EA_TS_CREATED, from_expr)
elif to_expr:
return self.lte(DQ.EA_TS_CREATED, to_expr) | 0.450118 | 0.12363 |
from pyids.algorithms.optimizers.rs_optimizer import RSOptimizer
from ...data_structures.ids_ruleset import IDSRuleSet
import math
import numpy as np
import logging
class SLSOptimizer:
def __init__(self, objective_function, objective_func_params, optimizer_args=dict(), random_seed=None):
self.delta = 0.33
self.objective_function_params = objective_func_params
self.objective_function = objective_function
self.rs_optimizer = RSOptimizer(self.objective_function_params.params["all_rules"].ruleset,
random_seed=random_seed)
self.logger = logging.getLogger(SLSOptimizer.__name__)
self.max_omega_iterations = optimizer_args.get("max_omega_iterations", 10000)
if random_seed:
np.random.seed(random_seed)
def compute_OPT(self):
solution_set = self.rs_optimizer.optimize()
return self.objective_function.evaluate(IDSRuleSet(solution_set))
def estimate_omega(self, rule, solution_set, error_threshold, delta):
exp_include_func_vals = []
exp_exclude_func_vals = []
omega_estimation_extensions = 0
omega_estimation_iterations = 10
last_standard_error = 0
current_standard_error = 0
idx = 0
improvement_rate = 1
iteration_step_dict = dict()
while True:
for _ in range(omega_estimation_iterations):
temp_soln_set = self.sample_random_set(solution_set.ruleset, delta)
temp_soln_set.add(rule)
func_val = self.objective_function.evaluate(IDSRuleSet(temp_soln_set))
exp_include_func_vals.append(func_val)
for _ in range(omega_estimation_iterations):
temp_soln_set = self.sample_random_set(solution_set.ruleset, delta)
if rule in temp_soln_set:
temp_soln_set.remove(rule)
func_val = self.objective_function.evaluate(IDSRuleSet(temp_soln_set))
exp_exclude_func_vals.append(func_val)
variance_exp_include = np.var(exp_include_func_vals)
variance_exp_exclude = np.var(exp_exclude_func_vals)
standard_error = math.sqrt(
variance_exp_include / len(exp_include_func_vals) + variance_exp_exclude / len(exp_exclude_func_vals))
self.logger.debug("INFO - stardard error of omega estimate: {}".format(standard_error))
if standard_error > error_threshold:
if idx == 0:
last_standard_error = standard_error
idx += 1
continue
current_standard_error = standard_error
current_step = last_standard_error - current_standard_error
remaining_step = current_standard_error - error_threshold
improvement_rate = current_step / last_standard_error
if improvement_rate == 0:
improvement_rate = 1
iteration_step_dict[omega_estimation_iterations] = current_step
if not remaining_step <= current_step:
omega_estimation_iterations = round(omega_estimation_iterations / improvement_rate) + 1
if omega_estimation_iterations > self.max_omega_iterations:
omega_estimation_iterations = self.max_omega_iterations
self.logger.debug(
f"INFO - current_standard_error: {current_standard_error},"
f" last_standard_error: {last_standard_error},"
f" improvement_rate: {improvement_rate}, "
f" omega_estimation_iterations: {omega_estimation_iterations}"
)
self.logger.debug("INFO - {} > {} => omega estimation continues".format(standard_error, error_threshold))
last_standard_error = current_standard_error
if standard_error <= error_threshold:
self.logger.debug("INFO - omega succesfully estimated")
break
return np.mean(exp_include_func_vals) - np.mean(exp_exclude_func_vals)
def optimize_delta(self, delta, delta_prime):
all_rules = self.objective_function_params.params["all_rules"]
OPT = self.compute_OPT()
n = len(all_rules)
self.logger.debug("INFO - Number of input rules: {}".format(n))
self.logger.debug("INFO - RandomOptimizer estimated the OPTIMUM value as: {}".format(OPT))
self.logger.debug(
"INFO - Threshold value (2/(n*n) * OPT) = {}. This is the standard error treshold value.".format(
2.0 / (n * n) * OPT))
soln_set = IDSRuleSet(set())
restart_omega_computations = False
while True:
omega_estimates = {}
for rule in all_rules.ruleset:
self.logger.debug("INFO - Estimating omega for rule: {}".format(rule))
omega_est = self.estimate_omega(rule, soln_set, 1.0 / (n * n) * OPT, delta)
omega_estimates[rule] = omega_est
if rule in soln_set.ruleset:
continue
if omega_est > 2.0 / (n * n) * OPT:
# add this element to solution set and recompute omegas
soln_set.ruleset.add(rule)
restart_omega_computations = True
self.logger.debug("Adding rule: {} to the solution set.".format(rule))
break
if restart_omega_computations:
restart_omega_computations = False
continue
for rule_idx, rule in enumerate(soln_set.ruleset):
if omega_estimates[rule] < -2.0 / (n * n) * OPT:
soln_set.ruleset.remove(rule)
restart_omega_computations = True
self.logger.debug("Removing rule: {} from the solution set.".format(rule))
break
if restart_omega_computations:
restart_omega_computations = False
continue
return self.sample_random_set(soln_set.ruleset, delta_prime)
def sample_random_set(self, soln_set, delta):
# get params from cache
return_set = set()
all_rules_set = self.objective_function_params.params["all_rules"].ruleset
p = (delta + 1.0) / 2
for item in soln_set:
random_val = np.random.uniform()
if random_val <= p:
return_set.add(item)
p_prime = (1.0 - delta) / 2
for item in (all_rules_set - soln_set):
random_val = np.random.uniform()
if random_val <= p_prime:
return_set.add(item)
return return_set
def optimize(self):
solution1 = self.optimize_delta(1 / 3, 1 / 3)
solution2 = self.optimize_delta(1 / 3, -1.0)
func_val1 = self.objective_function.evaluate(IDSRuleSet(solution1))
func_val2 = self.objective_function.evaluate(IDSRuleSet(solution2))
if func_val1 >= func_val2:
return solution1
else:
return solution2 | pyids/algorithms/optimizers/sls_optimizer.py | from pyids.algorithms.optimizers.rs_optimizer import RSOptimizer
from ...data_structures.ids_ruleset import IDSRuleSet
import math
import numpy as np
import logging
class SLSOptimizer:
def __init__(self, objective_function, objective_func_params, optimizer_args=dict(), random_seed=None):
self.delta = 0.33
self.objective_function_params = objective_func_params
self.objective_function = objective_function
self.rs_optimizer = RSOptimizer(self.objective_function_params.params["all_rules"].ruleset,
random_seed=random_seed)
self.logger = logging.getLogger(SLSOptimizer.__name__)
self.max_omega_iterations = optimizer_args.get("max_omega_iterations", 10000)
if random_seed:
np.random.seed(random_seed)
def compute_OPT(self):
solution_set = self.rs_optimizer.optimize()
return self.objective_function.evaluate(IDSRuleSet(solution_set))
def estimate_omega(self, rule, solution_set, error_threshold, delta):
exp_include_func_vals = []
exp_exclude_func_vals = []
omega_estimation_extensions = 0
omega_estimation_iterations = 10
last_standard_error = 0
current_standard_error = 0
idx = 0
improvement_rate = 1
iteration_step_dict = dict()
while True:
for _ in range(omega_estimation_iterations):
temp_soln_set = self.sample_random_set(solution_set.ruleset, delta)
temp_soln_set.add(rule)
func_val = self.objective_function.evaluate(IDSRuleSet(temp_soln_set))
exp_include_func_vals.append(func_val)
for _ in range(omega_estimation_iterations):
temp_soln_set = self.sample_random_set(solution_set.ruleset, delta)
if rule in temp_soln_set:
temp_soln_set.remove(rule)
func_val = self.objective_function.evaluate(IDSRuleSet(temp_soln_set))
exp_exclude_func_vals.append(func_val)
variance_exp_include = np.var(exp_include_func_vals)
variance_exp_exclude = np.var(exp_exclude_func_vals)
standard_error = math.sqrt(
variance_exp_include / len(exp_include_func_vals) + variance_exp_exclude / len(exp_exclude_func_vals))
self.logger.debug("INFO - stardard error of omega estimate: {}".format(standard_error))
if standard_error > error_threshold:
if idx == 0:
last_standard_error = standard_error
idx += 1
continue
current_standard_error = standard_error
current_step = last_standard_error - current_standard_error
remaining_step = current_standard_error - error_threshold
improvement_rate = current_step / last_standard_error
if improvement_rate == 0:
improvement_rate = 1
iteration_step_dict[omega_estimation_iterations] = current_step
if not remaining_step <= current_step:
omega_estimation_iterations = round(omega_estimation_iterations / improvement_rate) + 1
if omega_estimation_iterations > self.max_omega_iterations:
omega_estimation_iterations = self.max_omega_iterations
self.logger.debug(
f"INFO - current_standard_error: {current_standard_error},"
f" last_standard_error: {last_standard_error},"
f" improvement_rate: {improvement_rate}, "
f" omega_estimation_iterations: {omega_estimation_iterations}"
)
self.logger.debug("INFO - {} > {} => omega estimation continues".format(standard_error, error_threshold))
last_standard_error = current_standard_error
if standard_error <= error_threshold:
self.logger.debug("INFO - omega succesfully estimated")
break
return np.mean(exp_include_func_vals) - np.mean(exp_exclude_func_vals)
def optimize_delta(self, delta, delta_prime):
all_rules = self.objective_function_params.params["all_rules"]
OPT = self.compute_OPT()
n = len(all_rules)
self.logger.debug("INFO - Number of input rules: {}".format(n))
self.logger.debug("INFO - RandomOptimizer estimated the OPTIMUM value as: {}".format(OPT))
self.logger.debug(
"INFO - Threshold value (2/(n*n) * OPT) = {}. This is the standard error treshold value.".format(
2.0 / (n * n) * OPT))
soln_set = IDSRuleSet(set())
restart_omega_computations = False
while True:
omega_estimates = {}
for rule in all_rules.ruleset:
self.logger.debug("INFO - Estimating omega for rule: {}".format(rule))
omega_est = self.estimate_omega(rule, soln_set, 1.0 / (n * n) * OPT, delta)
omega_estimates[rule] = omega_est
if rule in soln_set.ruleset:
continue
if omega_est > 2.0 / (n * n) * OPT:
# add this element to solution set and recompute omegas
soln_set.ruleset.add(rule)
restart_omega_computations = True
self.logger.debug("Adding rule: {} to the solution set.".format(rule))
break
if restart_omega_computations:
restart_omega_computations = False
continue
for rule_idx, rule in enumerate(soln_set.ruleset):
if omega_estimates[rule] < -2.0 / (n * n) * OPT:
soln_set.ruleset.remove(rule)
restart_omega_computations = True
self.logger.debug("Removing rule: {} from the solution set.".format(rule))
break
if restart_omega_computations:
restart_omega_computations = False
continue
return self.sample_random_set(soln_set.ruleset, delta_prime)
def sample_random_set(self, soln_set, delta):
# get params from cache
return_set = set()
all_rules_set = self.objective_function_params.params["all_rules"].ruleset
p = (delta + 1.0) / 2
for item in soln_set:
random_val = np.random.uniform()
if random_val <= p:
return_set.add(item)
p_prime = (1.0 - delta) / 2
for item in (all_rules_set - soln_set):
random_val = np.random.uniform()
if random_val <= p_prime:
return_set.add(item)
return return_set
def optimize(self):
solution1 = self.optimize_delta(1 / 3, 1 / 3)
solution2 = self.optimize_delta(1 / 3, -1.0)
func_val1 = self.objective_function.evaluate(IDSRuleSet(solution1))
func_val2 = self.objective_function.evaluate(IDSRuleSet(solution2))
if func_val1 >= func_val2:
return solution1
else:
return solution2 | 0.732687 | 0.295509 |
import scrapy
import re
import json
from locations.items import GeojsonPointItem
class McDonaldsEGSpider(scrapy.Spider):
name = "mcdonalds_eg"
item_attributes = {"brand": "McDonald's"}
allowed_domains = ["www.mcdonalds.eg"]
start_urls = ("http://www.mcdonalds.eg/ar/stores/page/228",)
def normalize_time(self, time_str, flag=False):
match = re.search(r"([0-9]{1,2}):([0-9]{1,2})", time_str)
h, m = match.groups()
return "%02d:%02d" % (
int(h) + 12 if flag and int(h) < 13 else int(h),
int(m),
)
def store_hours(self, response):
response = '<div id="mapInfo" style="width: 200px;">' + response
selector = scrapy.Selector(text=response)
opening_hours = selector.xpath(
'//*[@id="mapInfo"]/div/span[4]/text()'
).extract_first()
if not opening_hours:
return None
opening_hours = opening_hours.strip()
match = re.search(
r" ([0-9]{1,2}:[0-9]{1,2}).*([0-9]{1,2}:[0-9]{1,2})", opening_hours
)
if not match:
return None
start, end = match.groups()
start = self.normalize_time(start)
end = self.normalize_time(end, True)
return "Mo-Fr {}-{}".format(start, end)
def parse_data(self, response):
response = '<div id="mapInfo" style="width: 200px;">' + response
selector = scrapy.Selector(text=response)
name = (
selector.xpath('//h2[@class="store-title"]/b/text()')
.extract_first()
.strip()
)
address = (
selector.xpath('//*[@id="mapInfo"]/div/span[1]/text()')
.extract_first()
.strip()
)
phone = selector.xpath('//span[@class="store-tel"]/text()').extract_first()
phone = phone.strip() if phone else ""
return name, address, phone
def parse(self, response):
match = re.search(r"var locS = (\[.*\])\;", response.text)
results = json.loads(match.groups()[0])
index = 0
for data in results:
data = data.replace("''", '""')
try:
store = json.loads(data)
except Exception as e:
continue
name, address, phone = self.parse_data(store[0])
properties = {
"ref": index,
"addr_full": address,
"phone": phone,
"name": name,
"lat": store[1],
"lon": store[2],
}
opening_hours = self.store_hours(store[0])
if opening_hours:
properties["opening_hours"] = opening_hours
index = index + 1
yield GeojsonPointItem(**properties) | locations/spiders/mcdonalds_eg.py | import scrapy
import re
import json
from locations.items import GeojsonPointItem
class McDonaldsEGSpider(scrapy.Spider):
name = "mcdonalds_eg"
item_attributes = {"brand": "McDonald's"}
allowed_domains = ["www.mcdonalds.eg"]
start_urls = ("http://www.mcdonalds.eg/ar/stores/page/228",)
def normalize_time(self, time_str, flag=False):
match = re.search(r"([0-9]{1,2}):([0-9]{1,2})", time_str)
h, m = match.groups()
return "%02d:%02d" % (
int(h) + 12 if flag and int(h) < 13 else int(h),
int(m),
)
def store_hours(self, response):
response = '<div id="mapInfo" style="width: 200px;">' + response
selector = scrapy.Selector(text=response)
opening_hours = selector.xpath(
'//*[@id="mapInfo"]/div/span[4]/text()'
).extract_first()
if not opening_hours:
return None
opening_hours = opening_hours.strip()
match = re.search(
r" ([0-9]{1,2}:[0-9]{1,2}).*([0-9]{1,2}:[0-9]{1,2})", opening_hours
)
if not match:
return None
start, end = match.groups()
start = self.normalize_time(start)
end = self.normalize_time(end, True)
return "Mo-Fr {}-{}".format(start, end)
def parse_data(self, response):
response = '<div id="mapInfo" style="width: 200px;">' + response
selector = scrapy.Selector(text=response)
name = (
selector.xpath('//h2[@class="store-title"]/b/text()')
.extract_first()
.strip()
)
address = (
selector.xpath('//*[@id="mapInfo"]/div/span[1]/text()')
.extract_first()
.strip()
)
phone = selector.xpath('//span[@class="store-tel"]/text()').extract_first()
phone = phone.strip() if phone else ""
return name, address, phone
def parse(self, response):
match = re.search(r"var locS = (\[.*\])\;", response.text)
results = json.loads(match.groups()[0])
index = 0
for data in results:
data = data.replace("''", '""')
try:
store = json.loads(data)
except Exception as e:
continue
name, address, phone = self.parse_data(store[0])
properties = {
"ref": index,
"addr_full": address,
"phone": phone,
"name": name,
"lat": store[1],
"lon": store[2],
}
opening_hours = self.store_hours(store[0])
if opening_hours:
properties["opening_hours"] = opening_hours
index = index + 1
yield GeojsonPointItem(**properties) | 0.336767 | 0.165694 |
import re
from argparse import ArgumentParser
from itertools import groupby
from os import environ
from sys import stderr, stdout
from typing import Any, Dict, List, Match, Optional, Set, Tuple
from pithy.ansi import BG, BOLD, FILL, RST, RST_BOLD, RST_TXT, TXT, gray26, rgb6, sanitize_for_console, sgr
from pithy.diff import calc_diff
'''
same-same is a git diff highlighter.
To use it, add the following configuration to your .gitconfig:
[core]
pager = same-same | LESSANSIENDCHARS=mK less --RAW-CONTROL-CHARS
[interactive]
diffFilter = same-same -interactive | LESSANSIENDCHARS=mK less --RAW-CONTROL-CHARS
[diff]
noprefix = true
wsErrorHighlight = none # Intraline coloring creates trouble for same-same.
To disable, set 'SAME_SAME_OFF' in the shell environment.'
'''
class DiffLine:
def __init__(self, kind:str, match:Match) -> None:
self.kind = kind # The name from `diff_pat` named capture groups.
self.match = match
self.old_num = 0 # 1-indexed.
self.new_num = 0 # ".
self.chunk_idx = 0 # Positive for rem/add.
self.is_src = False # True for ctx/rem/add.
self.text = '' # Final text for ctx/rem/add.
@property
def raw_text(self) -> str:
return self.match.string # type: ignore
def set_text(self, key:str, clip:bool=False) -> None:
text = self.match[key]
if self.match['git_color'] or clip:
text = clip_reset(text)
self.text = text
def main() -> None:
arg_parser = ArgumentParser(prog='same-same', description='Git diff filter.')
arg_parser.add_argument('-interactive', action='store_true', help="Accommodate git's interactive mode.")
args = arg_parser.parse_args()
# Git can generate utf8-illegal sequences; ignore them.
stdin = open(0, errors='replace')
if 'SAME_SAME_OFF' in environ:
for line in stdin:
stdout.write(line)
exit(0)
dbg = ('SAME_SAME_DBG' in environ)
# Break input into segments starting with 'diff' lines.
# Note that the first segment might begin with any kind of line.
buffer:List[DiffLine] = []
def flush_buffer() -> None:
nonlocal buffer
if buffer:
handle_file_lines(buffer, interactive=args.interactive)
buffer = []
try:
for line in stdin:
raw_text = line.rstrip('\n')
match = diff_pat.match(raw_text)
assert match is not None
kind = match.lastgroup
assert kind is not None, match
if dbg:
print(kind, ':', repr(raw_text))
continue
if kind in pass_kinds:
flush_buffer()
print(raw_text)
continue
if kind == 'diff':
flush_buffer()
buffer.append(DiffLine(kind, match))
flush_buffer()
except BrokenPipeError:
stderr.close() # Prevents warning message.
def handle_file_lines(lines:List[DiffLine], interactive:bool) -> None:
first = lines[0]
kind = first.kind
# If we are processing `git log --graph` then parsing will fail; detect and skip.
if git_diff_graph_mode_pat.match(first.raw_text).end(): # type: ignore
for line in lines: print(line.raw_text)
return
# Scan `lines` to build up diff structures.
old_ctx_nums:Set[int] = set() # Line numbers of context lines.
new_ctx_nums:Set[int] = set() # ".
old_lines:Dict[int, DiffLine] = {} # Maps of line numbers to line structs.
new_lines:Dict[int, DiffLine] = {} # ".
old_uniques:Dict[str, Optional[int]] = {} # Maps unique line bodies to line numbers.
new_uniques:Dict[str, Optional[int]] = {} # ".
old_num = 0 # 1-indexed source line number.
new_num = 0 # ".
chunk_idx = 0 # Counter to differentiate chunks; becomes part of the groupby key.
old_path = '<OLD_PATH>'
new_path = '<NEW_PATH>'
is_prev_add_rem = False
is_loc_colored = False # Because git diff does not give ctx lines an sgr prefix, it seems more reliable to detect from the hunk.
for line in lines:
match = line.match
kind = line.kind
is_add_rem = (kind in ('rem', 'add'))
if not is_prev_add_rem and is_add_rem: chunk_idx += 1
is_prev_add_rem = is_add_rem
if kind in ('ctx', 'rem', 'add'):
line.is_src = True
if kind == 'ctx':
line.set_text(key='ctx_text', clip=is_loc_colored) # Clip is a hack; ctx lines do not have a leading color sequence.
elif kind == 'rem':
line.set_text(key='rem_text')
line.chunk_idx = chunk_idx
insert_unique_line(old_uniques, line.text, old_num)
elif kind == 'add':
line.set_text(key='add_text')
line.chunk_idx = chunk_idx
insert_unique_line(new_uniques, line.text, new_num)
if kind in ('ctx', 'rem'):
assert old_num not in old_lines
assert old_num not in old_ctx_nums
line.old_num = old_num
old_lines[old_num] = line
old_ctx_nums.add(old_num)
old_num += 1
if kind in ('ctx', 'add'):
assert new_num not in new_lines
assert new_num not in new_ctx_nums
line.new_num = new_num
new_lines[new_num] = line
new_ctx_nums.add(new_num)
new_num += 1
elif kind == 'loc':
is_loc_colored = bool(line.match['git_color'])
o = int(match['old_num'])
if o > 0:
assert o > old_num, (o, old_num, match.string)
old_num = o
n = int(match['new_num'])
if n > 0:
assert n > new_num
new_num = n
elif kind == 'diff': # Not the best way to parse paths, because paths with spaces are ambiguous.
paths = clip_reset(match['diff_paths']).split(' ') # Split into words, then guess at old and new split as best we can.
i = len(paths) // 2 # Assume that both sides have the same number of spaces between them.
# Note: if this does not prove sufficient for file renames we could try to find a split that matches either head or tail.
old_path = ' '.join(paths[:i])
new_path = ' '.join(paths[i:])
elif kind == 'old': old_path = vscode_path(clip_reset(match['old_path']).rstrip('\t'))
elif kind == 'new': new_path = vscode_path(clip_reset(match['new_path']).rstrip('\t')) # Not sure why this trailing tab appears.
#^ These lines are a better way to parse the paths, but are not always present (particularly when one side is /dev/null).
#^ Since they come after the diff line, they will overwrite the previous guess.
# Detect moved lines.
def diff_lines_match(old_idx:int, new_idx:int) -> bool:
if old_idx in old_ctx_nums or new_idx in new_ctx_nums: return False
try: return old_lines[old_idx].text.strip() == new_lines[new_idx].text.strip()
except KeyError: return False
old_moved_nums:Set[int] = set()
new_moved_nums:Set[int] = set()
for body, new_idx in new_uniques.items():
if new_idx is None: continue
old_idx = old_uniques.get(body)
if old_idx is None: continue
p_o = old_idx
p_n = new_idx
while diff_lines_match(p_o-1, p_n-1):
p_o -= 1
p_n -= 1
e_o = old_idx + 1
e_n = new_idx + 1
while diff_lines_match(e_o, e_n):
e_o += 1
e_n += 1
old_moved_nums.update(range(p_o, e_o))
new_moved_nums.update(range(p_n, e_n))
# Break lines into rem/add chunks and print them.
# While a "hunk" is a series of (possibly many) ctx/rem/add lines provided by git diff,
# a "chunk" is either a contiguous block of rem/add lines, or else any other single line.
# This approach simplifies the token diffing process so that it is a reasonably
# straightforward comparison of a rem block to an add block.
def chunk_key(line:DiffLine) -> Tuple[bool, int, bool]:
return (line.is_src, line.chunk_idx, (line.old_num in old_moved_nums or line.new_num in new_moved_nums))
for ((is_src, chunk_idx, is_moved), _chunk) in groupby(lines, key=chunk_key):
chunk = list(_chunk) # We iterate over the sequence several times.
if chunk_idx and not is_moved: # Chunk should be diffed by tokens.
# We must ensure that the same number of lines is output, at least for `-interactive` mode.
# Currently, we do not reorder lines at all, but that is an option for the future.
rem_lines = [l for l in chunk if l.old_num]
add_lines = [l for l in chunk if l.new_num]
add_token_diffs(rem_lines, add_lines)
elif is_src: # ctx or moved.
for l in chunk:
l.text = ''.join(sanitize_for_console(l.text))
# Print lines.
for line in chunk:
kind = line.kind
match = line.match
text = line.text
if kind == 'ctx':
print(text)
elif kind == 'rem':
m = C_REM_MOVED if line.old_num in old_moved_nums else ''
print(m, text, C_END, sep='')
elif kind == 'add':
m = C_ADD_MOVED if line.new_num in new_moved_nums else ''
print(m, text, C_END, sep='')
elif kind == 'loc':
new_num = match['new_num']
snippet = clip_reset(match['parent_snippet'])
s = ' ' + C_SNIPPET if snippet else ''
print(C_LOC, new_path, ':', new_num, ':', s, snippet, C_END, sep='')
elif kind == 'diff':
msg = new_path if (old_path == new_path) else '{} -> {}'.format(old_path, new_path)
print(C_FILE, msg, ':', C_END, sep='')
elif kind == 'meta':
print(C_MODE, new_path, ':', RST, ' ', line.text, sep='')
elif kind in dropped_kinds:
if interactive: # Cannot drop lines, because interactive mode slices the diff by line counts.
print(C_DROPPED, line.text, RST, sep='')
elif kind in pass_kinds:
print(line.text)
else:
raise Exception('unhandled kind: {}\n{!r}'.format(kind, text))
dropped_kinds = {
'idx', 'old', 'new'
}
pass_kinds = {
'author', 'commit', 'date', 'empty', 'other'
}
def insert_unique_line(d:Dict[str, Optional[int]], line:str, idx:int) -> None:
'For the purpose of movement detection, lines are tested for uniqueness after stripping leading and trailing whitespace.'
body = line.strip()
if body in d: d[body] = None
else: d[body] = idx
def add_token_diffs(rem_lines:List[DiffLine], add_lines:List[DiffLine]) -> None:
'Rewrite DiffLine.text values to include per-token diff highlighting.'
r = HighlightState(lines=rem_lines, tokens=tokenize_difflines(rem_lines), hl_ctx=C_REM_CTX, hl_space=C_REM_SPACE, hl_token=C_REM_TOKEN)
a = HighlightState(lines=add_lines, tokens=tokenize_difflines(add_lines), hl_ctx=C_ADD_CTX, hl_space=C_ADD_SPACE, hl_token=C_ADD_TOKEN)
for r_r, r_a in calc_diff(r.tokens, a.tokens):
if r_r and r_a: # Matching tokens; highlight as context.
r.highlight_frags(r_r, is_ctx=True)
a.highlight_frags(r_a, is_ctx=True)
elif r_r: r.highlight_frags(r_r, is_ctx=False)
elif r_a: a.highlight_frags(r_a, is_ctx=False)
# Update the mutable lines lists.
r.update_lines()
a.update_lines()
H_START, H_CTX, H_SPACE, H_TOKEN = range(4)
class HighlightState:
def __init__(self, lines:List[DiffLine], tokens:List[str], hl_ctx:str, hl_space:str, hl_token:str) -> None:
self.lines = lines
self.tokens = tokens
self.hl_ctx = hl_ctx # Context highlight.
self.hl_space = hl_space # Significant space highlight.
self.hl_token = hl_token # Token highlighter.
self.state = H_START
self.line_idx = 0
self.frags:List[List[str]] = [[] for _ in lines]
def highlight_frags(self, rng:range, is_ctx:bool) -> None:
for frag in self.tokens[rng.start:rng.stop]:
line_frags = self.frags[self.line_idx]
if frag == '\n':
if self.state != H_CTX:
line_frags.append(self.hl_ctx) # When combined with C_END, this highlights to end of line.
self.state = H_START
self.line_idx += 1
else:
if is_ctx:
if self.state != H_CTX:
self.state = H_CTX
line_frags.append(self.hl_ctx)
elif frag.isspace():
if self.state == H_START: # Don't highlight spaces at the start of lines.
self.state = H_TOKEN
line_frags.append(self.hl_token)
elif self.state == H_CTX:
self.state = H_SPACE
line_frags.append(self.hl_space)
else:
if self.state != H_TOKEN:
self.state = H_TOKEN
line_frags.append(self.hl_token)
line_frags.extend(sanitize_for_console(frag))
def update_lines(self) -> None:
for line, line_frags in zip(self.lines, self.frags):
line.text = ''.join(line_frags)
def tokenize_difflines(lines:List[DiffLine]) -> List[str]:
'Convert the list of line texts into a single list of tokens, including newline tokens.'
tokens:List[str] = []
for line in lines:
tokens.extend(m[0] for m in token_pat.finditer(line.text))
tokens.append('\n')
return tokens
def is_token_junk(token:str) -> bool:
'''
Treate newlines as tokens, but all other whitespace as junk.
This forces the diff algorithm to respect line breaks but not get distracted aligning to whitespace.
'''
return token.isspace() and token != '\n'
git_diff_graph_mode_pat = re.compile(r'(?x) [ /\*\|\\]*') # space is treated as literal inside of brackets, even in extended mode.
diff_pat = re.compile(r'''(?x)
(?P<git_color> \x1b \[ \d* m)*
(?:
(?P<empty> $ )
| (?P<commit> commit\ [0-9a-z]{40} )
| (?P<author> Author: )
| (?P<date> Date: )
| (?P<diff> diff\ --git\ (?P<diff_paths>.+) )
| (?P<idx> index )
| (?P<old> --- \ (?P<old_path>.+) )
| (?P<new> \+\+\+ \ (?P<new_path>.+) )
| (?P<loc> @@\ -(?P<old_num>\d+)(?P<old_len>,\d+)?\ \+(?P<new_num>\d+)(?P<new_len>,\d+)?\ @@
(?:\x1b\[m)? \ ? (?:\x1b\[m)? (?P<parent_snippet>.*) ) # Note the RST SPACE RST sequence.
| (?P<ctx> \ (?P<ctx_text>.*) )
| (?P<rem> - (?P<rem_text>.*) )
| (?P<add> \+(?:\x1b\[m\x1b\[32m)? (?P<add_text>.*) ) # Hack to remove extra color sequences that git 2.19.2 shows for these lines only.
| (?P<meta>
( old\ mode
| new\ mode
| deleted\ file\ mode
| new\ file\ mode
| copy\ from
| copy\ to
| rename\ from
| rename\ to
| similarity\ index
| dissimilarity\ index ) )
| (?P<other> .* )
)
''')
token_pat = re.compile(r'''(?x)
(?:(?!_)\w)+ # Word characters, excluding underscores.
| \d+ # Numbers.
| \ + # Spaces; distinct from other whitespace.
| \t+ # Tabs; distinct from other whitespace.
| \s+ # Other whitespace.
| . # Any other single character; newlines are never present so DOTALL is irrelevant.
''')
# same-same colors.
C_FILE = sgr(BG, rgb6(1, 0, 1))
C_MODE = sgr(BG, rgb6(1, 0, 1))
C_LOC = sgr(BG, rgb6(0, 1, 2))
C_UNKNOWN = sgr(BG, rgb6(5, 0, 5))
C_SNIPPET = sgr(TXT, gray26(22))
C_DROPPED = sgr(TXT, gray26(10))
REM_BG = rgb6(1, 0, 0)
ADD_BG = rgb6(0, 1, 0)
C_REM_MOVED = sgr(BG, REM_BG, TXT, rgb6(4, 2, 0)) # Move detected.
C_ADD_MOVED = sgr(BG, ADD_BG, TXT, rgb6(2, 4, 0))
# Token highlighting.
C_REM_CTX = sgr(BG, REM_BG, RST_TXT, RST_BOLD)
C_ADD_CTX = sgr(BG, ADD_BG, RST_TXT, RST_BOLD)
C_REM_SPACE = sgr(BG, rgb6(3, 0, 0), RST_TXT, BOLD) # Change to space.
C_ADD_SPACE = sgr(BG, rgb6(0, 3, 0), RST_TXT, BOLD)
C_REM_TOKEN = sgr(BG, REM_BG, TXT, rgb6(5, 2, 3), BOLD)
C_ADD_TOKEN = sgr(BG, ADD_BG, TXT, rgb6(2, 5, 3), BOLD)
C_RST_TOKEN = sgr(RST_TXT, RST_BOLD)
C_END = FILL
def vscode_path(path:str) -> str:
  '''
  Prefix a plain file name with "./" so that VSCode recognizes it as a source location.
  Paths already containing a slash, and pseudo-names such as <stdin>, are returned untouched.
  '''
  if any(c in path for c in '/<>'):
    return path
  return './' + path
reset_sgr = '\x1b[m' # Git uses the short version with "0" code omitted.
def clip_reset(text:str) -> str:
  'Strip a single trailing SGR reset sequence from `text`, if one is present.'
  if not text.endswith(reset_sgr):
    return text
  return text[:-len(reset_sgr)]
def errL(*items:Any) -> None: print(*items, sep='', file=stderr) # Print items to stderr, concatenated with no separator.
def errSL(*items:Any) -> None: print(*items, file=stderr) # Print items to stderr, separated by spaces.
if __name__ == '__main__': main() | pithytools/bin/same_same.py |
import re
from argparse import ArgumentParser
from itertools import groupby
from os import environ
from sys import stderr, stdout
from typing import Any, Dict, List, Match, Optional, Set, Tuple
from pithy.ansi import BG, BOLD, FILL, RST, RST_BOLD, RST_TXT, TXT, gray26, rgb6, sanitize_for_console, sgr
from pithy.diff import calc_diff
'''
same-same is a git diff highlighter.
To use it, add the following configuration to your .gitconfig:
[core]
pager = same-same | LESSANSIENDCHARS=mK less --RAW-CONTROL-CHARS
[interactive]
diffFilter = same-same -interactive | LESSANSIENDCHARS=mK less --RAW-CONTROL-CHARS
[diff]
noprefix = true
wsErrorHighlight = none # Intraline coloring creates trouble for same-same.
To disable, set 'SAME_SAME_OFF' in the shell environment.
'''
class DiffLine:
  'A single parsed line of `git diff` output, classified by `diff_pat`.'
  def __init__(self, kind:str, match:Match) -> None:
    self.kind = kind # The name from `diff_pat` named capture groups.
    self.match = match
    self.old_num = 0 # 1-indexed.
    self.new_num = 0 # ".
    self.chunk_idx = 0 # Positive for rem/add.
    self.is_src = False # True for ctx/rem/add.
    self.text = '' # Final text for ctx/rem/add.
  @property
  def raw_text(self) -> str:
    'The complete original line text that `diff_pat` matched against.'
    return self.match.string # type: ignore
  def set_text(self, key:str, clip:bool=False) -> None:
    'Set `text` from capture group `key`, clipping a trailing SGR reset when git colored the line (or when `clip` forces it).'
    text = self.match[key]
    if self.match['git_color'] or clip:
      text = clip_reset(text)
    self.text = text
def main() -> None:
  'Entry point: read `git diff` output from stdin, colorize it, and write to stdout.'
  arg_parser = ArgumentParser(prog='same-same', description='Git diff filter.')
  arg_parser.add_argument('-interactive', action='store_true', help="Accommodate git's interactive mode.")
  args = arg_parser.parse_args()
  # Git can generate utf8-illegal sequences; ignore them.
  stdin = open(0, errors='replace')
  if 'SAME_SAME_OFF' in environ: # Escape hatch: act as a plain pass-through filter.
    for line in stdin:
      stdout.write(line)
    exit(0)
  dbg = ('SAME_SAME_DBG' in environ) # Debug mode: print each line's parsed kind instead of rendering.
  # Break input into segments starting with 'diff' lines.
  # Note that the first segment might begin with any kind of line.
  buffer:List[DiffLine] = []
  def flush_buffer() -> None:
    # Render and clear the accumulated segment.
    nonlocal buffer
    if buffer:
      handle_file_lines(buffer, interactive=args.interactive)
      buffer = []
  try:
    for line in stdin:
      raw_text = line.rstrip('\n')
      match = diff_pat.match(raw_text)
      assert match is not None # diff_pat always matches: its final alternative is '.*'.
      kind = match.lastgroup
      assert kind is not None, match
      if dbg:
        print(kind, ':', repr(raw_text))
        continue
      if kind in pass_kinds:
        flush_buffer()
        print(raw_text)
        continue
      if kind == 'diff':
        flush_buffer()
      buffer.append(DiffLine(kind, match))
    flush_buffer()
  except BrokenPipeError:
    stderr.close() # Prevents warning message.
def handle_file_lines(lines:List[DiffLine], interactive:bool) -> None:
  '''
  Render one diff segment (a `diff` header line plus everything up to the next one):
  scan the lines to number them, detect moved lines, apply per-token highlighting
  to rem/add chunks, and print the result.
  '''
  first = lines[0]
  kind = first.kind
  # If we are processing `git log --graph` then parsing will fail; detect and skip.
  if git_diff_graph_mode_pat.match(first.raw_text).end(): # type: ignore
    for line in lines: print(line.raw_text)
    return
  # Scan `lines` to build up diff structures.
  old_ctx_nums:Set[int] = set() # Line numbers of context lines.
  new_ctx_nums:Set[int] = set() # ".
  old_lines:Dict[int, DiffLine] = {} # Maps of line numbers to line structs.
  new_lines:Dict[int, DiffLine] = {} # ".
  old_uniques:Dict[str, Optional[int]] = {} # Maps unique line bodies to line numbers.
  new_uniques:Dict[str, Optional[int]] = {} # ".
  old_num = 0 # 1-indexed source line number.
  new_num = 0 # ".
  chunk_idx = 0 # Counter to differentiate chunks; becomes part of the groupby key.
  old_path = '<OLD_PATH>'
  new_path = '<NEW_PATH>'
  is_prev_add_rem = False
  is_loc_colored = False # Because git diff does not give ctx lines an sgr prefix, it seems more reliable to detect from the hunk.
  for line in lines:
    match = line.match
    kind = line.kind
    is_add_rem = (kind in ('rem', 'add'))
    if not is_prev_add_rem and is_add_rem: chunk_idx += 1 # A new rem/add chunk begins here.
    is_prev_add_rem = is_add_rem
    if kind in ('ctx', 'rem', 'add'):
      line.is_src = True
      if kind == 'ctx':
        line.set_text(key='ctx_text', clip=is_loc_colored) # Clip is a hack; ctx lines do not have a leading color sequence.
      elif kind == 'rem':
        line.set_text(key='rem_text')
        line.chunk_idx = chunk_idx
        insert_unique_line(old_uniques, line.text, old_num)
      elif kind == 'add':
        line.set_text(key='add_text')
        line.chunk_idx = chunk_idx
        insert_unique_line(new_uniques, line.text, new_num)
      if kind in ('ctx', 'rem'):
        assert old_num not in old_lines
        assert old_num not in old_ctx_nums
        line.old_num = old_num
        old_lines[old_num] = line
        old_ctx_nums.add(old_num)
        old_num += 1
      if kind in ('ctx', 'add'):
        assert new_num not in new_lines
        assert new_num not in new_ctx_nums
        line.new_num = new_num
        new_lines[new_num] = line
        new_ctx_nums.add(new_num)
        new_num += 1
    elif kind == 'loc': # Hunk header: resynchronize the line counters.
      is_loc_colored = bool(line.match['git_color'])
      o = int(match['old_num'])
      if o > 0:
        assert o > old_num, (o, old_num, match.string)
        old_num = o
      n = int(match['new_num'])
      if n > 0:
        assert n > new_num
        new_num = n
    elif kind == 'diff': # Not the best way to parse paths, because paths with spaces are ambiguous.
      paths = clip_reset(match['diff_paths']).split(' ') # Split into words, then guess at old and new split as best we can.
      i = len(paths) // 2 # Assume that both sides have the same number of spaces between them.
      # Note: if this does not prove sufficient for file renames we could try to find a split that matches either head or tail.
      old_path = ' '.join(paths[:i])
      new_path = ' '.join(paths[i:])
    elif kind == 'old': old_path = vscode_path(clip_reset(match['old_path']).rstrip('\t'))
    elif kind == 'new': new_path = vscode_path(clip_reset(match['new_path']).rstrip('\t')) # Not sure why this trailing tab appears.
    #^ These lines are a better way to parse the paths, but are not always present (particularly when one side is /dev/null).
    #^ Since they come after the diff line, they will overwrite the previous guess.
  # Detect moved lines.
  def diff_lines_match(old_idx:int, new_idx:int) -> bool:
    # Only whitespace-insensitive matches between non-context lines count towards a move.
    if old_idx in old_ctx_nums or new_idx in new_ctx_nums: return False
    try: return old_lines[old_idx].text.strip() == new_lines[new_idx].text.strip()
    except KeyError: return False
  old_moved_nums:Set[int] = set()
  new_moved_nums:Set[int] = set()
  for body, new_idx in new_uniques.items():
    if new_idx is None: continue # Body is not unique on the new side.
    old_idx = old_uniques.get(body)
    if old_idx is None: continue # Body is absent, or not unique, on the old side.
    # Grow the unique matched pair into a maximal run of matching lines in both directions.
    p_o = old_idx
    p_n = new_idx
    while diff_lines_match(p_o-1, p_n-1):
      p_o -= 1
      p_n -= 1
    e_o = old_idx + 1
    e_n = new_idx + 1
    while diff_lines_match(e_o, e_n):
      e_o += 1
      e_n += 1
    old_moved_nums.update(range(p_o, e_o))
    new_moved_nums.update(range(p_n, e_n))
  # Break lines into rem/add chunks and print them.
  # While a "hunk" is a series of (possibly many) ctx/rem/add lines provided by git diff,
  # a "chunk" is either a contiguous block of rem/add lines, or else any other single line.
  # This approach simplifies the token diffing process so that it is a reasonably
  # straightforward comparison of a rem block to an add block.
  def chunk_key(line:DiffLine) -> Tuple[bool, int, bool]:
    return (line.is_src, line.chunk_idx, (line.old_num in old_moved_nums or line.new_num in new_moved_nums))
  for ((is_src, chunk_idx, is_moved), _chunk) in groupby(lines, key=chunk_key):
    chunk = list(_chunk) # We iterate over the sequence several times.
    if chunk_idx and not is_moved: # Chunk should be diffed by tokens.
      # We must ensure that the same number of lines is output, at least for `-interactive` mode.
      # Currently, we do not reorder lines at all, but that is an option for the future.
      rem_lines = [l for l in chunk if l.old_num]
      add_lines = [l for l in chunk if l.new_num]
      add_token_diffs(rem_lines, add_lines)
    elif is_src: # ctx or moved.
      for l in chunk:
        l.text = ''.join(sanitize_for_console(l.text))
    # Print lines.
    for line in chunk:
      kind = line.kind
      match = line.match
      text = line.text
      if kind == 'ctx':
        print(text)
      elif kind == 'rem':
        m = C_REM_MOVED if line.old_num in old_moved_nums else ''
        print(m, text, C_END, sep='')
      elif kind == 'add':
        m = C_ADD_MOVED if line.new_num in new_moved_nums else ''
        print(m, text, C_END, sep='')
      elif kind == 'loc': # Render the hunk header as a VSCode-clickable path:line location.
        new_num = match['new_num']
        snippet = clip_reset(match['parent_snippet'])
        s = ' ' + C_SNIPPET if snippet else ''
        print(C_LOC, new_path, ':', new_num, ':', s, snippet, C_END, sep='')
      elif kind == 'diff':
        msg = new_path if (old_path == new_path) else '{} -> {}'.format(old_path, new_path)
        print(C_FILE, msg, ':', C_END, sep='')
      elif kind == 'meta':
        print(C_MODE, new_path, ':', RST, ' ', line.text, sep='')
      elif kind in dropped_kinds:
        if interactive: # Cannot drop lines, because interactive mode slices the diff by line counts.
          print(C_DROPPED, line.text, RST, sep='')
      elif kind in pass_kinds:
        print(line.text)
      else:
        raise Exception('unhandled kind: {}\n{!r}'.format(kind, text))
# Kinds whose lines are omitted from output (shown dimmed only in interactive mode).
dropped_kinds = {
  'idx', 'old', 'new'
}
# Kinds whose lines are passed through to output unmodified.
pass_kinds = {
  'author', 'commit', 'date', 'empty', 'other'
}
def insert_unique_line(d:Dict[str, Optional[int]], line:str, idx:int) -> None:
  '''
  Record `idx` under the whitespace-stripped body of `line`.
  A body seen more than once is marked None (non-unique), which excludes it from movement detection.
  '''
  key = line.strip()
  d[key] = None if key in d else idx
def add_token_diffs(rem_lines:List[DiffLine], add_lines:List[DiffLine]) -> None:
  'Rewrite DiffLine.text values to include per-token diff highlighting.'
  r = HighlightState(lines=rem_lines, tokens=tokenize_difflines(rem_lines), hl_ctx=C_REM_CTX, hl_space=C_REM_SPACE, hl_token=C_REM_TOKEN)
  a = HighlightState(lines=add_lines, tokens=tokenize_difflines(add_lines), hl_ctx=C_ADD_CTX, hl_space=C_ADD_SPACE, hl_token=C_ADD_TOKEN)
  # calc_diff yields (rem_range, add_range) pairs; both truthy means the token runs match.
  for r_r, r_a in calc_diff(r.tokens, a.tokens):
    if r_r and r_a: # Matching tokens; highlight as context.
      r.highlight_frags(r_r, is_ctx=True)
      a.highlight_frags(r_a, is_ctx=True)
    elif r_r: r.highlight_frags(r_r, is_ctx=False)
    elif r_a: a.highlight_frags(r_a, is_ctx=False)
  # Update the mutable lines lists.
  r.update_lines()
  a.update_lines()
H_START, H_CTX, H_SPACE, H_TOKEN = range(4) # States of the highlight state machine.
class HighlightState:
  '''
  Accumulates highlighted text fragments for one side (rem or add) of a chunk.
  Tracks the current highlight state so that SGR sequences are emitted only on
  state transitions rather than per token.
  '''
  def __init__(self, lines:List[DiffLine], tokens:List[str], hl_ctx:str, hl_space:str, hl_token:str) -> None:
    self.lines = lines
    self.tokens = tokens
    self.hl_ctx = hl_ctx # Context highlight.
    self.hl_space = hl_space # Significant space highlight.
    self.hl_token = hl_token # Token highlighter.
    self.state = H_START
    self.line_idx = 0
    self.frags:List[List[str]] = [[] for _ in lines] # Output fragments, one list per line.
  def highlight_frags(self, rng:range, is_ctx:bool) -> None:
    'Append the tokens in `rng` to the current line, emitting a highlight sequence on each state change.'
    for frag in self.tokens[rng.start:rng.stop]:
      line_frags = self.frags[self.line_idx]
      if frag == '\n': # Newline token: finish the current line and advance.
        if self.state != H_CTX:
          line_frags.append(self.hl_ctx) # When combined with C_END, this highlights to end of line.
        self.state = H_START
        self.line_idx += 1
      else:
        if is_ctx:
          if self.state != H_CTX:
            self.state = H_CTX
            line_frags.append(self.hl_ctx)
        elif frag.isspace():
          if self.state == H_START: # Don't highlight spaces at the start of lines.
            self.state = H_TOKEN
            line_frags.append(self.hl_token)
          elif self.state == H_CTX:
            self.state = H_SPACE
            line_frags.append(self.hl_space)
        else:
          if self.state != H_TOKEN:
            self.state = H_TOKEN
            line_frags.append(self.hl_token)
        line_frags.extend(sanitize_for_console(frag))
  def update_lines(self) -> None:
    'Join the accumulated fragments and write them back into each DiffLine.text.'
    for line, line_frags in zip(self.lines, self.frags):
      line.text = ''.join(line_frags)
def tokenize_difflines(lines:List[DiffLine]) -> List[str]:
  'Flatten the texts of `lines` into a single token list, terminating each line with a newline token.'
  tokens:List[str] = []
  for diff_line in lines:
    for m in token_pat.finditer(diff_line.text):
      tokens.append(m[0])
    tokens.append('\n')
  return tokens
def is_token_junk(token:str) -> bool:
  '''
  Treat newlines as significant tokens but all other whitespace as junk.
  This forces the diff algorithm to respect line breaks without getting
  distracted aligning to whitespace.
  '''
  if token == '\n': return False
  return token.isspace()
git_diff_graph_mode_pat = re.compile(r'(?x) [ /\*\|\\]*') # space is treated as literal inside of brackets, even in extended mode.
diff_pat = re.compile(r'''(?x)
(?P<git_color> \x1b \[ \d* m)*
(?:
(?P<empty> $ )
| (?P<commit> commit\ [0-9a-z]{40} )
| (?P<author> Author: )
| (?P<date> Date: )
| (?P<diff> diff\ --git\ (?P<diff_paths>.+) )
| (?P<idx> index )
| (?P<old> --- \ (?P<old_path>.+) )
| (?P<new> \+\+\+ \ (?P<new_path>.+) )
| (?P<loc> @@\ -(?P<old_num>\d+)(?P<old_len>,\d+)?\ \+(?P<new_num>\d+)(?P<new_len>,\d+)?\ @@
(?:\x1b\[m)? \ ? (?:\x1b\[m)? (?P<parent_snippet>.*) ) # Note the RST SPACE RST sequence.
| (?P<ctx> \ (?P<ctx_text>.*) )
| (?P<rem> - (?P<rem_text>.*) )
| (?P<add> \+(?:\x1b\[m\x1b\[32m)? (?P<add_text>.*) ) # Hack to remove extra color sequences that git 2.19.2 shows for these lines only.
| (?P<meta>
( old\ mode
| new\ mode
| deleted\ file\ mode
| new\ file\ mode
| copy\ from
| copy\ to
| rename\ from
| rename\ to
| similarity\ index
| dissimilarity\ index ) )
| (?P<other> .* )
)
''')
token_pat = re.compile(r'''(?x)
(?:(?!_)\w)+ # Word characters, excluding underscores.
| \d+ # Numbers.
| \ + # Spaces; distinct from other whitespace.
| \t+ # Tabs; distinct from other whitespace.
| \s+ # Other whitespace.
| . # Any other single character; newlines are never present so DOTALL is irrelevant.
''')
# same-same colors.
C_FILE = sgr(BG, rgb6(1, 0, 1)) # File header line background.
C_MODE = sgr(BG, rgb6(1, 0, 1)) # Meta/mode line background.
C_LOC = sgr(BG, rgb6(0, 1, 2)) # Hunk location line background.
C_UNKNOWN = sgr(BG, rgb6(5, 0, 5)) # Not referenced in this file; presumably a fallback color.
C_SNIPPET = sgr(TXT, gray26(22)) # Parent snippet text on loc lines.
C_DROPPED = sgr(TXT, gray26(10)) # Dimmed text for dropped kinds in interactive mode.
REM_BG = rgb6(1, 0, 0) # Base background for removed lines.
ADD_BG = rgb6(0, 1, 0) # Base background for added lines.
C_REM_MOVED = sgr(BG, REM_BG, TXT, rgb6(4, 2, 0)) # Move detected.
C_ADD_MOVED = sgr(BG, ADD_BG, TXT, rgb6(2, 4, 0))
# Token highlighting.
C_REM_CTX = sgr(BG, REM_BG, RST_TXT, RST_BOLD) # Unchanged tokens within a removed line.
C_ADD_CTX = sgr(BG, ADD_BG, RST_TXT, RST_BOLD) # Unchanged tokens within an added line.
C_REM_SPACE = sgr(BG, rgb6(3, 0, 0), RST_TXT, BOLD) # Change to space.
C_ADD_SPACE = sgr(BG, rgb6(0, 3, 0), RST_TXT, BOLD)
C_REM_TOKEN = sgr(BG, REM_BG, TXT, rgb6(5, 2, 3), BOLD) # Changed tokens.
C_ADD_TOKEN = sgr(BG, ADD_BG, TXT, rgb6(2, 5, 3), BOLD)
C_RST_TOKEN = sgr(RST_TXT, RST_BOLD) # Reset token highlighting.
C_END = FILL # Extends the current background color to the end of the line.
def vscode_path(path:str) -> str:
  '''
  Prefix a plain file name with "./" so that VSCode recognizes it as a source location.
  Paths already containing a slash, and pseudo-names such as <stdin>, are returned untouched.
  '''
  if any(c in path for c in '/<>'):
    return path
  return './' + path
reset_sgr = '\x1b[m' # Git uses the short version with "0" code omitted.
def clip_reset(text:str) -> str:
  'Strip a single trailing SGR reset sequence from `text`, if one is present.'
  if not text.endswith(reset_sgr):
    return text
  return text[:-len(reset_sgr)]
def errL(*items:Any) -> None: print(*items, sep='', file=stderr) # Print items to stderr, concatenated with no separator.
def errSL(*items:Any) -> None: print(*items, file=stderr) # Print items to stderr, separated by spaces.
if __name__ == '__main__': main() | 0.61555 | 0.217379 |
from rebus.agent import Agent
import threading
import time
import tornado.ioloop
import tornado.web
import tornado.template
import rebus.agents.inject
from rebus.descriptor import Descriptor
@Agent.register
class HTTPListener(Agent):
    """Rebus agent that runs an HTTP server and pushes POSTed descriptors to the bus."""
    _name_ = "httplistener"
    _desc_ = "Push any descriptor that gets POSTed to the bus"
    # Class-level registry mapping selector prefix -> postprocessing callback.
    postprocessors = dict()
    def init_agent(self):
        """Start the tornado HTTP server on a daemon thread."""
        self.gui = Application(self)
        self.gui.listen(8081)  # NOTE(review): listen port is hard-coded.
        self.ioloop = tornado.ioloop.IOLoop.instance()
        t = threading.Thread(target=self.ioloop.start)
        t.daemon = True  # Do not block process exit on the IO loop thread.
        t.start()
    def selector_filter(self, selector):
        # Decline every selector: this agent injects descriptors but never consumes any.
        return False
    def inject(self, desc):
        """Push the descriptor onto the bus."""
        self.push(desc)
    @staticmethod
    def registerPostProcessor(selector_prefix):
        """
        :param selector_prefix: selector prefix for which postprocessing should
        be performed
        Registers a method which will be called for selectors that match
        provided prefix.
        Only one postprocessing method will be called.
        The registered callback method must have the following prototype:
        callback(agent, selector, domain, label, value, start_time)
        This method may return either None, or a Descriptor object.
        The callback method will be run in the same process as the HTTPListener
        agent.
        """
        def func_wrapper(f):
            HTTPListener.postprocessors[selector_prefix] = f
            return f
        return func_wrapper
class Application(tornado.web.Application):
    """Tornado application routing /inject/<selector> requests to InjectHandler."""
    def __init__(self, agent):
        handlers = [
            (r"/inject(/[^\?]*)\??.*", InjectHandler),
        ]
        self.agent = agent  # Back-reference so handlers can reach the rebus agent.
        tornado.web.Application.__init__(self, handlers)
class InjectHandler(tornado.web.RequestHandler):
    # NOTE(review): @tornado.web.asynchronous was removed in Tornado 6 -- confirm the pinned tornado version.
    @tornado.web.asynchronous
    def post(self, selector, *args, **kwargs):
        """
        Handles POST requests. Injects POSTed values to the bus.
        URL format: /inject/sel/ector?domain=DOMAIN&label=LABEL&force_inject=1
        If selector is /auto, guess the selector type.
        domain is optional - defaults to 'default'
        force_inject is not obeyed if a postprocessor intercepts the
        descriptor
        """
        start_time = time.time()
        label = self.get_argument('label', 'defaultlabel')
        domain = self.get_argument('domain', 'default')
        value = self.request.body
        force_inject = self.get_argument('force_inject', False)
        if force_inject != False:
            # Any supplied value (even '0' or an empty string) enables forced injection.
            force_inject = True
        if selector == '/auto':
            selector = rebus.agents.inject.guess_selector(buf=value)
        postprocessor = None
        for (prefix, function) in HTTPListener.postprocessors.items():
            if selector.startswith(prefix):
                postprocessor = function  # The last matching prefix in iteration order wins.
        if postprocessor is not None:
            def process_inject(agent, selector, domain, label, value,
                               start_time):
                # Runs on the bus thread; the HTTP result is reported back on the IO loop.
                failed = False
                try:
                    desc = postprocessor(agent, selector, domain, label, value,
                                         start_time)
                    if desc is not None:
                        agent.push(desc)
                except Exception as e:
                    # NOTE(review): exception detail is discarded; the client only sees a 500.
                    failed = True
                agent.ioloop.add_callback(self.report_result, failed)
        else:
            if force_inject:
                create_new = Descriptor.new_with_randomhash
            else:
                create_new = Descriptor
            def process_inject(agent, selector, domain, label, value,
                               start_time):
                failed = False
                try:
                    done = time.time()
                    desc = create_new(
                        label, selector, value, domain,
                        agent=self.application.agent._name_ + '_inject',
                        processing_time=(done-start_time))
                    agent.push(desc)
                except Exception as e:
                    failed = True
                agent.ioloop.add_callback(self.report_result, failed)
        # Hand the work to the bus thread; the HTTP response is finished from report_result.
        self.application.agent.bus.busthread_call(
            process_inject,
            *(self.application.agent, selector, domain, label, value,
              start_time))
def report_result(self, failed):
if failed:
self.set_status(500)
self.finish() | rebus/agents/http_listener.py | from rebus.agent import Agent
import threading
import time
import tornado.ioloop
import tornado.web
import tornado.template
import rebus.agents.inject
from rebus.descriptor import Descriptor
@Agent.register
class HTTPListener(Agent):
    """Rebus agent that runs an HTTP server and pushes POSTed descriptors to the bus."""
    _name_ = "httplistener"
    _desc_ = "Push any descriptor that gets POSTed to the bus"
    # Class-level registry mapping selector prefix -> postprocessing callback.
    postprocessors = dict()
    def init_agent(self):
        """Start the tornado HTTP server on a daemon thread."""
        self.gui = Application(self)
        self.gui.listen(8081)  # NOTE(review): listen port is hard-coded.
        self.ioloop = tornado.ioloop.IOLoop.instance()
        t = threading.Thread(target=self.ioloop.start)
        t.daemon = True  # Do not block process exit on the IO loop thread.
        t.start()
    def selector_filter(self, selector):
        # Decline every selector: this agent injects descriptors but never consumes any.
        return False
    def inject(self, desc):
        """Push the descriptor onto the bus."""
        self.push(desc)
    @staticmethod
    def registerPostProcessor(selector_prefix):
        """
        :param selector_prefix: selector prefix for which postprocessing should
        be performed
        Registers a method which will be called for selectors that match
        provided prefix.
        Only one postprocessing method will be called.
        The registered callback method must have the following prototype:
        callback(agent, selector, domain, label, value, start_time)
        This method may return either None, or a Descriptor object.
        The callback method will be run in the same process as the HTTPListener
        agent.
        """
        def func_wrapper(f):
            HTTPListener.postprocessors[selector_prefix] = f
            return f
        return func_wrapper
class Application(tornado.web.Application):
    """Tornado application routing /inject/<selector> requests to InjectHandler."""
    def __init__(self, agent):
        handlers = [
            (r"/inject(/[^\?]*)\??.*", InjectHandler),
        ]
        self.agent = agent  # Back-reference so handlers can reach the rebus agent.
        tornado.web.Application.__init__(self, handlers)
class InjectHandler(tornado.web.RequestHandler):
    # NOTE(review): @tornado.web.asynchronous was removed in Tornado 6 -- confirm the pinned tornado version.
    @tornado.web.asynchronous
    def post(self, selector, *args, **kwargs):
        """
        Handles POST requests. Injects POSTed values to the bus.
        URL format: /inject/sel/ector?domain=DOMAIN&label=LABEL&force_inject=1
        If selector is /auto, guess the selector type.
        domain is optional - defaults to 'default'
        force_inject is not obeyed if a postprocessor intercepts the
        descriptor
        """
        start_time = time.time()
        label = self.get_argument('label', 'defaultlabel')
        domain = self.get_argument('domain', 'default')
        value = self.request.body
        force_inject = self.get_argument('force_inject', False)
        if force_inject != False:
            # Any supplied value (even '0' or an empty string) enables forced injection.
            force_inject = True
        if selector == '/auto':
            selector = rebus.agents.inject.guess_selector(buf=value)
        postprocessor = None
        for (prefix, function) in HTTPListener.postprocessors.items():
            if selector.startswith(prefix):
                postprocessor = function  # The last matching prefix in iteration order wins.
        if postprocessor is not None:
            def process_inject(agent, selector, domain, label, value,
                               start_time):
                # Runs on the bus thread; the HTTP result is reported back on the IO loop.
                failed = False
                try:
                    desc = postprocessor(agent, selector, domain, label, value,
                                         start_time)
                    if desc is not None:
                        agent.push(desc)
                except Exception as e:
                    # NOTE(review): exception detail is discarded; the client only sees a 500.
                    failed = True
                agent.ioloop.add_callback(self.report_result, failed)
        else:
            if force_inject:
                create_new = Descriptor.new_with_randomhash
            else:
                create_new = Descriptor
            def process_inject(agent, selector, domain, label, value,
                               start_time):
                failed = False
                try:
                    done = time.time()
                    desc = create_new(
                        label, selector, value, domain,
                        agent=self.application.agent._name_ + '_inject',
                        processing_time=(done-start_time))
                    agent.push(desc)
                except Exception as e:
                    failed = True
                agent.ioloop.add_callback(self.report_result, failed)
        # Hand the work to the bus thread; the HTTP response is finished from report_result.
        self.application.agent.bus.busthread_call(
            process_inject,
            *(self.application.agent, selector, domain, label, value,
              start_time))
def report_result(self, failed):
if failed:
self.set_status(500)
self.finish() | 0.534127 | 0.086903 |
# Legion - <NAME>, ConsenSys Diligence
import argparse
from legions.context import LegionContext
from legions.statusbar import LegionStatusBar
from nubia import PluginInterface, CompletionDataSource
from nubia.internal.blackcmd import CommandBlacklist
class LegionPlugin(PluginInterface):
    """
    The PluginInterface class is a way to customize nubia for every customer
    use case. It allows custom argument validation, control over command
    loading, custom context objects, and much more.
    """
    def create_context(self):
        """
        Must create an object that inherits from `Context` parent class.
        The plugin can return a custom context but it has to inherit from the
        correct parent class.
        """
        return LegionContext()
    def validate_args(self, args):
        """
        This will be executed when starting nubia; the args passed is a
        dict-like object that contains the argparse result after parsing the
        command line arguments. The plugin can choose to update the context
        with the values, and/or decide to raise `ArgsValidationError` with
        the error message.
        """
        # No validation is performed; every argument combination is accepted.
        pass
    def get_opts_parser(self, add_help=True):
        """
        Builds the ArgumentParser used to parse the shell's command-line
        options; use this to build the list of arguments that you want for
        your shell.
        """
        opts_parser = argparse.ArgumentParser(
            description="Legion - EVM Node Security Toolkit",
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=add_help,
        )
        opts_parser.add_argument(
            "--config", "-c", default="", type=str, help="Configuration File"
        )
        opts_parser.add_argument(
            "--verbose",
            "-v",
            action="count",
            default=0,
            help="Increase verbosity, can be specified " "multiple times",
        )
        opts_parser.add_argument(
            "--stderr",
            "-s",
            action="store_true",
            help="By default the logging output goes to a "
            "temporary file. This disables this feature "
            "by sending the logging output to stderr",
        )
        return opts_parser
    def get_completion_datasource_for_global_argument(self, argument):
        # Only --config gets tab completion; None means no completion for other globals.
        if argument == "--config":
            return ConfigFileCompletionDataSource()
        return None
    def create_usage_logger(self, context):
        """
        Override this and return your own usage logger.
        Must be a subtype of UsageLoggerInterface.
        """
        # Returning None disables usage logging.
        return None
    def get_status_bar(self, context):
        """
        This returns the StatusBar object that handles the bottom status bar
        and the right-side per-line status
        """
        return LegionStatusBar(context)
    def getBlacklistPlugin(self):
        """Return a CommandBlacklist that blocks the 'be-blocked' command."""
        blacklister = CommandBlacklist()
        blacklister.add_blocked_command("be-blocked")
        return blacklister
class ConfigFileCompletionDataSource(CompletionDataSource):
def get_all(self):
return ["/tmp/c1", "/tmp/c2"] | legions/plugin.py |
# Legion - <NAME>, ConsenSys Diligence
import argparse
from legions.context import LegionContext
from legions.statusbar import LegionStatusBar
from nubia import PluginInterface, CompletionDataSource
from nubia.internal.blackcmd import CommandBlacklist
class LegionPlugin(PluginInterface):
    """
    The PluginInterface class is a way to customize nubia for every customer
    use case. It allows custom argument validation, control over command
    loading, custom context objects, and much more.
    """
    def create_context(self):
        """
        Must create an object that inherits from `Context` parent class.
        The plugin can return a custom context but it has to inherit from the
        correct parent class.
        """
        return LegionContext()
    def validate_args(self, args):
        """
        This will be executed when starting nubia; the args passed is a
        dict-like object that contains the argparse result after parsing the
        command line arguments. The plugin can choose to update the context
        with the values, and/or decide to raise `ArgsValidationError` with
        the error message.
        """
        # No validation is performed; every argument combination is accepted.
        pass
    def get_opts_parser(self, add_help=True):
        """
        Builds the ArgumentParser used to parse the shell's command-line
        options; use this to build the list of arguments that you want for
        your shell.
        """
        opts_parser = argparse.ArgumentParser(
            description="Legion - EVM Node Security Toolkit",
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            add_help=add_help,
        )
        opts_parser.add_argument(
            "--config", "-c", default="", type=str, help="Configuration File"
        )
        opts_parser.add_argument(
            "--verbose",
            "-v",
            action="count",
            default=0,
            help="Increase verbosity, can be specified " "multiple times",
        )
        opts_parser.add_argument(
            "--stderr",
            "-s",
            action="store_true",
            help="By default the logging output goes to a "
            "temporary file. This disables this feature "
            "by sending the logging output to stderr",
        )
        return opts_parser
    def get_completion_datasource_for_global_argument(self, argument):
        # Only --config gets tab completion; None means no completion for other globals.
        if argument == "--config":
            return ConfigFileCompletionDataSource()
        return None
    def create_usage_logger(self, context):
        """
        Override this and return your own usage logger.
        Must be a subtype of UsageLoggerInterface.
        """
        # Returning None disables usage logging.
        return None
    def get_status_bar(self, context):
        """
        This returns the StatusBar object that handles the bottom status bar
        and the right-side per-line status
        """
        return LegionStatusBar(context)
    def getBlacklistPlugin(self):
        """Return a CommandBlacklist that blocks the 'be-blocked' command."""
        blacklister = CommandBlacklist()
        blacklister.add_blocked_command("be-blocked")
        return blacklister
class ConfigFileCompletionDataSource(CompletionDataSource):
    """Supplies completion candidates for the --config global argument."""
    def get_all(self):
        # NOTE(review): hard-coded sample paths.
        return ["/tmp/c1", "/tmp/c2"]
class chromaticity:
    """A single chromaticity coordinate pair, stored as *x* and *y*."""
    def __init__(self, x, y):
        (self.x, self.y) = (x, y)
    def __repr__(self):
        return repr((self.x, self.y))
class point:
    """A 2D point with members *x* and *y*."""
    def __init__(self, x, y):
        (self.x, self.y) = (x, y)
    def __repr__(self):
        return repr((self.x, self.y))
class V2i(point):
    """V2i is a 2D point, with members *x* and *y*."""
    # Nominal integer variant: nothing is enforced; behavior is identical to point.
    pass
class V2f(point):
    """V2f is a 2D point, with members *x* and *y*."""
    # Nominal float variant: nothing is enforced; behavior is identical to point.
    pass
class Box:
    """A 2D box given by its two corners *min* and *max*, each a :class:`point`."""
    def __init__(self, min = None, max = None):
        self.min = min
        self.max = max
    def __repr__(self):
        return "%r - %r" % (self.min, self.max)
class Box2i(Box):
    """Box2i is a 2D box, specified by its two corners *min* and *max*."""
    # Nominal integer variant: nothing is enforced; behavior is identical to Box.
    pass
class Box2f(Box):
    """Box2f is a 2D box, specified by its two corners *min* and *max*."""
    # Nominal float variant: nothing is enforced; behavior is identical to Box.
    pass
class Chromaticities:
    """
    The chromaticity coordinates of the *red*, *green*, *blue* primaries and
    the *white* point. Each member is a :class:`chromaticity`.
    """
    def __init__(self, red = None, green = None, blue = None, white = None):
        self.red = red
        self.green = green
        self.blue = blue
        self.white = white
    def __repr__(self):
        return " ".join(repr(c) for c in (self.red, self.green, self.blue, self.white))
class LineOrder:
    """
    .. index:: INCREASING_Y, DECREASING_Y, RANDOM_Y
    Scanline ordering constant: wraps one of ``INCREASING_Y``, ``DECREASING_Y``
    or ``RANDOM_Y`` in member *v*; repr() yields the constant's name.
    """
    INCREASING_Y = 0
    DECREASING_Y = 1
    RANDOM_Y = 2
    _NAMES = ("INCREASING_Y", "DECREASING_Y", "RANDOM_Y")
    def __init__(self, v):
        self.v = v
    def __repr__(self):
        return self._NAMES[self.v]
class Compression:
    """
    .. index:: NO_COMPRESSION, RLE_COMPRESSION, ZIPS_COMPRESSION, ZIP_COMPRESSION, PIZ_COMPRESSION, PXR24_COMPRESSION
    Compression method constant: wraps one of ``NO_COMPRESSION``,
    ``RLE_COMPRESSION``, ``ZIPS_COMPRESSION``, ``ZIP_COMPRESSION``,
    ``PIZ_COMPRESSION`` or ``PXR24_COMPRESSION`` in member *v*;
    repr() yields the constant's name.
    """
    NO_COMPRESSION = 0
    RLE_COMPRESSION = 1
    ZIPS_COMPRESSION = 2
    ZIP_COMPRESSION = 3
    PIZ_COMPRESSION = 4
    PXR24_COMPRESSION = 5
    _NAMES = ("NO_COMPRESSION", "RLE_COMPRESSION", "ZIPS_COMPRESSION",
              "ZIP_COMPRESSION", "PIZ_COMPRESSION", "PXR24_COMPRESSION")
    def __init__(self, v):
        self.v = v
    def __repr__(self):
        return self._NAMES[self.v]
class PixelType:
    """
    .. index:: UINT, HALF, FLOAT
    Pixel data type constant: wraps one of ``UINT``, ``HALF`` or ``FLOAT``
    in member *v*; repr() yields the constant's name.
    """
    UINT = 0
    HALF = 1
    FLOAT = 2
    _NAMES = ("UINT", "HALF", "FLOAT")
    def __init__(self, v):
        self.v = v
    def __repr__(self):
        return self._NAMES[self.v]
class Channel:
    """
    Describes the type and spatial layout of an image channel.
    *type* is a :class:`PixelType`; *xSampling* and *ySampling* give the number
    of X-axis / Y-axis pixels between samples.
    """
    # NOTE: the default PixelType instance is created once at class-definition
    # time and is shared by every call that omits `type`.
    def __init__(self, type = PixelType(PixelType.HALF), xSampling = 1, ySampling = 1):
        self.type = type
        self.xSampling = xSampling
        self.ySampling = ySampling
    def __repr__(self):
        return "%r (%r, %r)" % (self.type, self.xSampling, self.ySampling)
class PreviewImage:
"""
.. index:: RGBA, thumbnail, preview, JPEG, PIL, Python Imaging Library
PreviewImage is a small preview image, intended as a thumbnail version of the full image.
The image has size (*width*, *height*) and 8-bit pixel values are
given by string *pixels* in RGBA order from top-left to bottom-right.
For example, to create a preview image from a JPEG file using the popular
`Python Imaging Library <http://www.pythonware.com/library/pil/handbook/index.htm>`_:
.. doctest::
>>> import Image
>>> import Imath
>>> im = Image.open("lena.jpg").resize((100, 100)).convert("RGBA")
>>> print Imath.PreviewImage(im.size[0], im.size[1], im.tostring())
<Imath.PreviewImage instance 100x100>
"""
    def __init__(self, width, height, pixels):
        self.width = width    # Preview width in pixels.
        self.height = height  # Preview height in pixels.
        self.pixels = pixels  # 8-bit RGBA data, top-left to bottom-right (per the class docstring).
def __repr__(self):
return "<Imath.PreviewImage instance %dx%d>" % (self.width, self.height) | OpenEXR-1.2.0/build/lib.win-amd64-2.7/Imath.py | class chromaticity:
"""Store chromaticity coordinates in *x* and *y*."""
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return repr((self.x, self.y))
class point:
    """A 2D point with members *x* and *y*."""
    def __init__(self, x, y):
        (self.x, self.y) = (x, y)
    def __repr__(self):
        return repr((self.x, self.y))
class V2i(point):
    """V2i is a 2D point, with members *x* and *y*."""
    # Nominal integer variant: nothing is enforced; behavior is identical to point.
    pass
class V2f(point):
    """V2f is a 2D point, with members *x* and *y*."""
    # Nominal float variant: nothing is enforced; behavior is identical to point.
    pass
class Box:
    """A 2D box given by its two corners *min* and *max*, each a :class:`point`."""
    def __init__(self, min = None, max = None):
        self.min = min
        self.max = max
    def __repr__(self):
        return "%r - %r" % (self.min, self.max)
class Box2i(Box):
    """Box2i is a 2D box, specified by its two corners *min* and *max*."""
    # Nominal integer variant: nothing is enforced; behavior is identical to Box.
    pass
class Box2f(Box):
    """Box2f is a 2D box, specified by its two corners *min* and *max*."""
    # Nominal float variant: nothing is enforced; behavior is identical to Box.
    pass
class Chromaticities:
"""
Chromaticities holds the set of chromaticity coordinates for *red*, *green*, *blue*, and *white*.
Each primary is a :class:`chromaticity`.
"""
def __init__(self, red = None, green = None, blue = None, white = None):
self.red = red
self.green = green
self.blue = blue
self.white = white
def __repr__(self):
return repr(self.red) + " " + repr(self.green) + " " + repr(self.blue) + " " + repr(self.white)
class LineOrder:
"""
.. index:: INCREASING_Y, DECREASING_Y, RANDOM_Y
LineOrder can have three possible values:
``INCREASING_Y``,
``DECREASING_Y``,
``RANDOM_Y``.
.. doctest::
>>> import Imath
>>> print Imath.LineOrder(Imath.LineOrder.DECREASING_Y)
DECREASING_Y
"""
INCREASING_Y = 0
DECREASING_Y = 1
RANDOM_Y = 2
def __init__(self, v):
self.v = v
def __repr__(self):
return ["INCREASING_Y", "DECREASING_Y", "RANDOM_Y"][self.v]
class Compression:
"""
.. index:: NO_COMPRESSION, RLE_COMPRESSION, ZIPS_COMPRESSION, ZIP_COMPRESSION, PIZ_COMPRESSION, PXR24_COMPRESSION
Compression can have possible values:
``NO_COMPRESSION``,
``RLE_COMPRESSION``,
``ZIPS_COMPRESSION``,
``ZIP_COMPRESSION``,
``PIZ_COMPRESSION``,
``PXR24_COMPRESSION``.
.. doctest::
>>> import Imath
>>> print Imath.Compression(Imath.Compression.RLE_COMPRESSION)
RLE_COMPRESSION
"""
NO_COMPRESSION = 0
RLE_COMPRESSION = 1
ZIPS_COMPRESSION = 2
ZIP_COMPRESSION = 3
PIZ_COMPRESSION = 4
PXR24_COMPRESSION = 5
def __init__(self, v):
"""l"""
self.v = v
def __repr__(self):
return [ "NO_COMPRESSION", "RLE_COMPRESSION", "ZIPS_COMPRESSION", "ZIP_COMPRESSION", "PIZ_COMPRESSION", "PXR24_COMPRESSION"][self.v]
class PixelType:
"""
.. index:: UINT, HALF, FLOAT
PixelType can have possible values ``UINT``, ``HALF``, ``FLOAT``.
.. doctest::
>>> import Imath
>>> print Imath.PixelType(Imath.PixelType.HALF)
HALF
"""
UINT = 0
HALF = 1
FLOAT = 2
def __init__(self, v):
self.v = v
def __repr__(self):
return ["UINT", "HALF", "FLOAT"][self.v]
class Channel:
"""
Channel defines the type and spatial layout of a channel.
*type* is a :class:`PixelType`.
*xSampling* is the number of X-axis pixels between samples.
*ySampling* is the number of Y-axis pixels between samples.
.. doctest::
>>> import Imath
>>> print Imath.Channel(Imath.PixelType(Imath.PixelType.FLOAT), 4, 4)
FLOAT (4, 4)
"""
def __init__(self, type = PixelType(PixelType.HALF), xSampling = 1, ySampling = 1):
self.type = type
self.xSampling = xSampling
self.ySampling = ySampling
def __repr__(self):
return repr(self.type) + " " + repr((self.xSampling, self.ySampling))
class PreviewImage:
"""
.. index:: RGBA, thumbnail, preview, JPEG, PIL, Python Imaging Library
PreviewImage is a small preview image, intended as a thumbnail version of the full image.
The image has size (*width*, *height*) and 8-bit pixel values are
given by string *pixels* in RGBA order from top-left to bottom-right.
For example, to create a preview image from a JPEG file using the popular
`Python Imaging Library <http://www.pythonware.com/library/pil/handbook/index.htm>`_:
.. doctest::
>>> import Image
>>> import Imath
>>> im = Image.open("lena.jpg").resize((100, 100)).convert("RGBA")
>>> print Imath.PreviewImage(im.size[0], im.size[1], im.tostring())
<Imath.PreviewImage instance 100x100>
"""
def __init__(self, width, height, pixels):
self.width = width
self.height = height
self.pixels = pixels
def __repr__(self):
return "<Imath.PreviewImage instance %dx%d>" % (self.width, self.height) | 0.94837 | 0.431225 |
import os
import subprocess
from gen_tools import run_ftool
import numpy as np
def ev2dpi(infile, outfile, tstart, tstop, e0, e1, detmask):
ftool = "batbinevt"
arg_list = [infile, outfile, 'DPI', '0', 'uniform', str(e0)+'-'+str(e1)]
arg_list += ['tstart='+str(tstart), 'tstop='+str(tstop), 'detmask='+detmask]
run_ftool(ftool, arg_list)
def ev2dpi_ebins(infile, outfile, tstart, tstop, ebins, detmask):
ftool = "batbinevt"
arg_list = [infile, outfile, 'DPI', '0', 'uniform', ebins]
arg_list += ['tstart='+str(tstart), 'tstop='+str(tstop), 'detmask='+detmask]
run_ftool(ftool, arg_list)
def bateconvert(infile, outfile, calfile):
ftool = "bateconvert"
arg_list = ['infile='+infile, 'calfile='+calfile, 'residfile=CALDB',
'outfile='+outfile, 'clobber=YES', 'pulserfile=CALDB',
'fltpulserfile=CALDB']
run_ftool(ftool, arg_list)
def detmask(infile, outfile, dmask):
ftool = "batdetmask"
arg_list = [infile, outfile, 'detmask='+dmask]
run_ftool(ftool, arg_list)
def mk_bkg_mod(infile, outfile, detmask):
ftool = "batclean"
arg_list = [infile, outfile]
arg_list += ['incatalog=NONE', 'detmask='+detmask, 'srcclean=NO', 'outversion=bkgfit']
run_ftool(ftool, arg_list)
def mk_pc_img(infile, outfile, detmask, attfile, ovrsmp=4, detapp=False):
ftool = "batfftimage"
arg_list = [infile, outfile]
arg_list += ['detmask='+detmask, 'attitude='+attfile, 'pcodemap=YES',
'clobber=YES']
if ovrsmp is not None:
arg_list += ['oversampx='+str(ovrsmp),'oversampy='+str(ovrsmp)]
if detapp:
arg_list.append("aperture=CALDB:DETECTION")
run_ftool(ftool, arg_list)
def mk_sky_img(infile, outfile, detmask, attfile, bkg_file=None,\
ovrsmp=2, sig_map=None, bkgvar_map=None,\
detapp=False, rebal=True):
ftool = "batfftimage"
arg_list = [infile, outfile]
arg_list += ['detmask='+detmask, 'attitude='+attfile,
'oversampx='+str(ovrsmp),'oversampy='+str(ovrsmp),
'clobber=YES']
if bkg_file is not None:
arg_list += ['bkgfile='+bkg_file]
if bkgvar_map is not None:
arg_list += ['bkgvarmap='+bkgvar_map]
if sig_map is not None:
arg_list += ['signifmap='+sig_map]
if detapp:
arg_list.append("aperture=CALDB:DETECTION")
if rebal:
arg_list.append("rebalance=YES")
else:
arg_list.append("rebalance=NO")
run_ftool(ftool, arg_list)
def run_batcelldetect(infile, cat_fname, snr_thresh=3.5,\
sigmap=None, bkgvar=None, ovrsmp=2,\
incat="NONE", pcode="NONE"):
ftool = "batcelldetect"
bkgradius = 15*ovrsmp
srcradius = 6*ovrsmp
arg_list = ["infile="+infile, "outfile="+cat_fname, "snrthresh="+str(snr_thresh)]
arg_list += ["bkgradius="+str(bkgradius),
"srcradius="+str(srcradius),
"nadjpix=2", "vectorflux=YES",
"incatalog="+incat, "niter=4",
"pcodefile="+pcode, "chatter=1"]
if sigmap is not None:
arg_list.append('signifmap='+sigmap)
if bkgvar is not None:
arg_list.append('bkgvarmap='+bkgvar)
print(arg_list)
run_ftool(ftool, arg_list) | HeasoftTools/bat_tool_funcs.py | import os
import subprocess
from gen_tools import run_ftool
import numpy as np
def ev2dpi(infile, outfile, tstart, tstop, e0, e1, detmask):
ftool = "batbinevt"
arg_list = [infile, outfile, 'DPI', '0', 'uniform', str(e0)+'-'+str(e1)]
arg_list += ['tstart='+str(tstart), 'tstop='+str(tstop), 'detmask='+detmask]
run_ftool(ftool, arg_list)
def ev2dpi_ebins(infile, outfile, tstart, tstop, ebins, detmask):
ftool = "batbinevt"
arg_list = [infile, outfile, 'DPI', '0', 'uniform', ebins]
arg_list += ['tstart='+str(tstart), 'tstop='+str(tstop), 'detmask='+detmask]
run_ftool(ftool, arg_list)
def bateconvert(infile, outfile, calfile):
ftool = "bateconvert"
arg_list = ['infile='+infile, 'calfile='+calfile, 'residfile=CALDB',
'outfile='+outfile, 'clobber=YES', 'pulserfile=CALDB',
'fltpulserfile=CALDB']
run_ftool(ftool, arg_list)
def detmask(infile, outfile, dmask):
ftool = "batdetmask"
arg_list = [infile, outfile, 'detmask='+dmask]
run_ftool(ftool, arg_list)
def mk_bkg_mod(infile, outfile, detmask):
ftool = "batclean"
arg_list = [infile, outfile]
arg_list += ['incatalog=NONE', 'detmask='+detmask, 'srcclean=NO', 'outversion=bkgfit']
run_ftool(ftool, arg_list)
def mk_pc_img(infile, outfile, detmask, attfile, ovrsmp=4, detapp=False):
ftool = "batfftimage"
arg_list = [infile, outfile]
arg_list += ['detmask='+detmask, 'attitude='+attfile, 'pcodemap=YES',
'clobber=YES']
if ovrsmp is not None:
arg_list += ['oversampx='+str(ovrsmp),'oversampy='+str(ovrsmp)]
if detapp:
arg_list.append("aperture=CALDB:DETECTION")
run_ftool(ftool, arg_list)
def mk_sky_img(infile, outfile, detmask, attfile, bkg_file=None,\
ovrsmp=2, sig_map=None, bkgvar_map=None,\
detapp=False, rebal=True):
ftool = "batfftimage"
arg_list = [infile, outfile]
arg_list += ['detmask='+detmask, 'attitude='+attfile,
'oversampx='+str(ovrsmp),'oversampy='+str(ovrsmp),
'clobber=YES']
if bkg_file is not None:
arg_list += ['bkgfile='+bkg_file]
if bkgvar_map is not None:
arg_list += ['bkgvarmap='+bkgvar_map]
if sig_map is not None:
arg_list += ['signifmap='+sig_map]
if detapp:
arg_list.append("aperture=CALDB:DETECTION")
if rebal:
arg_list.append("rebalance=YES")
else:
arg_list.append("rebalance=NO")
run_ftool(ftool, arg_list)
def run_batcelldetect(infile, cat_fname, snr_thresh=3.5,\
sigmap=None, bkgvar=None, ovrsmp=2,\
incat="NONE", pcode="NONE"):
ftool = "batcelldetect"
bkgradius = 15*ovrsmp
srcradius = 6*ovrsmp
arg_list = ["infile="+infile, "outfile="+cat_fname, "snrthresh="+str(snr_thresh)]
arg_list += ["bkgradius="+str(bkgradius),
"srcradius="+str(srcradius),
"nadjpix=2", "vectorflux=YES",
"incatalog="+incat, "niter=4",
"pcodefile="+pcode, "chatter=1"]
if sigmap is not None:
arg_list.append('signifmap='+sigmap)
if bkgvar is not None:
arg_list.append('bkgvarmap='+bkgvar)
print(arg_list)
run_ftool(ftool, arg_list) | 0.089244 | 0.161254 |
import math
from compas.geometry._core import allclose
__all__ = [
'quaternion_norm',
'quaternion_unitize',
'quaternion_is_unit',
'quaternion_multiply',
'quaternion_canonize',
'quaternion_conjugate',
]
ATOL = 1e-6 # absolute tolerance
def quaternion_norm(q):
"""Calculates the length (euclidean norm) of a quaternion.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
float
The length (euclidean norm) of a quaternion.
References
----------
.. _mathworld quaternion norm: http://mathworld.wolfram.com/QuaternionNorm.html
"""
return math.sqrt(sum([x * x for x in q]))
def quaternion_unitize(q):
"""Makes a quaternion unit-length.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
list
Quaternion of length 1 as a list of four real values ``[nw, nx, ny, nz]``.
"""
n = quaternion_norm(q)
if allclose([n], [0.0], ATOL):
raise ValueError("The given quaternion has zero length.")
else:
return [x / n for x in q]
def quaternion_is_unit(q, tol=ATOL):
"""Checks if a quaternion is unit-length.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
tol : float, optional
Requested decimal precision.
Returns
-------
bool
``True`` if the quaternion is unit-length, and ``False`` if otherwise.
"""
n = quaternion_norm(q)
return allclose([n], [1.0], tol)
def quaternion_multiply(r, q):
"""Multiplies two quaternions.
Parameters
----------
r : list
Quaternion as a list of four real values ``[rw, rx, ry, rz]``.
q : list
Quaternion as a list of four real values ``[qw, qx, qy, qz]``.
Returns
-------
list
Quaternion :math:`p = rq` as a list of four real values ``[pw, px, py, pz]``.
Notes
-----
Multiplication of two quaternions :math:`p = rq` can be interpreted as applying rotation :math:`r` to an orientation :math:`q`,
provided that both :math:`r` and :math:`q` are unit-length.
The result is also unit-length.
Multiplication of quaternions is not commutative!
References
----------
.. _mathworld quaternion: http://mathworld.wolfram.com/Quaternion.html
"""
rw, rx, ry, rz = r
qw, qx, qy, qz = q
pw = rw*qw - rx*qx - ry*qy - rz*qz
px = rw*qx + rx*qw + ry*qz - rz*qy
py = rw*qy - rx*qz + ry*qw + rz*qx
pz = rw*qz + rx*qy - ry*qx + rz*qw
return [pw, px, py, pz]
def quaternion_canonize(q):
"""Converts a quaternion into a canonic form if needed.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
list
Quaternion in a canonic form as a list of four real values ``[cw, cx, cy, cz]``.
Notes
-----
Canonic form means the scalar component is a non-negative number.
"""
if q[0] < 0.0:
return [-x for x in q]
return q[:]
def quaternion_conjugate(q):
"""Conjugate of a quaternion.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
list
Conjugate quaternion as a list of four real values ``[cw, cx, cy, cz]``.
References
----------
.. _mathworld quaternion conjugate: http://mathworld.wolfram.com/QuaternionConjugate.html
"""
return [q[0], -q[1], -q[2], -q[3]] | src/compas/geometry/_core/quaternions.py | import math
from compas.geometry._core import allclose
__all__ = [
'quaternion_norm',
'quaternion_unitize',
'quaternion_is_unit',
'quaternion_multiply',
'quaternion_canonize',
'quaternion_conjugate',
]
ATOL = 1e-6 # absolute tolerance
def quaternion_norm(q):
"""Calculates the length (euclidean norm) of a quaternion.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
float
The length (euclidean norm) of a quaternion.
References
----------
.. _mathworld quaternion norm: http://mathworld.wolfram.com/QuaternionNorm.html
"""
return math.sqrt(sum([x * x for x in q]))
def quaternion_unitize(q):
"""Makes a quaternion unit-length.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
list
Quaternion of length 1 as a list of four real values ``[nw, nx, ny, nz]``.
"""
n = quaternion_norm(q)
if allclose([n], [0.0], ATOL):
raise ValueError("The given quaternion has zero length.")
else:
return [x / n for x in q]
def quaternion_is_unit(q, tol=ATOL):
"""Checks if a quaternion is unit-length.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
tol : float, optional
Requested decimal precision.
Returns
-------
bool
``True`` if the quaternion is unit-length, and ``False`` if otherwise.
"""
n = quaternion_norm(q)
return allclose([n], [1.0], tol)
def quaternion_multiply(r, q):
"""Multiplies two quaternions.
Parameters
----------
r : list
Quaternion as a list of four real values ``[rw, rx, ry, rz]``.
q : list
Quaternion as a list of four real values ``[qw, qx, qy, qz]``.
Returns
-------
list
Quaternion :math:`p = rq` as a list of four real values ``[pw, px, py, pz]``.
Notes
-----
Multiplication of two quaternions :math:`p = rq` can be interpreted as applying rotation :math:`r` to an orientation :math:`q`,
provided that both :math:`r` and :math:`q` are unit-length.
The result is also unit-length.
Multiplication of quaternions is not commutative!
References
----------
.. _mathworld quaternion: http://mathworld.wolfram.com/Quaternion.html
"""
rw, rx, ry, rz = r
qw, qx, qy, qz = q
pw = rw*qw - rx*qx - ry*qy - rz*qz
px = rw*qx + rx*qw + ry*qz - rz*qy
py = rw*qy - rx*qz + ry*qw + rz*qx
pz = rw*qz + rx*qy - ry*qx + rz*qw
return [pw, px, py, pz]
def quaternion_canonize(q):
"""Converts a quaternion into a canonic form if needed.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
list
Quaternion in a canonic form as a list of four real values ``[cw, cx, cy, cz]``.
Notes
-----
Canonic form means the scalar component is a non-negative number.
"""
if q[0] < 0.0:
return [-x for x in q]
return q[:]
def quaternion_conjugate(q):
"""Conjugate of a quaternion.
Parameters
----------
q : list
Quaternion as a list of four real values ``[w, x, y, z]``.
Returns
-------
list
Conjugate quaternion as a list of four real values ``[cw, cx, cy, cz]``.
References
----------
.. _mathworld quaternion conjugate: http://mathworld.wolfram.com/QuaternionConjugate.html
"""
return [q[0], -q[1], -q[2], -q[3]] | 0.952508 | 0.789356 |
"""Tests for cisconx acl rendering module."""
from absl.testing import absltest
from unittest import mock
from capirca.lib import cisconx
from capirca.lib import nacaddr
from capirca.lib import naming
from capirca.lib import policy
GOOD_HEADER = """
header {
comment:: "this is a test extended acl"
target:: cisconx test-filter extended
}
"""
GOOD_HEADER_2 = """
header {
comment:: "this is a test acl"
target:: cisconx test-filter
}
"""
GOOD_HEADER_IPV6 = """
header {
comment:: "this is a test inet6 acl"
target:: cisconx test-filter inet6
}
"""
GOOD_TERM = """
term good-term {
protocol:: tcp
option:: tcp-established
action:: accept
}
"""
GOOD_TERM_1 = """
term good-term-1 {
protocol:: tcp
option:: tcp-established
policer:: batman
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
source-address:: SOME_HOST
destination-port:: SSH
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_3 = """
term good-term-3 {
source-address:: SOME_HOST2
destination-port:: GOPENFLOW
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_4 = """
term good-term-4 {
comment:: "Accept SNMP from internal sources."
address:: SOME_HOST
action:: accept
}
"""
GOOD_TERM_5 = """
term good-term-5 {
comment:: "Accept ESP from internal sources."
address:: SOME_HOST
protocol:: esp
action:: accept
}
"""
GOOD_TERM_6 = """
term good-term-6 {
comment:: "Accept AH from internal sources."
address:: SOME_HOST
protocol:: ah
action:: accept
}
"""
GOOD_TERM_7 = """
term good-term-6 {
comment:: "Accept AH from internal sources."
address:: SOME_HOST
protocol:: ah esp tcp
action:: accept
}
"""
SUPPORTED_TOKENS = {
'action',
'address',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'dscp_match',
'expiration',
'icmp_code',
'icmp_type',
'stateless_reply',
'logging',
'name',
'option',
'owner',
'platform',
'platform_exclude',
'protocol',
'source_address',
'source_address_exclude',
'source_port',
'translated',
'verbatim',
}
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-path-solicitation',
'conversion-error',
'destination-unreachable',
'echo-reply',
'echo-request',
'mobile-redirect',
'home-agent-address-discovery-reply',
'home-agent-address-discovery-request',
'icmp-node-information-query',
'icmp-node-information-response',
'information-request',
'inverse-neighbor-discovery-advertisement',
'inverse-neighbor-discovery-solicitation',
'mask-reply',
'mask-request',
'information-reply',
'mobile-prefix-advertisement',
'mobile-prefix-solicitation',
'multicast-listener-done',
'multicast-listener-query',
'multicast-listener-report',
'multicast-router-advertisement',
'multicast-router-solicitation',
'multicast-router-termination',
'neighbor-advertisement',
'neighbor-solicit',
'packet-too-big',
'parameter-problem',
'redirect',
'redirect-message',
'router-advertisement',
'router-renumbering',
'router-solicit',
'router-solicitation',
'source-quench',
'time-exceeded',
'timestamp-reply',
'timestamp-request',
'unreachable',
'version-2-multicast-listener-report',
},
'option': {
'established',
'tcp-established',
'is-fragment',
'fragments'
}
}
# Print a info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
class CiscoNXTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.naming = mock.create_autospec(naming.Naming)
def testRemark(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')]
pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_4, self.naming)
acl = cisconx.CiscoNX(pol, EXP_INFO)
expected = 'remark this is a test extended acl'
self.assertIn(expected, str(acl), '[%s]' % str(acl))
expected = 'remark good-term-4'
self.assertIn(expected, str(acl), str(acl))
expected = 'test-filter remark'
self.assertNotIn(expected, str(acl), str(acl))
self.assertNotIn(' remark %sId:%s' % ('$', '$'), str(acl), str(acl))
self.assertIn(' remark "%sRevision:%s"' % ('$', '$'), str(acl), str(acl))
self.assertNotIn(' remark $', str(acl), str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testExtendedNXosSyntax(self):
# Extended access-lists should not use the "extended" argument to ip
# access-list.
acl = cisconx.CiscoNX(
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO)
self.assertIn('ip access-list test-filter', str(acl))
def testBuildTokens(self):
pol1 = cisconx.CiscoNX(
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
def testBuildWarningTokens(self):
pol1 = cisconx.CiscoNX(
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
def testStandardTermHost(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.0/24')]
self.naming.GetServiceByProto.return_value = ['22', '6537']
pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_2 + GOOD_TERM_3,
self.naming)
acl = cisconx.CiscoNX(pol, EXP_INFO)
expected = 'ip access-list test-filter'
self.assertIn(expected, str(acl), '[%s]' % str(acl))
expected = ' permit tcp 10.1.1.0 0.0.0.255 any eq 22'
self.assertIn(expected, str(acl), str(acl))
expected = ' permit tcp 10.1.1.0 0.0.0.255 any eq 6537'
self.assertIn(expected, str(acl), str(acl))
self.naming.GetNetAddr.assert_has_calls(
[mock.call('SOME_HOST'),
mock.call('SOME_HOST2')])
self.naming.GetServiceByProto.assert_has_calls(
[mock.call('SSH', 'tcp'),
mock.call('GOPENFLOW', 'tcp')])
def testStandardTermHostV6(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('2620:1::/64')]
self.naming.GetServiceByProto.return_value = ['22']
pol = policy.ParsePolicy(GOOD_HEADER_IPV6 + GOOD_TERM_2, self.naming)
acl = cisconx.CiscoNX(pol, EXP_INFO)
expected = 'ipv6 access-list test-filter'
self.assertIn(expected, str(acl), '[%s]' % str(acl))
expected = ' permit tcp 2620:1::/64 any eq 22'
self.assertIn(expected, str(acl), str(acl))
self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST')])
self.naming.GetServiceByProto.assert_has_calls([mock.call('SSH', 'tcp')])
if __name__ == '__main__':
absltest.main() | tests/lib/cisconx_test.py | """Tests for cisconx acl rendering module."""
from absl.testing import absltest
from unittest import mock
from capirca.lib import cisconx
from capirca.lib import nacaddr
from capirca.lib import naming
from capirca.lib import policy
GOOD_HEADER = """
header {
comment:: "this is a test extended acl"
target:: cisconx test-filter extended
}
"""
GOOD_HEADER_2 = """
header {
comment:: "this is a test acl"
target:: cisconx test-filter
}
"""
GOOD_HEADER_IPV6 = """
header {
comment:: "this is a test inet6 acl"
target:: cisconx test-filter inet6
}
"""
GOOD_TERM = """
term good-term {
protocol:: tcp
option:: tcp-established
action:: accept
}
"""
GOOD_TERM_1 = """
term good-term-1 {
protocol:: tcp
option:: tcp-established
policer:: batman
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
source-address:: SOME_HOST
destination-port:: SSH
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_3 = """
term good-term-3 {
source-address:: SOME_HOST2
destination-port:: GOPENFLOW
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_4 = """
term good-term-4 {
comment:: "Accept SNMP from internal sources."
address:: SOME_HOST
action:: accept
}
"""
GOOD_TERM_5 = """
term good-term-5 {
comment:: "Accept ESP from internal sources."
address:: SOME_HOST
protocol:: esp
action:: accept
}
"""
GOOD_TERM_6 = """
term good-term-6 {
comment:: "Accept AH from internal sources."
address:: SOME_HOST
protocol:: ah
action:: accept
}
"""
GOOD_TERM_7 = """
term good-term-6 {
comment:: "Accept AH from internal sources."
address:: SOME_HOST
protocol:: ah esp tcp
action:: accept
}
"""
SUPPORTED_TOKENS = {
'action',
'address',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'dscp_match',
'expiration',
'icmp_code',
'icmp_type',
'stateless_reply',
'logging',
'name',
'option',
'owner',
'platform',
'platform_exclude',
'protocol',
'source_address',
'source_address_exclude',
'source_port',
'translated',
'verbatim',
}
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-path-solicitation',
'conversion-error',
'destination-unreachable',
'echo-reply',
'echo-request',
'mobile-redirect',
'home-agent-address-discovery-reply',
'home-agent-address-discovery-request',
'icmp-node-information-query',
'icmp-node-information-response',
'information-request',
'inverse-neighbor-discovery-advertisement',
'inverse-neighbor-discovery-solicitation',
'mask-reply',
'mask-request',
'information-reply',
'mobile-prefix-advertisement',
'mobile-prefix-solicitation',
'multicast-listener-done',
'multicast-listener-query',
'multicast-listener-report',
'multicast-router-advertisement',
'multicast-router-solicitation',
'multicast-router-termination',
'neighbor-advertisement',
'neighbor-solicit',
'packet-too-big',
'parameter-problem',
'redirect',
'redirect-message',
'router-advertisement',
'router-renumbering',
'router-solicit',
'router-solicitation',
'source-quench',
'time-exceeded',
'timestamp-reply',
'timestamp-request',
'unreachable',
'version-2-multicast-listener-report',
},
'option': {
'established',
'tcp-established',
'is-fragment',
'fragments'
}
}
# Print a info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
class CiscoNXTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.naming = mock.create_autospec(naming.Naming)
def testRemark(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')]
pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_4, self.naming)
acl = cisconx.CiscoNX(pol, EXP_INFO)
expected = 'remark this is a test extended acl'
self.assertIn(expected, str(acl), '[%s]' % str(acl))
expected = 'remark good-term-4'
self.assertIn(expected, str(acl), str(acl))
expected = 'test-filter remark'
self.assertNotIn(expected, str(acl), str(acl))
self.assertNotIn(' remark %sId:%s' % ('$', '$'), str(acl), str(acl))
self.assertIn(' remark "%sRevision:%s"' % ('$', '$'), str(acl), str(acl))
self.assertNotIn(' remark $', str(acl), str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testExtendedNXosSyntax(self):
# Extended access-lists should not use the "extended" argument to ip
# access-list.
acl = cisconx.CiscoNX(
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO)
self.assertIn('ip access-list test-filter', str(acl))
def testBuildTokens(self):
pol1 = cisconx.CiscoNX(
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM, self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
def testBuildWarningTokens(self):
pol1 = cisconx.CiscoNX(
policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
def testStandardTermHost(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.0/24')]
self.naming.GetServiceByProto.return_value = ['22', '6537']
pol = policy.ParsePolicy(GOOD_HEADER_2 + GOOD_TERM_2 + GOOD_TERM_3,
self.naming)
acl = cisconx.CiscoNX(pol, EXP_INFO)
expected = 'ip access-list test-filter'
self.assertIn(expected, str(acl), '[%s]' % str(acl))
expected = ' permit tcp 10.1.1.0 0.0.0.255 any eq 22'
self.assertIn(expected, str(acl), str(acl))
expected = ' permit tcp 10.1.1.0 0.0.0.255 any eq 6537'
self.assertIn(expected, str(acl), str(acl))
self.naming.GetNetAddr.assert_has_calls(
[mock.call('SOME_HOST'),
mock.call('SOME_HOST2')])
self.naming.GetServiceByProto.assert_has_calls(
[mock.call('SSH', 'tcp'),
mock.call('GOPENFLOW', 'tcp')])
def testStandardTermHostV6(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('2620:1::/64')]
self.naming.GetServiceByProto.return_value = ['22']
pol = policy.ParsePolicy(GOOD_HEADER_IPV6 + GOOD_TERM_2, self.naming)
acl = cisconx.CiscoNX(pol, EXP_INFO)
expected = 'ipv6 access-list test-filter'
self.assertIn(expected, str(acl), '[%s]' % str(acl))
expected = ' permit tcp 2620:1::/64 any eq 22'
self.assertIn(expected, str(acl), str(acl))
self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST')])
self.naming.GetServiceByProto.assert_has_calls([mock.call('SSH', 'tcp')])
if __name__ == '__main__':
absltest.main() | 0.759582 | 0.376365 |
import torch.nn as nn
from torch.nn import Conv2d, BatchNorm2d, Dropout2d
import torch.nn.functional as nnf
from ..layers import ActivationFactory as AF
def _conv3x3(in_channels, out_channels, stride=1, groups=1, dilation=1, bias=False):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=bias,
dilation=dilation,
)
def _conv1x1(in_channels, out_channels, stride=1, bias=False):
"""1x1 convolution"""
return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=bias)
def _make_downsample(in_channels, out_channels, stride, norm_layer, norm_before):
if norm_before:
return nn.Sequential(
_conv1x1(in_channels, out_channels, stride, bias=False),
norm_layer(out_channels),
)
return _conv1x1(in_channels, out_channels, stride, bias=True)
class ResNetInputBlock(nn.Module):
    """Input block for ResNet architecture

    Conv -> norm/activation (order controlled by ``norm_before``),
    optionally followed by 3x3/stride-2 max-pooling.

    Args:
        in_channels: input channels
        out_channels: output channels
        kernel_size: kernel size for conv
        stride: stride for conv
        activation: str/dict indicating activation type and arguments;
            when None it defaults to in-place ReLU
        norm_layer: norm_layer object constructor, if None it uses BatchNorm2d
        norm_before: if True it applies the norm_layer before the activation,
            if False, after the activation
        do_maxpool: apply maxpooling 2x2 at the output
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=7,
        stride=2,
        activation=None,
        norm_layer=None,
        norm_before=True,
        do_maxpool=True,
    ):
        super().__init__()
        # None sentinel instead of a mutable dict default argument;
        # behavior is unchanged for callers relying on the old default.
        if activation is None:
            activation = {"name": "relu", "inplace": True}
        padding = (kernel_size - 1) // 2
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # When the norm follows the conv directly, the conv bias is redundant.
        bias = not norm_before
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=bias,
        )
        self.bn = norm_layer(out_channels)
        self.act = AF.create(activation)
        self.norm_before = norm_before
        self.do_maxpool = do_maxpool
        # context: pixels of receptive field added on each side by the conv.
        self.context = (kernel_size - 1) // 2
        self.downsample_factor = stride
        if do_maxpool:
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.downsample_factor *= 2

    def forward(self, x):
        """Apply conv, norm and activation (order per ``norm_before``),
        then optional max-pooling."""
        x = self.conv(x)
        if self.norm_before:
            x = self.bn(x)
        x = self.act(x)
        if not self.norm_before:
            x = self.bn(x)
        if self.do_maxpool:
            x = self.maxpool(x)
        return x
class ResNetBasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions plus a shortcut.

    Args:
        in_channels: input channels
        channels: output channels of both 3x3 convs
        activation: str/dict indicating activation type and arguments;
            when None it defaults to in-place ReLU
        stride: stride of the first conv (and of the shortcut projection)
        dropout_rate: Dropout2d rate applied after the residual addition
        groups: groups for the 3x3 convs
        dilation: dilation for the first 3x3 conv
        norm_layer: norm_layer object constructor, if None it uses BatchNorm2d
        norm_before: if True it applies the norm_layer before the activation,
            if False, after the activation
    """

    expansion = 1

    def __init__(
        self,
        in_channels,
        channels,
        activation=None,
        stride=1,
        dropout_rate=0,
        groups=1,
        dilation=1,
        norm_layer=None,
        norm_before=True,
    ):
        super().__init__()
        # None sentinel instead of a mutable dict default argument;
        # behavior is unchanged for callers relying on the old default.
        if activation is None:
            activation = {"name": "relu", "inplace": True}
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        self.in_channels = in_channels
        self.channels = channels
        # When the norm follows the conv directly, the conv bias is redundant.
        bias = not norm_before
        self.conv1 = _conv3x3(
            in_channels, channels, stride, groups, dilation, bias=bias
        )
        self.bn1 = norm_layer(channels)
        self.act1 = AF.create(activation)
        self.conv2 = _conv3x3(channels, channels, groups=groups, bias=bias)
        self.bn2 = norm_layer(channels)
        self.act2 = AF.create(activation)
        self.stride = stride
        self.norm_before = norm_before

        # Project the shortcut when spatial size or channel count changes.
        self.downsample = None
        if stride != 1 or in_channels != channels:
            self.downsample = _make_downsample(
                in_channels, channels, stride, norm_layer, norm_before
            )

        self.dropout_rate = dropout_rate
        self.dropout = None
        if dropout_rate > 0:
            self.dropout = Dropout2d(dropout_rate)

        self.context = dilation + stride
        self.downsample_factor = stride

    @property
    def out_channels(self):
        # Basic block has expansion 1: output channels equal ``channels``.
        return self.channels

    def forward(self, x):
        """conv1 -> norm/act -> conv2 -> norm -> + shortcut -> act [-> dropout]."""
        residual = x

        x = self.conv1(x)
        if self.norm_before:
            x = self.bn1(x)
        x = self.act1(x)
        if not self.norm_before:
            x = self.bn1(x)

        x = self.conv2(x)
        if self.norm_before:
            x = self.bn2(x)

        if self.downsample is not None:
            residual = self.downsample(residual)

        x += residual
        x = self.act2(x)
        if not self.norm_before:
            x = self.bn2(x)

        if self.dropout_rate > 0:
            x = self.dropout(x)

        return x
class ResNetBNBlock(nn.Module):
    """ResNet bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions
    with channel expansion 4, plus a shortcut.

    Args:
        in_channels: input channels
        channels: bottleneck channels; output channels are
            ``channels * expansion``
        activation: str/dict indicating activation type and arguments;
            when None it defaults to in-place ReLU
        stride: stride of the 3x3 conv (and of the shortcut projection)
        dropout_rate: Dropout2d rate applied after the residual addition
        groups: groups for the 3x3 conv
        dilation: dilation for the 3x3 conv
        norm_layer: norm_layer object constructor, if None it uses BatchNorm2d
        norm_before: if True it applies the norm_layer before the activation,
            if False, after the activation
    """

    expansion = 4

    def __init__(
        self,
        in_channels,
        channels,
        activation=None,
        stride=1,
        dropout_rate=0,
        groups=1,
        dilation=1,
        norm_layer=None,
        norm_before=True,
    ):
        super().__init__()
        # None sentinel instead of a mutable dict default argument;
        # behavior is unchanged for callers relying on the old default.
        if activation is None:
            activation = {"name": "relu", "inplace": True}
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        self.in_channels = in_channels
        self.channels = channels
        # When the norm follows the conv directly, the conv bias is redundant.
        bias = not norm_before

        # Both self.conv2 and self.downsample layers downsample the input
        # when stride != 1.
        self.conv1 = _conv1x1(in_channels, channels, bias=bias)
        self.bn1 = norm_layer(channels)
        self.conv2 = _conv3x3(channels, channels, stride, groups, dilation, bias=bias)
        self.bn2 = norm_layer(channels)
        self.conv3 = _conv1x1(channels, channels * self.expansion, bias=bias)
        self.bn3 = norm_layer(channels * self.expansion)
        self.act1 = AF.create(activation)
        self.act2 = AF.create(activation)
        self.act3 = AF.create(activation)
        self.stride = stride
        self.norm_before = norm_before

        # Project the shortcut when spatial size or channel count changes.
        self.downsample = None
        if stride != 1 or in_channels != channels * self.expansion:
            self.downsample = _make_downsample(
                in_channels, channels * self.expansion, stride, norm_layer, norm_before
            )

        self.dropout_rate = dropout_rate
        self.dropout = None
        if dropout_rate > 0:
            self.dropout = Dropout2d(dropout_rate)

        self.context = dilation
        self.downsample_factor = stride

    @property
    def out_channels(self):
        # Bottleneck output is expanded by ``expansion`` (4).
        return self.channels * self.expansion

    def forward(self, x):
        """1x1 -> 3x3 -> 1x1 convs with norm/act, + shortcut, final act
        [-> dropout]."""
        residual = x

        x = self.conv1(x)
        if self.norm_before:
            x = self.bn1(x)
        x = self.act1(x)
        if not self.norm_before:
            x = self.bn1(x)

        x = self.conv2(x)
        if self.norm_before:
            x = self.bn2(x)
        x = self.act2(x)
        if not self.norm_before:
            x = self.bn2(x)

        x = self.conv3(x)
        if self.norm_before:
            x = self.bn3(x)

        if self.downsample is not None:
            residual = self.downsample(residual)

        x += residual
        x = self.act3(x)
        if not self.norm_before:
            x = self.bn3(x)

        if self.dropout_rate > 0:
            x = self.dropout(x)

        return x
class Interpolate(nn.Module):
def __init__(self, scale_factor, mode="nearest"):
super().__init__()
self.interp = nnf.interpolate
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class ResNetEndpointBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
scale,
activation={"name": "relu", "inplace": True},
norm_layer=None,
norm_before=True,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
bias = not norm_before
self.out_channels = out_channels
self.in_channels = in_channels
self.norm_before = norm_before
if self.in_channels != self.out_channels:
self.conv = _conv1x1(in_channels, out_channels, bias=bias)
self.bn = norm_layer(out_channels)
self.act = AF.create(activation)
self.scale = scale
if self.scale > 1:
self.upsample = Interpolate(scale_factor=scale, mode="nearest")
def forward(self, x):
if self.in_channels != self.out_channels:
x = self.conv(x)
if self.norm_before:
x = self.bn(x)
x = self.act(x)
if not self.norm_before:
x = self.bn(x)
if self.scale > 1:
x = self.upsample(x)
return x | hyperion/torch/layer_blocks/resnet_blocks.py | import torch.nn as nn
from torch.nn import Conv2d, BatchNorm2d, Dropout2d
import torch.nn.functional as nnf
from ..layers import ActivationFactory as AF
def _conv3x3(in_channels, out_channels, stride=1, groups=1, dilation=1, bias=False):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=bias,
dilation=dilation,
)
def _conv1x1(in_channels, out_channels, stride=1, bias=False):
"""1x1 convolution"""
return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=bias)
def _make_downsample(in_channels, out_channels, stride, norm_layer, norm_before):
if norm_before:
return nn.Sequential(
_conv1x1(in_channels, out_channels, stride, bias=False),
norm_layer(out_channels),
)
return _conv1x1(in_channels, out_channels, stride, bias=True)
class ResNetInputBlock(nn.Module):
"""Input block for ResNet architecture
Args:
in_channels: input channels
out_channels: output channels
kernel_size: kernel size for conv
stride: stride for conv
activation: str/dict indicationg activation type and arguments
norm_layer: norm_layer object constructor, if None it uses BatchNorm2d
norm_before: if True it applies the norm_layer before the activation,
if False, after the activation
do_maxpool: apply maxpooling 2x2 at the output
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size=7,
stride=2,
activation={"name": "relu", "inplace": True},
norm_layer=None,
norm_before=True,
do_maxpool=True,
):
super().__init__()
padding = int((kernel_size - 1) / 2)
if norm_layer is None:
norm_layer = nn.BatchNorm2d
bias = not norm_before
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias,
)
self.bn = norm_layer(out_channels)
self.act = AF.create(activation)
self.norm_before = norm_before
self.do_maxpool = do_maxpool
self.context = int((kernel_size - 1) / 2)
self.downsample_factor = stride
if do_maxpool:
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.downsample_factor *= 2
def forward(self, x):
x = self.conv(x)
if self.norm_before:
x = self.bn(x)
x = self.act(x)
if not self.norm_before:
x = self.bn(x)
if self.do_maxpool:
x = self.maxpool(x)
return x
class ResNetBasicBlock(nn.Module):
expansion = 1
# __constants__ = ['downsample']
def __init__(
self,
in_channels,
channels,
activation={"name": "relu", "inplace": True},
stride=1,
dropout_rate=0,
groups=1,
dilation=1,
norm_layer=None,
norm_before=True,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.in_channels = in_channels
self.channels = channels
bias = not norm_before
self.conv1 = _conv3x3(
in_channels, channels, stride, groups, dilation, bias=bias
)
self.bn1 = norm_layer(channels)
self.act1 = AF.create(activation)
self.conv2 = _conv3x3(channels, channels, groups=groups, bias=bias)
self.bn2 = norm_layer(channels)
self.act2 = AF.create(activation)
self.stride = stride
self.norm_before = norm_before
self.downsample = None
if stride != 1 or in_channels != channels:
self.downsample = _make_downsample(
in_channels, channels, stride, norm_layer, norm_before
)
self.dropout_rate = dropout_rate
self.dropout = None
if dropout_rate > 0:
self.dropout = Dropout2d(dropout_rate)
self.context = dilation + stride
self.downsample_factor = stride
@property
def out_channels(self):
return self.channels
def forward(self, x):
residual = x
x = self.conv1(x)
if self.norm_before:
x = self.bn1(x)
x = self.act1(x)
if not self.norm_before:
x = self.bn1(x)
x = self.conv2(x)
if self.norm_before:
x = self.bn2(x)
if self.downsample is not None:
residual = self.downsample(residual)
x += residual
x = self.act2(x)
if not self.norm_before:
x = self.bn2(x)
if self.dropout_rate > 0:
x = self.dropout(x)
return x
class ResNetBNBlock(nn.Module):
expansion = 4
# __constants__ = ['downsample']
def __init__(
self,
in_channels,
channels,
activation={"name": "relu", "inplace": True},
stride=1,
dropout_rate=0,
groups=1,
dilation=1,
norm_layer=None,
norm_before=True,
):
super().__init__()
self.in_channels = in_channels
self.channels = channels
if norm_layer is None:
norm_layer = nn.BatchNorm2d
bias = not norm_before
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = _conv1x1(in_channels, channels, bias=bias)
self.bn1 = norm_layer(channels)
self.conv2 = _conv3x3(channels, channels, stride, groups, dilation, bias=bias)
self.bn2 = norm_layer(channels)
self.conv3 = _conv1x1(channels, channels * self.expansion, bias=bias)
self.bn3 = norm_layer(channels * self.expansion)
self.act1 = AF.create(activation)
self.act2 = AF.create(activation)
self.act3 = AF.create(activation)
self.stride = stride
self.norm_before = norm_before
self.downsample = None
if stride != 1 or in_channels != channels * self.expansion:
self.downsample = _make_downsample(
in_channels, channels * self.expansion, stride, norm_layer, norm_before
)
self.dropout_rate = dropout_rate
self.dropout = None
if dropout_rate > 0:
self.dropout = Dropout2d(dropout_rate)
self.context = dilation
self.downsample_factor = stride
@property
def out_channels(self):
return self.channels * self.expansion
def forward(self, x):
residual = x
x = self.conv1(x)
if self.norm_before:
x = self.bn1(x)
x = self.act1(x)
if not self.norm_before:
x = self.bn1(x)
x = self.conv2(x)
if self.norm_before:
x = self.bn2(x)
x = self.act2(x)
if not self.norm_before:
x = self.bn2(x)
x = self.conv3(x)
if self.norm_before:
x = self.bn3(x)
if self.downsample is not None:
residual = self.downsample(residual)
x += residual
x = self.act3(x)
if not self.norm_before:
x = self.bn3(x)
if self.dropout_rate > 0:
x = self.dropout(x)
return x
class Interpolate(nn.Module):
def __init__(self, scale_factor, mode="nearest"):
super().__init__()
self.interp = nnf.interpolate
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class ResNetEndpointBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
scale,
activation={"name": "relu", "inplace": True},
norm_layer=None,
norm_before=True,
):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
bias = not norm_before
self.out_channels = out_channels
self.in_channels = in_channels
self.norm_before = norm_before
if self.in_channels != self.out_channels:
self.conv = _conv1x1(in_channels, out_channels, bias=bias)
self.bn = norm_layer(out_channels)
self.act = AF.create(activation)
self.scale = scale
if self.scale > 1:
self.upsample = Interpolate(scale_factor=scale, mode="nearest")
def forward(self, x):
if self.in_channels != self.out_channels:
x = self.conv(x)
if self.norm_before:
x = self.bn(x)
x = self.act(x)
if not self.norm_before:
x = self.bn(x)
if self.scale > 1:
x = self.upsample(x)
return x | 0.953046 | 0.497986 |
import unittest
import numpy as np
import torch # Import torch first!
from crf.py_permutohedral import PyPermutohedral
try:
import permuto_cpp
except ImportError as e:
raise (e, 'Did you import `torch` first?')
from pytorch_permuto.filters import PermutoFunction
from torch.autograd import gradcheck
class TestPermutoOp(unittest.TestCase):
def setUp(self) -> None:
self.n_features = 5
self.height = 5
self.width = 4
self.n_classes = 10
self.q_in = torch.randn(self.n_classes, self.height, self.width)
self.q_in_gradcheck = torch.randn(self.n_classes, self.height, self.width, requires_grad=True)
self.grad_q_out = torch.randn(self.n_classes, self.height, self.width)
self.features = torch.randn(self.height, self.width, self.n_features, requires_grad=False)
def test_spatial_filtering(self):
self.assertRaises(RuntimeError, permuto_cpp.forward, self.q_in.double(), self.features)
self.assertRaises(RuntimeError, permuto_cpp.forward, self.q_in, self.features.double())
self.assertRaises(RuntimeError, permuto_cpp.forward, self.q_in,
torch.randn(self.height + 1, self.width, self.n_features))
def test_with_numpy_impl(self):
q_in_np = self.q_in.numpy()
q_in_np = np.ascontiguousarray(np.transpose(q_in_np, [1, 2, 0])) # Put channels at the end
np_out = np.zeros_like(q_in_np)
lattice = PyPermutohedral()
lattice.init(self.features.numpy(), num_dimensions=self.n_features, num_points=self.height * self.width)
lattice.compute(np_out, q_in_np, self.n_classes, False)
pytouch_out = permuto_cpp.forward(self.q_in, self.features)[0]
pytorch_out = np.transpose(pytouch_out.numpy(), [1, 2, 0])
self.assertAlmostEqual(np.max(np.abs(np_out - pytorch_out)), 0)
np.testing.assert_allclose(pytorch_out, np_out)
def test_spatial_filtering_backwards(self):
self.assertRaises(RuntimeError, permuto_cpp.backward, self.grad_q_out.double(), self.features)
self.assertRaises(RuntimeError, permuto_cpp.backward, self.grad_q_out, self.features.double())
self.assertRaises(RuntimeError, permuto_cpp.backward, self.grad_q_out,
torch.randn(self.height + 1, self.width, self.n_features))
def test_backwards_with_numpy_impl(self):
grad_q_out_np = self.grad_q_out.numpy()
grad_q_out_np = np.ascontiguousarray(np.transpose(grad_q_out_np, [1, 2, 0])) # Put channels at the end
grad_q_back_np = np.zeros_like(grad_q_out_np)
lattice = PyPermutohedral()
lattice.init(self.features.numpy(), num_dimensions=self.n_features, num_points=self.height * self.width)
lattice.compute(grad_q_back_np, grad_q_out_np, self.n_classes, True)
pytorch_grad_back = permuto_cpp.backward(self.grad_q_out, self.features)[0]
pytorch_grad_back = np.transpose(pytorch_grad_back.numpy(), [1, 2, 0])
self.assertAlmostEqual(np.max(np.abs(grad_q_back_np - pytorch_grad_back)), 0)
np.testing.assert_allclose(pytorch_grad_back, grad_q_back_np)
def test_op_with_autograd_gradcheck(self):
"""
Testing for 32 bit machine precision i.e. eps=3e-4 (relative perturbation : sqrt(machine precision))
"""
PermutoFunc = PermutoFunction.apply
test = gradcheck(PermutoFunc, (self.q_in_gradcheck, self.features), eps=3e-4, atol=1e-3, rtol=1e-5,
raise_exception=True)
self.assertTrue(test)
if __name__ == '__main__':
unittest.main() | pytorch_permuto/pytorch_permuto/unit_tests/test_permuto_op.py | import unittest
import numpy as np
import torch # Import torch first!
from crf.py_permutohedral import PyPermutohedral
try:
import permuto_cpp
except ImportError as e:
raise (e, 'Did you import `torch` first?')
from pytorch_permuto.filters import PermutoFunction
from torch.autograd import gradcheck
class TestPermutoOp(unittest.TestCase):
def setUp(self) -> None:
self.n_features = 5
self.height = 5
self.width = 4
self.n_classes = 10
self.q_in = torch.randn(self.n_classes, self.height, self.width)
self.q_in_gradcheck = torch.randn(self.n_classes, self.height, self.width, requires_grad=True)
self.grad_q_out = torch.randn(self.n_classes, self.height, self.width)
self.features = torch.randn(self.height, self.width, self.n_features, requires_grad=False)
def test_spatial_filtering(self):
self.assertRaises(RuntimeError, permuto_cpp.forward, self.q_in.double(), self.features)
self.assertRaises(RuntimeError, permuto_cpp.forward, self.q_in, self.features.double())
self.assertRaises(RuntimeError, permuto_cpp.forward, self.q_in,
torch.randn(self.height + 1, self.width, self.n_features))
def test_with_numpy_impl(self):
q_in_np = self.q_in.numpy()
q_in_np = np.ascontiguousarray(np.transpose(q_in_np, [1, 2, 0])) # Put channels at the end
np_out = np.zeros_like(q_in_np)
lattice = PyPermutohedral()
lattice.init(self.features.numpy(), num_dimensions=self.n_features, num_points=self.height * self.width)
lattice.compute(np_out, q_in_np, self.n_classes, False)
pytouch_out = permuto_cpp.forward(self.q_in, self.features)[0]
pytorch_out = np.transpose(pytouch_out.numpy(), [1, 2, 0])
self.assertAlmostEqual(np.max(np.abs(np_out - pytorch_out)), 0)
np.testing.assert_allclose(pytorch_out, np_out)
def test_spatial_filtering_backwards(self):
self.assertRaises(RuntimeError, permuto_cpp.backward, self.grad_q_out.double(), self.features)
self.assertRaises(RuntimeError, permuto_cpp.backward, self.grad_q_out, self.features.double())
self.assertRaises(RuntimeError, permuto_cpp.backward, self.grad_q_out,
torch.randn(self.height + 1, self.width, self.n_features))
def test_backwards_with_numpy_impl(self):
grad_q_out_np = self.grad_q_out.numpy()
grad_q_out_np = np.ascontiguousarray(np.transpose(grad_q_out_np, [1, 2, 0])) # Put channels at the end
grad_q_back_np = np.zeros_like(grad_q_out_np)
lattice = PyPermutohedral()
lattice.init(self.features.numpy(), num_dimensions=self.n_features, num_points=self.height * self.width)
lattice.compute(grad_q_back_np, grad_q_out_np, self.n_classes, True)
pytorch_grad_back = permuto_cpp.backward(self.grad_q_out, self.features)[0]
pytorch_grad_back = np.transpose(pytorch_grad_back.numpy(), [1, 2, 0])
self.assertAlmostEqual(np.max(np.abs(grad_q_back_np - pytorch_grad_back)), 0)
np.testing.assert_allclose(pytorch_grad_back, grad_q_back_np)
def test_op_with_autograd_gradcheck(self):
"""
Testing for 32 bit machine precision i.e. eps=3e-4 (relative perturbation : sqrt(machine precision))
"""
PermutoFunc = PermutoFunction.apply
test = gradcheck(PermutoFunc, (self.q_in_gradcheck, self.features), eps=3e-4, atol=1e-3, rtol=1e-5,
raise_exception=True)
self.assertTrue(test)
if __name__ == '__main__':
unittest.main() | 0.763924 | 0.694374 |
"""Simplest FFN model, as described in https://arxiv.org/abs/1611.00421."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .. import model
from . import convstack_3d
from .. import optimizer
from tensorflow.python.util import deprecation
import horovod.tensorflow as hvd
# Note: this model was originally trained with conv3d layers initialized with
# TruncatedNormalInitializedVariable with stddev = 0.01.
class HorovodConvStack3DFFNModel(convstack_3d.ConvStack3DFFNModel):
def define_tf_graph(self):
self.show_center_slice(self.input_seed)
if self.input_patches is None:
self.input_patches = tf.placeholder(
tf.float32, [1] + list(self.input_image_size[::-1]) +[1],
name='patches')
net = tf.concat([self.input_patches, self.input_seed], 4)
with tf.variable_scope('seed_update', reuse=False):
logit_update = convstack_3d._predict_object_mask(net, self.depth)
logit_seed = self.update_seed(self.input_seed, logit_update)
# Make predictions available, both as probabilities and logits.
self.logits = logit_seed
self.logistic = tf.sigmoid(logit_seed)
if self.labels is not None:
self.set_up_sigmoid_pixelwise_loss(logit_seed)
self.set_up_optimizer()
self.show_center_slice(logit_seed)
self.show_center_slice(self.labels, sigmoid=False)
self.add_summaries()
self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
def set_up_optimizer_old(self, loss=None, max_gradient_entry_mag=0.7):
"""Sets up the training op for the model."""
if loss is None:
loss = self.loss
tf.summary.scalar('optimizer_loss', self.loss)
opt = optimizer.optimizer_from_flags()
opt = hvd.DistributedOptimizer(opt)
grads_and_vars = opt.compute_gradients(loss)
for g, v in grads_and_vars:
if g is None:
tf.logging.error('Gradient is None: %s', v.op.name)
if max_gradient_entry_mag > 0.0:
grads_and_vars = [(tf.clip_by_value(g,
-max_gradient_entry_mag,
+max_gradient_entry_mag), v)
for g, v, in grads_and_vars]
# TODO(b/34707785): Hopefully remove need for these deprecated calls. Let
# one warning through so that we have some (low) possibility of noticing if
# the message changes.
trainables = tf.trainable_variables()
if trainables:
var = trainables[0]
tf.contrib.deprecated.histogram_summary(var.op.name, var)
with deprecation.silence():
for var in trainables[1:]:
tf.contrib.deprecated.histogram_summary(var.op.name, var)
for grad, var in grads_and_vars:
tf.contrib.deprecated.histogram_summary(
'gradients/' + var.op.name, grad)
self.train_op = opt.apply_gradients(grads_and_vars,
global_step=self.global_step,
name='train')
def set_up_optimizer(self, loss=None, max_gradient_entry_mag=0.7):
"""Sets up the training op for the model."""
if loss is None:
loss = self.loss
tf.summary.scalar('optimizer_loss', self.loss)
opt = optimizer.hvd_optimizer_from_flags(hvd.size())
opt = hvd.DistributedOptimizer(opt)
grads_and_vars = opt.compute_gradients(loss)
for g, v in grads_and_vars:
if g is None:
tf.logging.error('Gradient is None: %s', v.op.name)
if max_gradient_entry_mag > 0.0:
grads_and_vars = [(tf.clip_by_value(g,
-max_gradient_entry_mag,
+max_gradient_entry_mag), v)
for g, v, in grads_and_vars]
trainables = tf.trainable_variables()
if trainables:
for var in trainables:
tf.summary.histogram(var.name.replace(':0', ''), var)
for grad, var in grads_and_vars:
tf.summary.histogram(
'gradients/%s' % var.name.replace(':0', ''), grad)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = opt.apply_gradients(grads_and_vars,
global_step=self.global_step,
name='train') | ffn/training/models/horovod_convstack_3d.py | """Simplest FFN model, as described in https://arxiv.org/abs/1611.00421."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .. import model
from . import convstack_3d
from .. import optimizer
from tensorflow.python.util import deprecation
import horovod.tensorflow as hvd
# Note: this model was originally trained with conv3d layers initialized with
# TruncatedNormalInitializedVariable with stddev = 0.01.
class HorovodConvStack3DFFNModel(convstack_3d.ConvStack3DFFNModel):
def define_tf_graph(self):
self.show_center_slice(self.input_seed)
if self.input_patches is None:
self.input_patches = tf.placeholder(
tf.float32, [1] + list(self.input_image_size[::-1]) +[1],
name='patches')
net = tf.concat([self.input_patches, self.input_seed], 4)
with tf.variable_scope('seed_update', reuse=False):
logit_update = convstack_3d._predict_object_mask(net, self.depth)
logit_seed = self.update_seed(self.input_seed, logit_update)
# Make predictions available, both as probabilities and logits.
self.logits = logit_seed
self.logistic = tf.sigmoid(logit_seed)
if self.labels is not None:
self.set_up_sigmoid_pixelwise_loss(logit_seed)
self.set_up_optimizer()
self.show_center_slice(logit_seed)
self.show_center_slice(self.labels, sigmoid=False)
self.add_summaries()
self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
def set_up_optimizer_old(self, loss=None, max_gradient_entry_mag=0.7):
"""Sets up the training op for the model."""
if loss is None:
loss = self.loss
tf.summary.scalar('optimizer_loss', self.loss)
opt = optimizer.optimizer_from_flags()
opt = hvd.DistributedOptimizer(opt)
grads_and_vars = opt.compute_gradients(loss)
for g, v in grads_and_vars:
if g is None:
tf.logging.error('Gradient is None: %s', v.op.name)
if max_gradient_entry_mag > 0.0:
grads_and_vars = [(tf.clip_by_value(g,
-max_gradient_entry_mag,
+max_gradient_entry_mag), v)
for g, v, in grads_and_vars]
# TODO(b/34707785): Hopefully remove need for these deprecated calls. Let
# one warning through so that we have some (low) possibility of noticing if
# the message changes.
trainables = tf.trainable_variables()
if trainables:
var = trainables[0]
tf.contrib.deprecated.histogram_summary(var.op.name, var)
with deprecation.silence():
for var in trainables[1:]:
tf.contrib.deprecated.histogram_summary(var.op.name, var)
for grad, var in grads_and_vars:
tf.contrib.deprecated.histogram_summary(
'gradients/' + var.op.name, grad)
self.train_op = opt.apply_gradients(grads_and_vars,
global_step=self.global_step,
name='train')
def set_up_optimizer(self, loss=None, max_gradient_entry_mag=0.7):
"""Sets up the training op for the model."""
if loss is None:
loss = self.loss
tf.summary.scalar('optimizer_loss', self.loss)
opt = optimizer.hvd_optimizer_from_flags(hvd.size())
opt = hvd.DistributedOptimizer(opt)
grads_and_vars = opt.compute_gradients(loss)
for g, v in grads_and_vars:
if g is None:
tf.logging.error('Gradient is None: %s', v.op.name)
if max_gradient_entry_mag > 0.0:
grads_and_vars = [(tf.clip_by_value(g,
-max_gradient_entry_mag,
+max_gradient_entry_mag), v)
for g, v, in grads_and_vars]
trainables = tf.trainable_variables()
if trainables:
for var in trainables:
tf.summary.histogram(var.name.replace(':0', ''), var)
for grad, var in grads_and_vars:
tf.summary.histogram(
'gradients/%s' % var.name.replace(':0', ''), grad)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = opt.apply_gradients(grads_and_vars,
global_step=self.global_step,
name='train') | 0.806205 | 0.433742 |
import os
from os import path
import logging
import kubernetes
from flask import Blueprint, jsonify, request
from kubernetes.client.rest import ApiException
from operator_service.config import Config
from operator_service.data_store import create_sql_job, get_sql_status, get_sql_jobs, stop_sql_job, remove_sql_job
from operator_service.kubernetes_api import KubeAPI
from operator_service.utils import (
create_compute_job,
check_required_attributes,
generate_new_id,
process_signature_validation,
get_compute_resources
)
# Flask blueprint grouping every /compute endpoint of the operator service.
services = Blueprint('services', __name__)
# Configuration to connect to k8s.
# /.dockerenv only exists inside a container: outside one, load the developer's
# local kube config (~/.kube); inside the cluster, use the mounted
# service-account credentials instead.
if not path.exists('/.dockerenv'):
    kubernetes.config.load_kube_config()
else:
    kubernetes.config.load_incluster_config()
# Operator-service settings (CRD group/version/namespace); read once at import time.
config = Config()
@services.route('/compute', methods=['POST'])
def start_compute_job():
    """
    Create and start the compute job
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - in: body
        name: body
        required: false
        description: Init workflow.
        schema:
          workflow:
            nullable: true
            example: {
                        "agreementId":"0x111111",
                        "owner":"0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA",
                        "providerSignature":"ae01",
                        "workflow":{
                          "stages": [
                            {
                              "index": 0,
                              "input": [
                                {
                                  "id": "did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf",
                                  "url": [
                                    "https://data.ok.gov/sites/default/files/unspsc%20codes_3.csv"
                                  ],
                                  "index": 0
                                },
                                {
                                  "id": "did:op:1384941e6f0b46299b6e515723df3d8e8e5d1fb175554467a1cb7bc613f5c72e",
                                  "url": [
                                    "https://data.ct.gov/api/views/2fi9-sgi3/rows.csv?accessType=DOWNLOAD"
                                  ],
                                  "index": 1
                                }
                              ],
                              "compute": {
                                "Instances": 1,
                                "namespace": "withgpu",
                                "maxtime": 3600
                              },
                              "algorithm": {
                                "id": "did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf",
                                "url": "https://raw.githubusercontent.com/oceanprotocol/test-algorithm/master/javascript/algo.js",
                                "rawcode": "console.log('this is a test')",
                                "container": {
                                  "image": "node",
                                  "tag": "10",
                                  "entrypoint": "node $ALGO"
                                }
                              },
                              "output": {
                                "nodeUri": "https://nile.dev-ocean.com",
                                "brizoUri": "https://brizo.marketplace.dev-ocean.com",
                                "brizoAddress": "0x4aaab179035dc57b35e2ce066919048686f82972",
                                "metadata": {
                                  "name": "Workflow output"
                                },
                                "metadataUri": "https://aquarius.marketplace.dev-ocean.com",
                                "secretStoreUri": "https://secret-store.nile.dev-ocean.com",
                                "whitelist": [
                                  "0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e",
                                  "0xACBd138aBD70e2F00903268F3Db08f2D25677C9e"
                                ],
                                "owner":"0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA",
                                "publishOutput":true,
                                "publishAlgorithmLog":true
                              }
                            }
                          ]
                        }
                      }
    response:
      201:
        description: Workflow inited successfully.
      400:
        description: Some error
    """
    # Accept parameters either from the query string or from the JSON body.
    data = request.args if request.args else request.json
    required_attributes = [
        'workflow',
        'agreementId',
        'owner',
        'providerSignature'
    ]
    msg, status = check_required_attributes(required_attributes, data, 'POST:/compute')
    if msg:
        return jsonify(error=msg), status

    workflow = data.get('workflow')
    agreement_id = data.get('agreementId')
    owner = data.get('owner')
    if not workflow:
        # No f-string needed: the message has no placeholders.
        return jsonify(error='`workflow` is required in the payload and must '
                             'include workflow stages'), 400

    # Verify the provider's signature over the agreement id.
    # NOTE(review): the detailed `msg` from the validator is intentionally not
    # returned to the caller here; only a generic error is exposed.
    msg, status = process_signature_validation(data.get('providerSignature'), agreement_id)
    if msg:
        return jsonify(error='`providerSignature` of agreementId is required.'), status

    stages = workflow.get('stages')
    if not stages:
        logging.error('Missing stages')
        return jsonify(error='Missing stages'), 400

    # Only stage 0 is validated for the mandatory sections.
    for _attr in ('algorithm', 'compute', 'input', 'output'):
        if _attr not in stages[0]:
            logging.error(f'Missing {_attr} in stage 0')
            return jsonify(error=f'Missing {_attr} in stage 0'), 400

    # Loop through stages, capping each stage's maxtime to the operator-wide
    # ALGO_POD_TIMEOUT (0 disables the cap) and attaching compute resources.
    timeout = int(os.getenv("ALGO_POD_TIMEOUT", 0))
    compute_resources_def = get_compute_resources()
    for astage in workflow['stages']:
        if timeout > 0:
            maxtime = int(astage['compute'].get('maxtime', 0))
            if timeout < maxtime or maxtime <= 0:
                astage['compute']['maxtime'] = timeout
                logging.debug(f"Maxtime in workflow was {maxtime}. Overwritten to {timeout}")
        # NOTE(review): every stage shares the SAME resources dict object;
        # per-stage mutation of resources would alias across stages — confirm
        # this is intended.
        astage['compute']['resources'] = compute_resources_def

    job_id = generate_new_id()
    logging.debug(f'Got job_id: {job_id}')
    body = create_compute_job(
        workflow, job_id, config.group, config.version, config.namespace
    )
    logging.debug(f'Got body: {body}')
    kube_api = KubeAPI(config)
    try:
        # Create the custom resource in k8s, then mirror the job in SQL and
        # return its initial status list.
        api_response = kube_api.create_namespaced_custom_object(body)
        logging.info(api_response)
        create_sql_job(agreement_id, str(job_id), owner)
        status_list = get_sql_status(agreement_id, str(job_id), owner)
        return jsonify(status_list), 200
    except ApiException as e:
        logging.error(f'Exception when calling CustomObjectsApi->create_namespaced_custom_object: {e}')
        return jsonify(error='Unable to create job'), 400
@services.route('/compute', methods=['PUT'])
def stop_compute_job():
    """
    Stop the current compute job..
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - name: agreementId
        in: query
        description: agreementId
        type: string
      - name: jobId
        in: query
        description: Id of the job.
        type: string
      - name: owner
        in: query
        description: owner
        type: string
    """
    try:
        # Parameters may arrive via query string or JSON body; values shorter
        # than 2 chars are treated as absent.
        data = request.args if request.args else request.json
        selectors = {}
        for field in ('agreementId', 'jobId', 'owner'):
            value = data.get(field)
            selectors[field] = value if value and len(value) >= 2 else None
        agreement_id = selectors['agreementId']
        job_id = selectors['jobId']
        owner = selectors['owner']
        # At least one selector is required to scope the stop request.
        if agreement_id is None and job_id is None and owner is None:
            msg = 'You have to specify one of agreementId, jobId or owner'
            logging.error(msg)
            return jsonify(error=msg), 400
        # Mark every matching job as stopped, then report their statuses.
        for name in get_sql_jobs(agreement_id, job_id, owner):
            logging.info(f'Stopping job : {name}')
            stop_sql_job(name)
        status_list = get_sql_status(agreement_id, job_id, owner)
        return jsonify(status_list), 200
    except ApiException as e:
        logging.error(f'Exception when stopping compute job: {e}')
        return jsonify(error=f'Error stopping job: {e}'), 400
@services.route('/compute', methods=['DELETE'])
def delete_compute_job():
    """
    Deletes the current compute job.
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - name: agreementId
        in: query
        description: agreementId
        type: string
      - name: jobId
        in: query
        description: Id of the job.
        type: string
      - name: owner
        in: query
        description: owner
        type: string
    """
    body = kubernetes.client.V1DeleteOptions()  # V1DeleteOptions |
    # Duration in seconds before the object should be deleted. Must be a
    # non-negative integer; zero means delete immediately.
    grace_period_seconds = 56
    # BUG FIX: the previous code passed BOTH the deprecated `orphan_dependents`
    # flag and a `propagation_policy`, which the k8s API forbids (either one
    # may be set, but not both), and the policy value was the placeholder
    # string 'propagation_policy_example', which is not a valid policy
    # (valid values: 'Orphan', 'Background', 'Foreground'). Use 'Background'
    # so dependents are garbage-collected asynchronously.
    propagation_policy = 'Background'
    try:
        # Parameters may arrive via query string or JSON body; values shorter
        # than 2 chars are treated as absent.
        data = request.args if request.args else request.json
        agreement_id = data.get('agreementId')
        owner = data.get('owner')
        job_id = data.get('jobId')
        if not agreement_id or len(agreement_id) < 2:
            agreement_id = None
        if not job_id or len(job_id) < 2:
            job_id = None
        if not owner or len(owner) < 2:
            owner = None
        # At least one selector is required to scope the delete request.
        if owner is None and agreement_id is None and job_id is None:
            msg = 'You have to specify one of agreementId, jobId or owner'
            logging.error(msg)
            return jsonify(error=msg), 400
        kube_api = KubeAPI(config)
        jobs_list = get_sql_jobs(agreement_id, job_id, owner)
        logging.debug(f'Got {jobs_list}')
        # Remove each matching job from SQL, then delete its custom object
        # from the cluster.
        for name in jobs_list:
            remove_sql_job(name)
            api_response = kube_api.delete_namespaced_custom_object(
                name,
                body,
                grace_period_seconds=grace_period_seconds,
                propagation_policy=propagation_policy
            )
            logging.debug(api_response)
        status_list = get_sql_status(agreement_id, job_id, owner)
        return jsonify(status_list), 200
    except ApiException as e:
        logging.error(f'Exception when calling CustomObjectsApi->delete_namespaced_custom_object: {e}')
        return jsonify(error=f'Error deleting job {e}'), 400
@services.route('/compute', methods=['GET'])
def get_compute_job_status():
    """
    Get status for an specific or multiple jobs.
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - name: agreementId
        in: query
        description: agreementId
        type: string
      - name: jobId
        in: query
        description: Id of the job.
        type: string
      - name: owner
        in: query
        description: owner
        type: string
    responses:
      200:
        description: Get correctly the status
      400:
        description: Error
    """
    try:
        data = request.args if request.args else request.json
        agreement_id = data.get('agreementId')
        owner = data.get('owner')
        job_id = data.get('jobId')
        # Treat missing or too-short identifiers as "not supplied".
        if not agreement_id or len(agreement_id) < 2:
            agreement_id = None
        if not job_id or len(job_id) < 2:
            job_id = None
        if not owner or len(owner) < 2:
            owner = None
        # At least one filter is required to look up job status.
        if owner is None and agreement_id is None and job_id is None:
            # BUGFIX(lint): f-string had no placeholders; plain literal.
            msg = 'You have to specify one of agreementId, jobId or owner'
            logging.error(msg)
            return jsonify(error=msg), 400
        logging.debug("Try to start")
        api_response = get_sql_status(agreement_id, job_id, owner)
        return jsonify(api_response), 200
    except ApiException as e:
        msg = f'Error getting the status: {e}'
        logging.error(msg)
        return jsonify(error=msg), 400
from os import path
import logging
import kubernetes
from flask import Blueprint, jsonify, request
from kubernetes.client.rest import ApiException
from operator_service.config import Config
from operator_service.data_store import create_sql_job, get_sql_status, get_sql_jobs, stop_sql_job, remove_sql_job
from operator_service.kubernetes_api import KubeAPI
from operator_service.utils import (
create_compute_job,
check_required_attributes,
generate_new_id,
process_signature_validation,
get_compute_resources
)
# Flask blueprint under which every /compute endpoint below is registered.
services = Blueprint('services', __name__)
# Configuration to connect to k8s.
# Outside a container (no /.dockerenv marker) use the developer's local
# kubeconfig; inside a pod use the in-cluster service-account credentials.
if not path.exists('/.dockerenv'):
    kubernetes.config.load_kube_config()
else:
    kubernetes.config.load_incluster_config()
# Shared operator-service configuration (group/version/namespace, etc.).
config = Config()
@services.route('/compute', methods=['POST'])
def start_compute_job():
    """
    Create and start the compute job
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - in: body
        name: body
        required: false
        description: Init workflow.
        schema:
          workflow:
            nullable: true
            example: {
                "agreementId": "0x111111",
                "owner": "0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA",
                "providerSignature": "ae01",
                "workflow": {
                    "stages": [
                        {
                            "index": 0,
                            "input": [
                                {
                                    "id": "did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf",
                                    "url": [
                                        "https://data.ok.gov/sites/default/files/unspsc%20codes_3.csv"
                                    ],
                                    "index": 0
                                },
                                {
                                    "id": "did:op:1384941e6f0b46299b6e515723df3d8e8e5d1fb175554467a1cb7bc613f5c72e",
                                    "url": [
                                        "https://data.ct.gov/api/views/2fi9-sgi3/rows.csv?accessType=DOWNLOAD"
                                    ],
                                    "index": 1
                                }
                            ],
                            "compute": {
                                "Instances": 1,
                                "namespace": "withgpu",
                                "maxtime": 3600
                            },
                            "algorithm": {
                                "id": "did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf",
                                "url": "https://raw.githubusercontent.com/oceanprotocol/test-algorithm/master/javascript/algo.js",
                                "rawcode": "console.log('this is a test')",
                                "container": {
                                    "image": "node",
                                    "tag": "10",
                                    "entrypoint": "node $ALGO"
                                }
                            },
                            "output": {
                                "nodeUri": "https://nile.dev-ocean.com",
                                "brizoUri": "https://brizo.marketplace.dev-ocean.com",
                                "brizoAddress": "0x4aaab179035dc57b35e2ce066919048686f82972",
                                "metadata": {
                                    "name": "Workflow output"
                                },
                                "metadataUri": "https://aquarius.marketplace.dev-ocean.com",
                                "secretStoreUri": "https://secret-store.nile.dev-ocean.com",
                                "whitelist": [
                                    "0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e",
                                    "0xACBd138aBD70e2F00903268F3Db08f2D25677C9e"
                                ],
                                "owner": "0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA",
                                "publishOutput": true,
                                "publishAlgorithmLog": true
                            }
                        }
                    ]
                }
            }
    response:
      201:
        description: Workflow inited successfully.
      400:
        description: Some error
    """
    data = request.args if request.args else request.json
    required_attributes = [
        'workflow',
        'agreementId',
        'owner',
        'providerSignature'
    ]
    msg, status = check_required_attributes(required_attributes, data, 'POST:/compute')
    if msg:
        return jsonify(error=msg), status
    workflow = data.get('workflow')
    agreement_id = data.get('agreementId')
    owner = data.get('owner')
    if not workflow:
        return jsonify(error='`workflow` is required in the payload and must '
                             'include workflow stages'), 400
    # verify provider's signature
    msg, status = process_signature_validation(data.get('providerSignature'), agreement_id)
    if msg:
        # NOTE(review): the validator's detailed message is discarded here and a
        # generic error is returned — confirm that is intentional.
        return jsonify(error='`providerSignature` of agreementId is required.'), status
    stages = workflow.get('stages')
    if not stages:
        # BUGFIX(lint): f-string had no placeholders.
        logging.error('Missing stages')
        return jsonify(error='Missing stages'), 400
    # Only stage 0 is validated; later stages are assumed well-formed.
    for _attr in ('algorithm', 'compute', 'input', 'output'):
        if _attr not in stages[0]:
            logging.error(f'Missing {_attr} in stage 0')
            return jsonify(error=f'Missing {_attr} in stage 0'), 400
    # Loop through stages: clamp maxtime to the pod timeout and attach the
    # cluster resource definition to each stage.
    timeout = int(os.getenv("ALGO_POD_TIMEOUT", 0))
    compute_resources_def = get_compute_resources()
    # BUGFIX(idiom): the enumerate() index was never used.
    for astage in workflow['stages']:
        if timeout > 0:
            maxtime = int(astage['compute'].get('maxtime', 0))
            # Overwrite when the requested maxtime exceeds the cap or is unset.
            if timeout < maxtime or maxtime <= 0:
                astage['compute']['maxtime'] = timeout
                logging.debug(f"Maxtime in workflow was {maxtime}. Overwritten to {timeout}")
        astage['compute']['resources'] = compute_resources_def
    job_id = generate_new_id()
    logging.debug(f'Got job_id: {job_id}')
    body = create_compute_job(
        workflow, job_id, config.group, config.version, config.namespace
    )
    logging.debug(f'Got body: {body}')
    kube_api = KubeAPI(config)
    try:
        api_response = kube_api.create_namespaced_custom_object(body)
        logging.info(api_response)
        create_sql_job(agreement_id, str(job_id), owner)
        status_list = get_sql_status(agreement_id, str(job_id), owner)
        # NOTE(review): swagger above documents 201, but 200 is returned.
        return jsonify(status_list), 200
    except ApiException as e:
        logging.error(f'Exception when calling CustomObjectsApi->create_namespaced_custom_object: {e}')
        return jsonify(error='Unable to create job'), 400
@services.route('/compute', methods=['PUT'])
def stop_compute_job():
    """
    Stop the current compute job..
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - name: agreementId
        in: query
        description: agreementId
        type: string
      - name: jobId
        in: query
        description: Id of the job.
        type: string
      - name: owner
        in: query
        description: owner
        type: string
    """
    try:
        data = request.args if request.args else request.json
        agreement_id = data.get('agreementId')
        owner = data.get('owner')
        job_id = data.get('jobId')
        # Identifiers shorter than two characters count as "not provided".
        if not agreement_id or len(agreement_id) < 2:
            agreement_id = None
        if not job_id or len(job_id) < 2:
            job_id = None
        if not owner or len(owner) < 2:
            owner = None
        no_filter_given = (
            owner is None and agreement_id is None and job_id is None
        )
        if no_filter_given:
            msg = f'You have to specify one of agreementId, jobId or owner'
            logging.error(msg)
            return jsonify(error=msg), 400
        # Mark every matching job as stopped in the SQL store.
        for name in get_sql_jobs(agreement_id, job_id, owner):
            logging.info(f'Stopping job : {name}')
            stop_sql_job(name)
        status_list = get_sql_status(agreement_id, job_id, owner)
        return jsonify(status_list), 200
    except ApiException as e:
        logging.error(f'Exception when stopping compute job: {e}')
        return jsonify(error=f'Error stopping job: {e}'), 400
@services.route('/compute', methods=['DELETE'])
def delete_compute_job():
    """
    Deletes the current compute job.
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - name: agreementId
        in: query
        description: agreementId
        type: string
      - name: jobId
        in: query
        description: Id of the job.
        type: string
      - name: owner
        in: query
        description: owner
        type: string
    """
    body = kubernetes.client.V1DeleteOptions()  # V1DeleteOptions |
    # Duration in seconds before the object is deleted. Must be a non-negative
    # integer; zero means delete immediately.
    grace_period_seconds = 56
    # BUGFIX: the original set BOTH the deprecated `orphan_dependents` flag and
    # a literal placeholder `propagation_policy='propagation_policy_example'`.
    # Per the Kubernetes API, OrphanDependents and PropagationPolicy are
    # mutually exclusive, and the placeholder is not a valid policy. Use only
    # PropagationPolicy with a valid value ('Background' deletes dependents
    # asynchronously, matching the cluster's usual default).
    propagation_policy = 'Background'
    try:
        data = request.args if request.args else request.json
        agreement_id = data.get('agreementId')
        owner = data.get('owner')
        job_id = data.get('jobId')
        # Treat missing or too-short identifiers as "not supplied".
        if not agreement_id or len(agreement_id) < 2:
            agreement_id = None
        if not job_id or len(job_id) < 2:
            job_id = None
        if not owner or len(owner) < 2:
            owner = None
        if owner is None and agreement_id is None and job_id is None:
            msg = 'You have to specify one of agreementId, jobId or owner'
            logging.error(msg)
            return jsonify(error=msg), 400
        kube_api = KubeAPI(config)
        jobs_list = get_sql_jobs(agreement_id, job_id, owner)
        logging.debug(f'Got {jobs_list}')
        for name in jobs_list:
            # Remove the SQL record first, then the Kubernetes custom object.
            remove_sql_job(name)
            api_response = kube_api.delete_namespaced_custom_object(
                name,
                body,
                grace_period_seconds=grace_period_seconds,
                propagation_policy=propagation_policy
            )
            logging.debug(api_response)
        status_list = get_sql_status(agreement_id, job_id, owner)
        return jsonify(status_list), 200
    except ApiException as e:
        logging.error(f'Exception when calling CustomObjectsApi->delete_namespaced_custom_object: {e}')
        return jsonify(error=f'Error deleting job {e}'), 400
@services.route('/compute', methods=['GET'])
def get_compute_job_status():
    """
    Get status for an specific or multiple jobs.
    ---
    tags:
      - operation
    consumes:
      - application/json
    parameters:
      - name: agreementId
        in: query
        description: agreementId
        type: string
      - name: jobId
        in: query
        description: Id of the job.
        type: string
      - name: owner
        in: query
        description: owner
        type: string
    responses:
      200:
        description: Get correctly the status
      400:
        description: Error
    """
    try:
        data = request.args if request.args else request.json
        agreement_id = data.get('agreementId')
        owner = data.get('owner')
        job_id = data.get('jobId')
        # Normalise each filter: anything shorter than 2 chars is ignored.
        if not agreement_id or len(agreement_id) < 2:
            agreement_id = None
        if not job_id or len(job_id) < 2:
            job_id = None
        if not owner or len(owner) < 2:
            owner = None
        if agreement_id is None and job_id is None and owner is None:
            msg = f'You have to specify one of agreementId, jobId or owner'
            logging.error(msg)
            return jsonify(error=msg), 400
        logging.debug("Try to start")
        api_response = get_sql_status(agreement_id, job_id, owner)
        return jsonify(api_response), 200
    except ApiException as e:
        msg = f'Error getting the status: {e}'
        logging.error(msg)
        return jsonify(error=msg), 400
import json
import time
from asyncio import TimeoutError
from datetime import timedelta
from json.decoder import JSONDecodeError
from random import choice
from typing import Union
import discord
from discord.ext import commands, tasks
from discord.ext.commands import Cog, command
from lib import (FutureTime,
GiveawayConfig, LinksAndVars, cache, convert,
Database, format_relative,
is_mod)
from DiscordUtils import SuccessEmbed, Embed, ErrorEmbed
class Giveaway(Cog):
    """Moderator commands to create, reroll and delete giveaways, plus a
    background task that declares winners once a giveaway's timer expires.
    """

    def __init__(self, bot):
        self.bot = bot
        self.description = "Helps you to organise a simple giveaway."
        self.giveaway_image = LinksAndVars.giveaway_image.value
        # Start the background loop that settles expired giveaways.
        self.declare_results.start()

    @cache()
    async def get_giveaway_config(
        self,
        giveaway_id: discord.Message,
    ):
        """Fetch (and cache) the stored configuration for a giveaway message."""
        return await GiveawayConfig.from_record(giveaway_id, self.bot)

    @property
    def display_emoji(self) -> discord.PartialEmoji:
        return discord.PartialEmoji(name="\N{PARTY POPPER}")

    async def database_class(self):
        """Return the channel-backed database used to store giveaway timers."""
        return await self.bot.db.new(
            Database.database_category_name.value,
            Database.giveaway_time_channel_name.value,
        )

    async def create_timer_for_giveaway(
        self, giveaway_id: discord.Message, time_ends: Union[int, FutureTime]
    ) -> None:
        """Creates the timer for the giveaway

        :param giveaway_id: Giveaway id
        :type giveaway_id: discord.Message
        :param time_ends: Time when the giveaway will end
        :type time_ends: Union[int, FutureTime]
        """
        database = await self.database_class()
        await database.set(giveaway_id.id, [int(time_ends), giveaway_id.jump_url])

    @tasks.loop(minutes=30, reconnect=True)
    async def declare_results(self):
        """Every 30 minutes scan stored timers and settle expired giveaways."""
        database = await self.database_class()
        # NOTE(review): reaches into the name-mangled channel attribute of the
        # Database wrapper — confirm no public accessor exists.
        async for message in database._Database__channel.history(limit=None):
            try:
                record = json.loads(str(message.content))
                record.pop("type")
                record_keys = list(map(str, record.keys()))
                try:
                    giveaway_message = await commands.MessageConverter().convert(
                        await self.bot.get_context(message), record[record_keys[0]][1]
                    )
                    timestamp = record[record_keys[0]][0]
                    if discord.utils.utcnow().timestamp() >= int(timestamp):
                        winner = await self.determine_winner(giveaway_message, self.bot)
                        # BUGFIX: determine_winner returns a str on failure; the
                        # original unconditionally accessed `.mention` and would
                        # raise AttributeError. Only announce real winners.
                        if not isinstance(winner, str):
                            await giveaway_message.channel.send(
                                f"\U0001f389 Congratulations **{winner.mention}** on winning the Giveaway \U0001f389",
                                reference=giveaway_message,
                            )
                        await message.delete()
                        self.get_giveaway_config.invalidate(self, giveaway_message.id)
                except (commands.ChannelNotFound, commands.MessageNotFound,
                        commands.ChannelNotReadable):
                    # Giveaway message/channel vanished: drop the stale timer.
                    await message.delete()
            except JSONDecodeError:
                # Not a timer record; drop it.
                await message.delete()

    @command(
        name="giveaway",
        aliases=["gcreate", "gcr", "giftcr"],
    )
    @commands.guild_only()
    @is_mod()
    async def create_giveaway(self, ctx: commands.Context):
        """Allows you to create a giveaway by answering some simple questions!"""
        # Ask Questions
        embed = Embed(
            title="Giveaway Time!! \U00002728",
            description="Time for a new Giveaway. Answer the following questions in 25 seconds each for the Giveaway",
            color=ctx.author.color,
        )
        await ctx.send(embed=embed)
        questions = [
            "In Which channel do you want to host the giveaway?",
            "For How long should the Giveaway be hosted ? type number followed (s|m|h|d)",
            "What is the Prize?",
            "What role should a person must have in order to enter? If no roles required then type `none`",
            "Tasks that the person should do in order to participate? If no tasks then type `none`",
        ]
        answers = []

        def check(m):
            return m.author == ctx.author and m.channel == ctx.channel

        for i, question in enumerate(questions):
            await ctx.send(embed=Embed(title=f"Question {i+1}", description=question))
            try:
                message = await self.bot.wait_for("message", timeout=25, check=check)
            except TimeoutError:
                await ctx.send("You didn't answer the questions in Time")
                return
            answers.append(message.content)
        # Channel answer is expected as a mention like <#1234>; strip "<#"/">".
        try:
            channel_id = int(answers[0][2:-1])
        except ValueError:  # BUGFIX(idiom): was a bare except
            await ctx.send(
                f"The Channel provided was wrong. The channel provided should be like {ctx.channel.mention}"
            )
            return
        channel = self.bot.get_channel(channel_id)
        # BUGFIX(robustness): a syntactically valid but unknown id returned
        # None and crashed later on channel.send; reject it up front.
        if channel is None:
            await ctx.send(
                f"The Channel provided was wrong. The channel provided should be like {ctx.channel.mention}"
            )
            return
        # Role answer is expected as a mention like <@&1234>.
        role = answers[3]
        if role.lower() in ("none", "no", "no roles"):
            role = None
        else:
            try:
                int(answers[3][3:-1])
            except ValueError:
                # BUGFIX: the original removed items from ctx.guild.roles while
                # iterating it (skips elements); build a filtered copy instead.
                selectable = [
                    r for r in ctx.guild.roles if r.name not in ("@everyone", "@here")
                ]
                bot_roles = choice(selectable)
                await ctx.send(
                    f"The role provided was wrong. The role should be like {bot_roles.mention}"
                )
                return
        # BUGFIX: the original checked `time == -1` / `time == -2`, comparing
        # the imported ``time`` module — the checks could never fire and bad
        # durations slipped through. Validate the conversion result itself.
        duration_seconds = convert(answers[1])
        if duration_seconds == -1:
            await ctx.send("The Time format was wrong")
            return
        if duration_seconds == -2:
            await ctx.send("The Time was not conventional number")
            return
        time_ends = duration_seconds * 1000  # milliseconds
        prize = answers[2]
        task = answers[4]
        if task.lower() in ("none", "no", "no task"):
            task = None
        embed = Embed(
            title="**:tada::tada: Giveaway Time !! :tada::tada:**",
            description=f":gift: Win a **{prize}** today",
            colour=0x00FFFF,
        )
        embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar.url)
        embed.set_image(url=self.giveaway_image)
        embed.add_field(
            name="Giveway ends in",
            value=f"{format_relative(discord.utils.utcnow() + timedelta(milliseconds=time_ends))} | [Timer]({LinksAndVars.website.value}/giveaway_timer.html?start={int(time.time() * 1000)}&length={time_ends})",
        )
        if role:
            embed.add_field(name="Role Required", value=f"{role}")
        if task:
            embed.add_field(name="\U0001f3c1 Tasks", value=task)
        new_msg = await channel.send(embed=embed)
        # The footer needs the message id, so edit after the initial send.
        embed.set_footer(text=f"Giveaway ID: {new_msg.id}")
        await new_msg.edit(embed=embed)
        await new_msg.add_reaction(discord.PartialEmoji(name="\U0001f389"))
        await ctx.send(
            f"Your giveaway will be hosted in {channel.mention} and will last for {answers[1]}\n{new_msg.jump_url}"
        )
        await self.create_timer_for_giveaway(
            new_msg,
            (discord.utils.utcnow() + timedelta(milliseconds=time_ends)).timestamp(),
        )

    async def determine_winner(
        self, giveaway_id: discord.Message, bot: commands.Bot
    ) -> Union[str, discord.Member]:
        """Determines winner

        :param giveaway_id: The giveaway message
        :type giveaway_id: discord.Message
        :param bot: The bot class
        :type bot: commands.Bot
        :return: The winning member, or an error string on failure
        :rtype: Union[str, discord.Member]
        """
        reactions = discord.utils.find(
            lambda a: str(a) == str(discord.PartialEmoji(name="\U0001f389")),
            giveaway_id.reactions,
        )
        if reactions is None:
            return "The channel or ID mentioned was incorrect"
        try:
            # NOTE(review): giveaway_id.id is always an int, so this expression
            # always passes the Message object itself — confirm intended.
            giveaway_config = await self.get_giveaway_config(
                giveaway_id.id if not isinstance(giveaway_id.id, int) else giveaway_id
            )
        except AttributeError as e:
            return str(e)
        reacted_users = await reactions.users().flatten()
        bot_entry = discord.utils.get(reacted_users, id=bot.application_id)
        if bot_entry:
            reacted_users.remove(bot_entry)
        # BUGFIX: the role filter was gated on `len(reacted_users) <= 0`, i.e.
        # it only ran on an EMPTY entrant list, so role requirements were never
        # enforced. Filter when there ARE entrants.
        if giveaway_config.role_required is not None and len(reacted_users) > 0:
            required_role_id = int(
                giveaway_config.role_required.lstrip('<@&').lstrip('<&').rstrip('>')
            )
            reacted_users = [
                user for user in reacted_users
                if discord.utils.get(user.roles, id=required_role_id) is not None
            ]
        if len(reacted_users) <= 0:
            empty_embed = Embed(
                title="\U0001f389\U0001f389 Giveaway Time !! \U0001f389\U0001f389",
                description="\U0001f381 Win a Prize today",
            )
            empty_embed.set_author(
                name=giveaway_config.host.display_name,
                icon_url=giveaway_config.host.display_avatar.url,
            )
            empty_embed.add_field(
                name="No winners",
                value="Not enough participants to determine the winners",
            )
            empty_embed.set_image(url=self.giveaway_image)
            empty_embed.set_footer(text="No one won the Giveaway")
            await giveaway_id.edit(embed=empty_embed)
            return f"No one won the giveaway! As there were not enough participants!\n{giveaway_config.jump_url}"
        winner = choice(reacted_users)
        winner_embed = giveaway_config.embed
        if discord.utils.find(
            lambda a: a["name"].lower() == "\U0001f389 Winner \U0001f389".lower(),
            giveaway_config.embed_dict["fields"],
        ) is None:
            winner_embed.add_field(
                name="\U0001f389 Winner \U0001f389", value=winner.mention, inline=False
            )
        await giveaway_id.edit(embed=winner_embed)
        return winner

    @command(
        name="giftrrl",
        usage="<giveaway id> [channel]",
        aliases=["gifreroll", "gftroll", "grr", "giftroll", "giveawayroll", "giveaway_roll", "reroll"],
    )
    @is_mod()
    @commands.guild_only()
    async def giveaway_reroll(
        self, ctx: commands.Context,
        giveaway_id: Union[commands.MessageConverter, discord.Message],
    ):
        """
        It picks out the giveaway winners
        `Note: It dosen't checks for task, It only checks for roles if specified`
        """
        if not await ctx.prompt(
            f"Do you really want to **reroll or declare the results for** giveaway with id **{giveaway_id.id}**, hosted in {giveaway_id.channel.mention}?"
        ):
            return
        channel = giveaway_id.channel
        winner = await self.determine_winner(giveaway_id, ctx.bot)
        if isinstance(winner, str):
            # determine_winner reported an error message.
            return await ctx.send(winner)
        await channel.send(
            f"\U0001f389 Congratulations **{winner.mention}** on winning the Giveaway \U0001f389",
            reference=giveaway_id,
        )
        await ctx.send(giveaway_id.jump_url)
        self.get_giveaway_config.invalidate(self, giveaway_id.id)

    @command(
        name="giftdel",
        usage="<giveaway id>",
        aliases=["gifdel", "gftdel", "gdl"],
    )
    @is_mod()
    @commands.guild_only()
    async def giveaway_stop(
        self, ctx: commands.Context,
        giveaway_id: Union[commands.MessageConverter, discord.Message],
    ):
        """
        Cancels the specified giveaway
        `Note: This also deletes that giveaway message`
        """
        if not await ctx.prompt(
            f"Do you really want to **stop/delete** the giveaway with id **{giveaway_id.id}** hosted in {giveaway_id.channel.mention}?\n`Note: This action is irreversible!`"
        ):
            return
        try:
            await self.get_giveaway_config(giveaway_id.id)
        except AttributeError as e:
            return await ctx.send(ErrorEmbed(title=str(e)))
        database = await self.database_class()
        await database.delete(giveaway_id.id)
        await giveaway_id.delete()
        self.get_giveaway_config.invalidate(self, giveaway_id.id)
        await ctx.send(embed=SuccessEmbed(title=f'The giveaway with id {giveaway_id.id} deleted successfully!'))

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent) -> None:
        """Enforce the role requirement when someone reacts to a giveaway."""
        if payload.guild_id is None:
            return
        if str(payload.emoji) != str(discord.PartialEmoji(name="\U0001f389")):
            return
        if payload.user_id == self.bot.application_id:
            return
        msg = await (await self.bot.fetch_channel(payload.channel_id)).fetch_message(payload.message_id)
        try:
            giveaway_config = await self.get_giveaway_config(payload.message_id)
        except AttributeError:
            # Not a giveaway message.
            return
        if giveaway_config.role_required is None:
            return
        role_present = discord.utils.get(
            payload.member.roles,
            id=int(giveaway_config.role_required.lstrip('<@&').lstrip('<&').rstrip('>')),
        )
        if role_present is None:
            try:
                await msg.remove_reaction(discord.PartialEmoji(name="\U0001f389"), payload.member)
                await payload.member.send('\U000026a0 Sorry you don\'t have the required roles in order to enter the giveaway :(')
            except (discord.HTTPException, discord.Forbidden, discord.InvalidArgument, discord.NotFound):
                # Best-effort cleanup; DMs may be closed or message gone.
                pass
def setup(bot):
    """Entry point used by discord.py to load the Giveaway cog."""
    bot.add_cog(Giveaway(bot))
import time
from asyncio import TimeoutError
from datetime import timedelta
from json.decoder import JSONDecodeError
from random import choice
from typing import Union
import discord
from discord.ext import commands, tasks
from discord.ext.commands import Cog, command
from lib import (FutureTime,
GiveawayConfig, LinksAndVars, cache, convert,
Database, format_relative,
is_mod)
from DiscordUtils import SuccessEmbed, Embed, ErrorEmbed
class Giveaway(Cog):
    """Moderator commands to create, reroll and delete giveaways, plus a
    background task that declares winners once a giveaway's timer expires.
    """

    def __init__(self, bot):
        self.bot = bot
        self.description = "Helps you to organise a simple giveaway."
        self.giveaway_image = LinksAndVars.giveaway_image.value
        # Start the background loop that settles expired giveaways.
        self.declare_results.start()

    @cache()
    async def get_giveaway_config(
        self,
        giveaway_id: discord.Message,
    ):
        """Fetch (and cache) the stored configuration for a giveaway message."""
        return await GiveawayConfig.from_record(giveaway_id, self.bot)

    @property
    def display_emoji(self) -> discord.PartialEmoji:
        return discord.PartialEmoji(name="\N{PARTY POPPER}")

    async def database_class(self):
        """Return the channel-backed database used to store giveaway timers."""
        return await self.bot.db.new(
            Database.database_category_name.value,
            Database.giveaway_time_channel_name.value,
        )

    async def create_timer_for_giveaway(
        self, giveaway_id: discord.Message, time_ends: Union[int, FutureTime]
    ) -> None:
        """Creates the timer for the giveaway

        :param giveaway_id: Giveaway id
        :type giveaway_id: discord.Message
        :param time_ends: Time when the giveaway will end
        :type time_ends: Union[int, FutureTime]
        """
        database = await self.database_class()
        await database.set(giveaway_id.id, [int(time_ends), giveaway_id.jump_url])

    @tasks.loop(minutes=30, reconnect=True)
    async def declare_results(self):
        """Every 30 minutes scan stored timers and settle expired giveaways."""
        database = await self.database_class()
        # NOTE(review): reaches into the name-mangled channel attribute of the
        # Database wrapper — confirm no public accessor exists.
        async for message in database._Database__channel.history(limit=None):
            try:
                record = json.loads(str(message.content))
                record.pop("type")
                record_keys = list(map(str, record.keys()))
                try:
                    giveaway_message = await commands.MessageConverter().convert(
                        await self.bot.get_context(message), record[record_keys[0]][1]
                    )
                    timestamp = record[record_keys[0]][0]
                    if discord.utils.utcnow().timestamp() >= int(timestamp):
                        winner = await self.determine_winner(giveaway_message, self.bot)
                        # BUGFIX: determine_winner returns a str on failure; the
                        # original unconditionally accessed `.mention` and would
                        # raise AttributeError. Only announce real winners.
                        if not isinstance(winner, str):
                            await giveaway_message.channel.send(
                                f"\U0001f389 Congratulations **{winner.mention}** on winning the Giveaway \U0001f389",
                                reference=giveaway_message,
                            )
                        await message.delete()
                        self.get_giveaway_config.invalidate(self, giveaway_message.id)
                except (commands.ChannelNotFound, commands.MessageNotFound,
                        commands.ChannelNotReadable):
                    # Giveaway message/channel vanished: drop the stale timer.
                    await message.delete()
            except JSONDecodeError:
                # Not a timer record; drop it.
                await message.delete()

    @command(
        name="giveaway",
        aliases=["gcreate", "gcr", "giftcr"],
    )
    @commands.guild_only()
    @is_mod()
    async def create_giveaway(self, ctx: commands.Context):
        """Allows you to create a giveaway by answering some simple questions!"""
        # Ask Questions
        embed = Embed(
            title="Giveaway Time!! \U00002728",
            description="Time for a new Giveaway. Answer the following questions in 25 seconds each for the Giveaway",
            color=ctx.author.color,
        )
        await ctx.send(embed=embed)
        questions = [
            "In Which channel do you want to host the giveaway?",
            "For How long should the Giveaway be hosted ? type number followed (s|m|h|d)",
            "What is the Prize?",
            "What role should a person must have in order to enter? If no roles required then type `none`",
            "Tasks that the person should do in order to participate? If no tasks then type `none`",
        ]
        answers = []

        def check(m):
            return m.author == ctx.author and m.channel == ctx.channel

        for i, question in enumerate(questions):
            await ctx.send(embed=Embed(title=f"Question {i+1}", description=question))
            try:
                message = await self.bot.wait_for("message", timeout=25, check=check)
            except TimeoutError:
                await ctx.send("You didn't answer the questions in Time")
                return
            answers.append(message.content)
        # Channel answer is expected as a mention like <#1234>; strip "<#"/">".
        try:
            channel_id = int(answers[0][2:-1])
        except ValueError:  # BUGFIX(idiom): was a bare except
            await ctx.send(
                f"The Channel provided was wrong. The channel provided should be like {ctx.channel.mention}"
            )
            return
        channel = self.bot.get_channel(channel_id)
        # BUGFIX(robustness): a syntactically valid but unknown id returned
        # None and crashed later on channel.send; reject it up front.
        if channel is None:
            await ctx.send(
                f"The Channel provided was wrong. The channel provided should be like {ctx.channel.mention}"
            )
            return
        # Role answer is expected as a mention like <@&1234>.
        role = answers[3]
        if role.lower() in ("none", "no", "no roles"):
            role = None
        else:
            try:
                int(answers[3][3:-1])
            except ValueError:
                # BUGFIX: the original removed items from ctx.guild.roles while
                # iterating it (skips elements); build a filtered copy instead.
                selectable = [
                    r for r in ctx.guild.roles if r.name not in ("@everyone", "@here")
                ]
                bot_roles = choice(selectable)
                await ctx.send(
                    f"The role provided was wrong. The role should be like {bot_roles.mention}"
                )
                return
        # BUGFIX: the original checked `time == -1` / `time == -2`, comparing
        # the imported ``time`` module — the checks could never fire and bad
        # durations slipped through. Validate the conversion result itself.
        duration_seconds = convert(answers[1])
        if duration_seconds == -1:
            await ctx.send("The Time format was wrong")
            return
        if duration_seconds == -2:
            await ctx.send("The Time was not conventional number")
            return
        time_ends = duration_seconds * 1000  # milliseconds
        prize = answers[2]
        task = answers[4]
        if task.lower() in ("none", "no", "no task"):
            task = None
        embed = Embed(
            title="**:tada::tada: Giveaway Time !! :tada::tada:**",
            description=f":gift: Win a **{prize}** today",
            colour=0x00FFFF,
        )
        embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar.url)
        embed.set_image(url=self.giveaway_image)
        embed.add_field(
            name="Giveway ends in",
            value=f"{format_relative(discord.utils.utcnow() + timedelta(milliseconds=time_ends))} | [Timer]({LinksAndVars.website.value}/giveaway_timer.html?start={int(time.time() * 1000)}&length={time_ends})",
        )
        if role:
            embed.add_field(name="Role Required", value=f"{role}")
        if task:
            embed.add_field(name="\U0001f3c1 Tasks", value=task)
        new_msg = await channel.send(embed=embed)
        # The footer needs the message id, so edit after the initial send.
        embed.set_footer(text=f"Giveaway ID: {new_msg.id}")
        await new_msg.edit(embed=embed)
        await new_msg.add_reaction(discord.PartialEmoji(name="\U0001f389"))
        await ctx.send(
            f"Your giveaway will be hosted in {channel.mention} and will last for {answers[1]}\n{new_msg.jump_url}"
        )
        await self.create_timer_for_giveaway(
            new_msg,
            (discord.utils.utcnow() + timedelta(milliseconds=time_ends)).timestamp(),
        )

    async def determine_winner(
        self, giveaway_id: discord.Message, bot: commands.Bot
    ) -> Union[str, discord.Member]:
        """Determines winner

        :param giveaway_id: The giveaway message
        :type giveaway_id: discord.Message
        :param bot: The bot class
        :type bot: commands.Bot
        :return: The winning member, or an error string on failure
        :rtype: Union[str, discord.Member]
        """
        reactions = discord.utils.find(
            lambda a: str(a) == str(discord.PartialEmoji(name="\U0001f389")),
            giveaway_id.reactions,
        )
        if reactions is None:
            return "The channel or ID mentioned was incorrect"
        try:
            # NOTE(review): giveaway_id.id is always an int, so this expression
            # always passes the Message object itself — confirm intended.
            giveaway_config = await self.get_giveaway_config(
                giveaway_id.id if not isinstance(giveaway_id.id, int) else giveaway_id
            )
        except AttributeError as e:
            return str(e)
        reacted_users = await reactions.users().flatten()
        bot_entry = discord.utils.get(reacted_users, id=bot.application_id)
        if bot_entry:
            reacted_users.remove(bot_entry)
        # BUGFIX: the role filter was gated on `len(reacted_users) <= 0`, i.e.
        # it only ran on an EMPTY entrant list, so role requirements were never
        # enforced. Filter when there ARE entrants.
        if giveaway_config.role_required is not None and len(reacted_users) > 0:
            required_role_id = int(
                giveaway_config.role_required.lstrip('<@&').lstrip('<&').rstrip('>')
            )
            reacted_users = [
                user for user in reacted_users
                if discord.utils.get(user.roles, id=required_role_id) is not None
            ]
        if len(reacted_users) <= 0:
            empty_embed = Embed(
                title="\U0001f389\U0001f389 Giveaway Time !! \U0001f389\U0001f389",
                description="\U0001f381 Win a Prize today",
            )
            empty_embed.set_author(
                name=giveaway_config.host.display_name,
                icon_url=giveaway_config.host.display_avatar.url,
            )
            empty_embed.add_field(
                name="No winners",
                value="Not enough participants to determine the winners",
            )
            empty_embed.set_image(url=self.giveaway_image)
            empty_embed.set_footer(text="No one won the Giveaway")
            await giveaway_id.edit(embed=empty_embed)
            return f"No one won the giveaway! As there were not enough participants!\n{giveaway_config.jump_url}"
        winner = choice(reacted_users)
        winner_embed = giveaway_config.embed
        if discord.utils.find(
            lambda a: a["name"].lower() == "\U0001f389 Winner \U0001f389".lower(),
            giveaway_config.embed_dict["fields"],
        ) is None:
            winner_embed.add_field(
                name="\U0001f389 Winner \U0001f389", value=winner.mention, inline=False
            )
        await giveaway_id.edit(embed=winner_embed)
        return winner

    @command(
        name="giftrrl",
        usage="<giveaway id> [channel]",
        aliases=["gifreroll", "gftroll", "grr", "giftroll", "giveawayroll", "giveaway_roll", "reroll"],
    )
    @is_mod()
    @commands.guild_only()
    async def giveaway_reroll(
        self, ctx: commands.Context,
        giveaway_id: Union[commands.MessageConverter, discord.Message],
    ):
        """
        It picks out the giveaway winners
        `Note: It dosen't checks for task, It only checks for roles if specified`
        """
        if not await ctx.prompt(
            f"Do you really want to **reroll or declare the results for** giveaway with id **{giveaway_id.id}**, hosted in {giveaway_id.channel.mention}?"
        ):
            return
        channel = giveaway_id.channel
        winner = await self.determine_winner(giveaway_id, ctx.bot)
        if isinstance(winner, str):
            # determine_winner reported an error message.
            return await ctx.send(winner)
        await channel.send(
            f"\U0001f389 Congratulations **{winner.mention}** on winning the Giveaway \U0001f389",
            reference=giveaway_id,
        )
        await ctx.send(giveaway_id.jump_url)
        self.get_giveaway_config.invalidate(self, giveaway_id.id)

    @command(
        name="giftdel",
        usage="<giveaway id>",
        aliases=["gifdel", "gftdel", "gdl"],
    )
    @is_mod()
    @commands.guild_only()
    async def giveaway_stop(
        self, ctx: commands.Context,
        giveaway_id: Union[commands.MessageConverter, discord.Message],
    ):
        """
        Cancels the specified giveaway
        `Note: This also deletes that giveaway message`
        """
        if not await ctx.prompt(
            f"Do you really want to **stop/delete** the giveaway with id **{giveaway_id.id}** hosted in {giveaway_id.channel.mention}?\n`Note: This action is irreversible!`"
        ):
            return
        try:
            await self.get_giveaway_config(giveaway_id.id)
        except AttributeError as e:
            return await ctx.send(ErrorEmbed(title=str(e)))
        database = await self.database_class()
        await database.delete(giveaway_id.id)
        await giveaway_id.delete()
        self.get_giveaway_config.invalidate(self, giveaway_id.id)
        await ctx.send(embed=SuccessEmbed(title=f'The giveaway with id {giveaway_id.id} deleted successfully!'))

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent) -> None:
        """Enforce the role requirement when someone reacts to a giveaway."""
        if payload.guild_id is None:
            return
        if str(payload.emoji) != str(discord.PartialEmoji(name="\U0001f389")):
            return
        if payload.user_id == self.bot.application_id:
            return
        msg = await (await self.bot.fetch_channel(payload.channel_id)).fetch_message(payload.message_id)
        try:
            giveaway_config = await self.get_giveaway_config(payload.message_id)
        except AttributeError:
            # Not a giveaway message.
            return
        if giveaway_config.role_required is None:
            return
        role_present = discord.utils.get(
            payload.member.roles,
            id=int(giveaway_config.role_required.lstrip('<@&').lstrip('<&').rstrip('>')),
        )
        if role_present is None:
            try:
                await msg.remove_reaction(discord.PartialEmoji(name="\U0001f389"), payload.member)
                await payload.member.send('\U000026a0 Sorry you don\'t have the required roles in order to enter the giveaway :(')
            except (discord.HTTPException, discord.Forbidden, discord.InvalidArgument, discord.NotFound):
                # Best-effort cleanup; DMs may be closed or message gone.
                pass
def setup(bot):
    """discord.py extension entry point: registers the Giveaway cog."""
    bot.add_cog(Giveaway(bot))
from __future__ import division
import numpy as np
import scipy as sp
from scipy import ndimage
from scipy.sparse.csgraph import connected_components
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.testing import (assert_equal, assert_true, assert_raises,
ignore_warnings)
def test_img_to_graph():
    """Gradient graphs of an x-ramp and a y-ramp must mirror each other."""
    x, y = np.mgrid[:4, :4] - 10
    grad_x = img_to_graph(x)
    grad_y = img_to_graph(y)
    assert_equal(grad_x.nnz, grad_y.nnz)
    # Negative elements are the diagonal: the elements of the original
    # image. Positive elements are the values of the gradient, they
    # should all be equal on grad_x and grad_y
    np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
                                  grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
    """grid_to_graph: edgeless masks, mask dtypes, and output dtype control."""
    # Checking that the function works with graphs containing no edges
    size = 2
    roi_size = 1
    # Generating two convex parts with one vertex
    # Thus, edges will be empty in _to_graph
    # FIX: ``np.bool`` / ``np.int`` are deprecated aliases of the builtins
    # (removed in NumPy >= 1.24); use ``bool`` / ``int`` directly.
    mask = np.zeros((size, size), dtype=bool)
    mask[0:roi_size, 0:roi_size] = True
    mask[-roi_size:, -roi_size:] = True
    mask = mask.reshape(size ** 2)
    A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
    assert connected_components(A)[0] == 2
    # Checking that the function works whatever the type of mask is
    mask = np.ones((size, size), dtype=np.int16)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
    assert connected_components(A)[0] == 1
    # Checking dtype of the graph
    mask = np.ones((size, size))
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
    assert A.dtype == bool
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
    assert A.dtype == int
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
                      dtype=np.float64)
    assert A.dtype == np.float64
@ignore_warnings(category=DeprecationWarning)  # scipy deprecation inside face
def test_connect_regions():
    """Connected components of the pixel graph match ndimage.label."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        # NOTE(review): scipy >= 1.12 removed ``scipy.misc.face`` entirely
        # (moved to ``scipy.datasets.face``) — confirm the supported range.
        from scipy import misc
        face = misc.face(gray=True)
    for thr in (50, 150):
        mask = face > thr
        graph = img_to_graph(face, mask)
        assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
@ignore_warnings(category=DeprecationWarning)  # scipy deprecation inside face
def test_connect_regions_with_grid():
    """grid_to_graph with a mask matches ndimage.label component counts."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        # NOTE(review): scipy >= 1.12 removed ``scipy.misc.face`` entirely
        # (moved to ``scipy.datasets.face``) — confirm the supported range.
        from scipy import misc
        face = misc.face(gray=True)
    mask = face > 50
    graph = grid_to_graph(*face.shape, mask=mask)
    assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
    mask = face > 150
    graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
    assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
    """Load scipy's grayscale "face" image, 4x-downsampled, as float32."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        from scipy import misc
        face = misc.face(gray=True)
    face = face.astype(np.float32)
    # Two successive 2x2 box sums == averaging over 4x4 blocks (sum of 16
    # pixels, divided by 16 below).
    face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
            + face[1::2, 1::2])
    face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
            + face[1::2, 1::2])
    # FIX: dropped the redundant second ``astype(np.float32)`` — the operands
    # are already float32, so the sums never leave float32.
    face /= 16.0
    return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
# Shared fixtures built once at import time: loading and downsampling the
# face image is comparatively expensive, so the tests reuse these arrays.
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
    """Without max_patches, every possible grayscale patch is extracted."""
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 16, 16
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
    """Color patches keep the trailing channel axis."""
    face = orange_face
    i_h, i_w = face.shape[:2]
    p_h, p_w = 16, 16
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
    """Non-square patches on a non-square image still enumerate fully."""
    face = downsampled_face
    face = face[:, 32:97]
    i_h, i_w = face.shape
    p_h, p_w = 16, 12
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
    """max_patches as a count or a fraction; invalid values must raise."""
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 16, 16
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
    assert_equal(patches.shape, (100, p_h, p_w))
    # A float max_patches is interpreted as a fraction of all patches.
    expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
    assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
                  max_patches=2.0)
    assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
                  max_patches=-1.0)
def test_extract_patch_same_size_image():
    """An image-sized patch request yields exactly one patch."""
    face = downsampled_face
    # Request patches of the same size as image
    # Should return just the single patch a.k.a. the image
    patches = extract_patches_2d(face, face.shape, max_patches=2)
    assert_equal(patches.shape[0], 1)
def test_extract_patches_less_than_max_patches():
    """max_patches above the available count returns all patches."""
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 3 * i_h // 4, 3 * i_w // 4
    # this is 3185
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=4000)
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_reconstruct_patches_perfect():
    """extract then reconstruct must round-trip a grayscale image."""
    face = downsampled_face
    p_h, p_w = 16, 16
    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
    """extract then reconstruct must round-trip a color image."""
    face = orange_face
    p_h, p_w = 16, 16
    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
    """fit() is a no-op that returns the estimator itself."""
    faces = face_collection
    extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
    assert extr == extr.fit(faces)
def test_patch_extractor_max_patches():
    """PatchExtractor honors max_patches as a per-image count or fraction."""
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    max_patches = 100
    expected_n_patches = len(faces) * max_patches
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)
    # A float is a fraction of all possible patches per image.
    max_patches = 0.5
    expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
                                          * max_patches)
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)
def test_patch_extractor_max_patches_default():
    """Default patch_size is 1/10th of the image along each axis."""
    faces = face_collection
    extr = PatchExtractor(max_patches=100, random_state=0)
    patches = extr.transform(faces)
    assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
    """Without max_patches, PatchExtractor yields every patch per image."""
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w)
def test_patch_extractor_color():
    """PatchExtractor keeps the channel axis for color image collections."""
    faces = _make_images(orange_face)
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert patches.shape == (expected_n_patches, p_h, p_w, 3)
def test_extract_patches_strided():
    """extract_patches with explicit extraction steps in 1D/2D/3D.

    For each tabulated case we check the number of patches per axis and
    that the last patch equals the sub-image starting at the expected
    origin.
    """
    image_shapes_1D = [(10,), (10,), (11,), (10,)]
    patch_sizes_1D = [(1,), (2,), (3,), (8,)]
    patch_steps_1D = [(1,), (1,), (4,), (2,)]
    expected_views_1D = [(10,), (9,), (3,), (2,)]
    last_patch_1D = [(10,), (8,), (8,), (2,)]
    image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
    patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
    patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
    expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
    last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
    image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
    patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
    patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
    expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
    last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
    image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
    patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
    patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
    expected_views = expected_views_1D + expected_views_2D + expected_views_3D
    last_patches = last_patch_1D + last_patch_2D + last_patch_3D
    for (image_shape, patch_size, patch_step, expected_view,
         last_patch) in zip(image_shapes, patch_sizes, patch_steps,
                            expected_views, last_patches):
        image = np.arange(np.prod(image_shape)).reshape(image_shape)
        patches = extract_patches(image, patch_shape=patch_size,
                                  extraction_step=patch_step)
        ndim = len(image_shape)
        assert patches.shape[:ndim] == expected_view
        # The last patch must match the sub-image at the tabulated origin.
        last_patch_slices = tuple(slice(i, i + j, None) for i, j in
                                  zip(last_patch, patch_size))
        assert_true((patches[(-1, None, None) * ndim] ==
                     image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
    """A scalar patch_shape is broadcast to every dimension."""
    # test same patch size for all dimensions
    face = downsampled_face
    i_h, i_w = face.shape
    p = 8
    expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
    patches = extract_patches(face, patch_shape=p)
    assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
                                  p, p))
def test_width_patch():
    """Patches larger than the image in either dimension must raise."""
    # width and height of the patch should be less than the image
    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert_raises(ValueError, extract_patches_2d, x, (4, 1))
    assert_raises(ValueError, extract_patches_2d, x, (1, 4))
from __future__ import division
import numpy as np
import scipy as sp
from scipy import ndimage
from scipy.sparse.csgraph import connected_components
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.testing import (assert_equal, assert_true, assert_raises,
ignore_warnings)
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
    """grid_to_graph: edgeless masks, mask dtypes, and output dtype control."""
    # Checking that the function works with graphs containing no edges
    size = 2
    roi_size = 1
    # Generating two convex parts with one vertex
    # Thus, edges will be empty in _to_graph
    # FIX: ``np.bool`` / ``np.int`` are deprecated aliases of the builtins
    # (removed in NumPy >= 1.24); use ``bool`` / ``int`` directly.
    mask = np.zeros((size, size), dtype=bool)
    mask[0:roi_size, 0:roi_size] = True
    mask[-roi_size:, -roi_size:] = True
    mask = mask.reshape(size ** 2)
    A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
    assert connected_components(A)[0] == 2
    # Checking that the function works whatever the type of mask is
    mask = np.ones((size, size), dtype=np.int16)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
    assert connected_components(A)[0] == 1
    # Checking dtype of the graph
    mask = np.ones((size, size))
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
    assert A.dtype == bool
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
    assert A.dtype == int
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
                      dtype=np.float64)
    assert A.dtype == np.float64
@ignore_warnings(category=DeprecationWarning) # scipy deprecation inside face
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
@ignore_warnings(category=DeprecationWarning) # scipy deprecation inside face
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_extract_patch_same_size_image():
face = downsampled_face
# Request patches of the same size as image
# Should return just the single patch a.k.a. the image
patches = extract_patches_2d(face, face.shape, max_patches=2)
assert_equal(patches.shape[0], 1)
def test_extract_patches_less_than_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 3 * i_h // 4, 3 * i_w // 4
# this is 3185
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w), max_patches=4000)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert extr == extr.fit(faces)
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w)
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w)
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w)
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w, 3)
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert patches.shape[:ndim] == expected_view
last_patch_slices = tuple(slice(i, i + j, None) for i, j in
zip(last_patch, patch_size))
assert_true((patches[(-1, None, None) * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4)) | 0.828141 | 0.650453 |
import struct
import socket
from datetime import datetime
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
def parse_storage_status(status_code):
    """Return the human-readable name of a FDFS_STORAGE_STATUS_* code.

    Unknown codes yield 'UNKNOW' (sic — spelling kept for backward
    compatibility with callers comparing against the original string).
    """
    # IDIOM FIX: a plain value dict with .get() replaces the original
    # dict-of-lambdas plus try/except KeyError — same results, no needless
    # callables or exception-based control flow.
    status_names = {
        FDFS_STORAGE_STATUS_INIT: 'INIT',
        FDFS_STORAGE_STATUS_WAIT_SYNC: 'WAIT_SYNC',
        FDFS_STORAGE_STATUS_SYNCING: 'SYNCING',
        FDFS_STORAGE_STATUS_IP_CHANGED: 'IP_CHANGED',
        FDFS_STORAGE_STATUS_DELETED: 'DELETED',
        FDFS_STORAGE_STATUS_OFFLINE: 'OFFLINE',
        FDFS_STORAGE_STATUS_ONLINE: 'ONLINE',
        FDFS_STORAGE_STATUS_ACTIVE: 'ACTIVE',
        FDFS_STORAGE_STATUS_RECOVERY: 'RECOVERY',
    }
    return status_names.get(status_code, 'UNKNOW')
class Storage_info(object):
    """One storage server's status/statistics record from a tracker response.

    ``set_info`` decodes one fixed-size packed record (see ``self.fmt``);
    ``get_fmt_size`` gives that record's size in bytes.
    """

    def __init__(self):
        self.status = 0
        self.id = ''
        self.ip_addr = ''
        self.domain_name = ''
        self.src_id = ''
        self.version = ''
        self.join_time = datetime.fromtimestamp(0).isoformat()
        self.up_time = datetime.fromtimestamp(0).isoformat()
        self.totalMB = ''
        self.freeMB = ''
        self.upload_prio = 0
        self.store_path_count = 0
        self.subdir_count_per_path = 0
        self.curr_write_path = 0
        self.storage_port = 23000
        self.storage_http_port = 80
        self.alloc_count = 0
        self.current_count = 0
        self.max_count = 0
        self.total_upload_count = 0
        self.success_upload_count = 0
        self.total_append_count = 0
        self.success_append_count = 0
        self.total_modify_count = 0
        self.success_modify_count = 0
        self.total_truncate_count = 0
        self.success_truncate_count = 0
        self.total_setmeta_count = 0
        self.success_setmeta_count = 0
        self.total_del_count = 0
        self.success_del_count = 0
        self.total_download_count = 0
        self.success_download_count = 0
        self.total_getmeta_count = 0
        self.success_getmeta_count = 0
        self.total_create_link_count = 0
        self.success_create_link_count = 0
        self.total_del_link_count = 0
        self.success_del_link_count = 0
        self.total_upload_bytes = 0
        self.success_upload_bytes = 0
        self.total_append_bytes = 0
        self.success_append_bytes = 0
        self.total_modify_bytes = 0
        self.success_modify_bytes = 0
        self.total_download_bytes = 0
        self.success_download_bytes = 0
        self.total_sync_in_bytes = 0
        self.success_sync_in_bytes = 0
        self.total_sync_out_bytes = 0
        self.success_sync_out_bytes = 0
        self.total_file_open_count = 0
        self.success_file_open_count = 0
        self.total_file_read_count = 0
        self.success_file_read_count = 0
        self.total_file_write_count = 0
        self.success_file_write_count = 0
        self.last_source_sync = datetime.fromtimestamp(0).isoformat()
        self.last_sync_update = datetime.fromtimestamp(0).isoformat()
        self.last_synced_time = datetime.fromtimestamp(0).isoformat()
        self.last_heartbeat_time = datetime.fromtimestamp(0).isoformat()
        self.if_trunk_server = ''
        # fmt = |-status(1)-ipaddr(16)-domain(128)-srcipaddr(16)-ver(6)-52*8-|
        self.fmt = '!B 16s 16s 128s 16s 6s 10Q 4s4s4s 42Q?'

    def set_info(self, bytes_stream):
        """Decode one packed storage record into this object's attributes.

        Raises ResponseError when a disk-space value cannot be represented.
        """
        # BUG FIX: the original unpacked ``self.total_append_bytes`` TWICE,
        # so ``success_append_bytes`` was never set and the following 14
        # fields were all shifted by one slot. The second occurrence is now
        # correctly ``self.success_append_bytes``.
        (self.status, self.id, ip_addr, domain_name, self.src_id, version, join_time, up_time, totalMB, freeMB,
         self.upload_prio, self.store_path_count, self.subdir_count_per_path, self.curr_write_path, self.storage_port,
         self.storage_http_port, self.alloc_count, self.current_count, self.max_count, self.total_upload_count,
         self.success_upload_count, self.total_append_count, self.success_append_count, self.total_modify_count,
         self.success_modify_count, self.total_truncate_count, self.success_truncate_count, self.total_setmeta_count,
         self.success_setmeta_count, self.total_del_count, self.success_del_count, self.total_download_count,
         self.success_download_count, self.total_getmeta_count, self.success_getmeta_count,
         self.total_create_link_count, self.success_create_link_count, self.total_del_link_count,
         self.success_del_link_count, self.total_upload_bytes, self.success_upload_bytes, self.total_append_bytes,
         self.success_append_bytes, self.total_modify_bytes, self.success_modify_bytes, self.total_download_bytes,
         self.success_download_bytes, self.total_sync_in_bytes, self.success_sync_in_bytes, self.total_sync_out_bytes,
         self.success_sync_out_bytes, self.total_file_open_count, self.success_file_open_count,
         self.total_file_read_count, self.success_file_read_count, self.total_file_write_count,
         self.success_file_write_count, last_source_sync, last_sync_update, last_synced_time, last_heartbeat_time,
         self.if_trunk_server,) = struct.unpack(self.fmt, bytes_stream)
        try:
            # Fixed-width byte fields are NUL-padded on the wire.
            self.ip_addr = ip_addr.strip(b'\x00')
            self.domain_name = domain_name.strip(b'\x00')
            self.version = version.strip(b'\x00')
            self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
            self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
        except ValueError as e:
            raise ResponseError('[-] Error: disk space overrun, can not represented it.')
        self.join_time = datetime.fromtimestamp(join_time).isoformat()
        self.up_time = datetime.fromtimestamp(up_time).isoformat()
        self.last_source_sync = datetime.fromtimestamp(last_source_sync).isoformat()
        self.last_sync_update = datetime.fromtimestamp(last_sync_update).isoformat()
        self.last_synced_time = datetime.fromtimestamp(last_synced_time).isoformat()
        self.last_heartbeat_time = datetime.fromtimestamp(last_heartbeat_time).isoformat()
        return True

    def __str__(self):
        '''Transform to readable string.'''
        s = 'Storage information:\n'
        s += '\tip_addr = %s (%s)\n' % (self.ip_addr, parse_storage_status(self.status))
        s += '\thttp domain = %s\n' % self.domain_name
        s += '\tversion = %s\n' % self.version
        s += '\tjoin time = %s\n' % self.join_time
        s += '\tup time = %s\n' % self.up_time
        s += '\ttotal storage = %s\n' % self.totalMB
        s += '\tfree storage = %s\n' % self.freeMB
        s += '\tupload priority = %d\n' % self.upload_prio
        s += '\tstore path count = %d\n' % self.store_path_count
        s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
        s += '\tstorage port = %d\n' % self.storage_port
        s += '\tstorage HTTP port = %d\n' % self.storage_http_port
        s += '\tcurrent write path = %d\n' % self.curr_write_path
        s += '\tsource ip_addr = %s\n' % self.ip_addr
        s += '\tif_trunk_server = %d\n' % self.if_trunk_server
        s += '\ttotal upload count = %ld\n' % self.total_upload_count
        s += '\tsuccess upload count = %ld\n' % self.success_upload_count
        s += '\ttotal download count = %ld\n' % self.total_download_count
        s += '\tsuccess download count = %ld\n' % self.success_download_count
        s += '\ttotal append count = %ld\n' % self.total_append_count
        s += '\tsuccess append count = %ld\n' % self.success_append_count
        s += '\ttotal modify count = %ld\n' % self.total_modify_count
        s += '\tsuccess modify count = %ld\n' % self.success_modify_count
        s += '\ttotal truncate count = %ld\n' % self.total_truncate_count
        s += '\tsuccess truncate count = %ld\n' % self.success_truncate_count
        s += '\ttotal delete count = %ld\n' % self.total_del_count
        s += '\tsuccess delete count = %ld\n' % self.success_del_count
        s += '\ttotal set_meta count = %ld\n' % self.total_setmeta_count
        s += '\tsuccess set_meta count = %ld\n' % self.success_setmeta_count
        s += '\ttotal get_meta count = %ld\n' % self.total_getmeta_count
        s += '\tsuccess get_meta count = %ld\n' % self.success_getmeta_count
        s += '\ttotal create link count = %ld\n' % self.total_create_link_count
        s += '\tsuccess create link count = %ld\n' % self.success_create_link_count
        s += '\ttotal delete link count = %ld\n' % self.total_del_link_count
        s += '\tsuccess delete link count = %ld\n' % self.success_del_link_count
        s += '\ttotal upload bytes = %ld\n' % self.total_upload_bytes
        s += '\tsuccess upload bytes = %ld\n' % self.success_upload_bytes
        s += '\ttotal download bytes = %ld\n' % self.total_download_bytes
        s += '\tsuccess download bytes = %ld\n' % self.success_download_bytes
        s += '\ttotal append bytes = %ld\n' % self.total_append_bytes
        s += '\tsuccess append bytes = %ld\n' % self.success_append_bytes
        s += '\ttotal modify bytes = %ld\n' % self.total_modify_bytes
        s += '\tsuccess modify bytes = %ld\n' % self.success_modify_bytes
        s += '\ttotal sync_in bytes = %ld\n' % self.total_sync_in_bytes
        s += '\tsuccess sync_in bytes = %ld\n' % self.success_sync_in_bytes
        s += '\ttotal sync_out bytes = %ld\n' % self.total_sync_out_bytes
        s += '\tsuccess sync_out bytes = %ld\n' % self.success_sync_out_bytes
        s += '\ttotal file open count = %ld\n' % self.total_file_open_count
        s += '\tsuccess file open count = %ld\n' % self.success_file_open_count
        s += '\ttotal file read count = %ld\n' % self.total_file_read_count
        s += '\tsuccess file read count = %ld\n' % self.success_file_read_count
        s += '\ttotal file write count = %ld\n' % self.total_file_write_count
        # TYPO FIX in the label below: "sucess" -> "success".
        s += '\tsuccess file write count = %ld\n' % self.success_file_write_count
        s += '\tlast heartbeat time = %s\n' % self.last_heartbeat_time
        s += '\tlast source update = %s\n' % self.last_source_sync
        s += '\tlast sync update = %s\n' % self.last_sync_update
        s += '\tlast synced time = %s\n' % self.last_synced_time
        return s

    def get_fmt_size(self):
        """Size in bytes of one packed storage record."""
        return struct.calcsize(self.fmt)
class Group_info(object):
    """One storage group's record from a tracker "list groups" response."""

    def __init__(self):
        self.group_name = ''
        self.totalMB = ''
        self.freeMB = ''
        self.trunk_freeMB = ''
        self.count = 0
        self.storage_port = 0
        self.store_http_port = 0
        self.active_count = 0
        self.curr_write_server = 0
        self.store_path_count = 0
        self.subdir_count_per_path = 0
        self.curr_trunk_file_id = 0
        # Record layout: NUL-padded group name followed by 11 unsigned
        # 64-bit integers. (The original's trailing ``return None`` in
        # __init__ was redundant and has been removed.)
        self.fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)

    def __str__(self):
        s = 'Group information:\n'
        s += '\tgroup name = %s\n' % self.group_name
        s += '\ttotal disk space = %s\n' % self.totalMB
        s += '\tdisk free space = %s\n' % self.freeMB
        s += '\ttrunk free space = %s\n' % self.trunk_freeMB
        s += '\tstorage server count = %d\n' % self.count
        s += '\tstorage port = %d\n' % self.storage_port
        s += '\tstorage HTTP port = %d\n' % self.store_http_port
        s += '\tactive server count = %d\n' % self.active_count
        s += '\tcurrent write server index = %d\n' % self.curr_write_server
        s += '\tstore path count = %d\n' % self.store_path_count
        s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
        s += '\tcurrent trunk file id = %d\n' % self.curr_trunk_file_id
        return s

    def set_info(self, bytes_stream):
        """Decode one packed group record into this object's attributes.

        Raises DataError when a disk-space value cannot be represented.
        """
        (group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, self.store_http_port,
         self.active_count, self.curr_write_server, self.store_path_count, self.subdir_count_per_path,
         self.curr_trunk_file_id) = struct.unpack(self.fmt, bytes_stream)
        try:
            # The group name is NUL-padded on the wire.
            self.group_name = group_name.strip(b'\x00')
            self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
            self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
            self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
        except ValueError:
            raise DataError('[-] Error disk space overrun, can not represented it.')

    def get_fmt_size(self):
        """Size in bytes of one packed group record."""
        return struct.calcsize(self.fmt)
class Tracker_client(object):
'''Class Tracker client.'''
def __init__(self, pool):
    # Connection pool exposing get_connection()/release() for tracker sockets.
    self.pool = pool
def tracker_list_servers(self, group_name, storage_ip=None):
'''
List servers in a storage group
'''
conn = self.pool.get_connection()
th = Tracker_header()
ip_len = len(storage_ip) if storage_ip else 0
if ip_len >= IP_ADDRESS_SIZE:
ip_len = IP_ADDRESS_SIZE - 1
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + ip_len
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
store_ip_addr = storage_ip or ''
storage_ip_fmt = '!%ds' % ip_len
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name) + struct.pack(storage_ip_fmt, store_ip_addr)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
si = Storage_info()
si_fmt_size = si.get_fmt_size()
recv_size = len(recv_buffer)
if recv_size % si_fmt_size != 0:
errinfo = '[-] Error: response size not match, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errinfo)
except ConnectionError:
raise
finally:
self.pool.release(conn)
num_storage = recv_size / si_fmt_size
si_list = []
i = 0
while num_storage:
si.set_info(recv_buffer[(i * si_fmt_size): ((i + 1) * si_fmt_size)])
si_list.append(si)
si = Storage_info()
num_storage -= 1
i += 1
ret_dict = {}
ret_dict['Group name'] = group_name
ret_dict['Servers'] = si_list
return ret_dict
def tracker_list_one_group(self, group_name):
conn = self.pool.get_connection()
th = Tracker_header()
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
# group_fmt: |-group_name(16)-|
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
group_info = Group_info()
group_info.set_info(recv_buffer)
except ConnectionError:
raise
finally:
self.pool.release(conn)
return group_info
def tracker_list_all_groups(self):
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
except:
raise
finally:
self.pool.release(conn)
gi = Group_info()
gi_fmt_size = gi.get_fmt_size()
if recv_size % gi_fmt_size != 0:
errmsg = '[-] Error: Response size is mismatch, except: %d, actul: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
num_groups = recv_size / gi_fmt_size
ret_dict = {}
ret_dict['Groups count'] = num_groups
gi_list = []
i = 0
while num_groups:
gi.set_info(recv_buffer[i * gi_fmt_size: (i + 1) * gi_fmt_size])
gi_list.append(gi)
gi = Group_info()
i += 1
num_groups -= 1
ret_dict['Groups'] = gi_list
return ret_dict
def tracker_query_storage_stor_without_group(self):
'''Query storage server for upload, without group name.
Return: Storage_server object'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invaild, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def tracker_query_storage_stor_with_group(self, group_name):
'''Query storage server for upload, based group name.
arguments:
@group_name: string
@Return Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.send_header(conn)
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
send_buffer = struct.pack(group_fmt, group_name)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invaild, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def _tracker_do_query_storage(self, group_name, filename, cmd):
'''
core of query storage, based group name and filename.
It is useful download, delete and set meta.
arguments:
@group_name: string
@filename: string. remote file_id
@Return: Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
file_name_len = len(filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
th.cmd = cmd
th.send_header(conn)
# query_fmt: |-group_name(16)-filename(file_name_len)-|
query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(query_fmt, group_name, filename)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invaild, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ip_addr(16)-port(8)-|
recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ipaddr, store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ipaddr.strip(b'\x00')
return store_serv
def tracker_query_storage_update(self, group_name, filename):
'''
Query storage server to update(delete and set_meta).
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
def tracker_query_storage_fetch(self, group_name, filename):
'''
Query storage server to download.
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE) | fdfs_client/tracker_client.py |
import struct
import socket
from datetime import datetime
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
def parse_storage_status(status_code):
    """Map a storage status code to its readable name.

    @status_code: one of the FDFS_STORAGE_STATUS_* constants.
    @Return: str; unknown codes yield 'UNKNOW' (spelling kept for
             output compatibility).
    """
    # Plain value lookup; the original dict-of-lambdas added nothing.
    return {
        FDFS_STORAGE_STATUS_INIT: 'INIT',
        FDFS_STORAGE_STATUS_WAIT_SYNC: 'WAIT_SYNC',
        FDFS_STORAGE_STATUS_SYNCING: 'SYNCING',
        FDFS_STORAGE_STATUS_IP_CHANGED: 'IP_CHANGED',
        FDFS_STORAGE_STATUS_DELETED: 'DELETED',
        FDFS_STORAGE_STATUS_OFFLINE: 'OFFLINE',
        FDFS_STORAGE_STATUS_ONLINE: 'ONLINE',
        FDFS_STORAGE_STATUS_ACTIVE: 'ACTIVE',
        FDFS_STORAGE_STATUS_RECOVERY: 'RECOVERY',
    }.get(status_code, 'UNKNOW')
class Storage_info(object):
    '''State and statistics of one storage server, as reported by a tracker.

    Attributes hold neutral defaults until set_info() unpacks a wire
    record into them.
    '''

    def __init__(self):
        self.status = 0
        self.id = ''
        self.ip_addr = ''
        self.domain_name = ''
        self.src_id = ''
        self.version = ''
        self.join_time = datetime.fromtimestamp(0).isoformat()
        self.up_time = datetime.fromtimestamp(0).isoformat()
        self.totalMB = ''
        self.freeMB = ''
        self.upload_prio = 0
        self.store_path_count = 0
        self.subdir_count_per_path = 0
        self.curr_write_path = 0
        self.storage_port = 23000
        self.storage_http_port = 80
        self.alloc_count = 0
        self.current_count = 0
        self.max_count = 0
        self.total_upload_count = 0
        self.success_upload_count = 0
        self.total_append_count = 0
        self.success_append_count = 0
        self.total_modify_count = 0
        self.success_modify_count = 0
        self.total_truncate_count = 0
        self.success_truncate_count = 0
        self.total_setmeta_count = 0
        self.success_setmeta_count = 0
        self.total_del_count = 0
        self.success_del_count = 0
        self.total_download_count = 0
        self.success_download_count = 0
        self.total_getmeta_count = 0
        self.success_getmeta_count = 0
        self.total_create_link_count = 0
        self.success_create_link_count = 0
        self.total_del_link_count = 0
        self.success_del_link_count = 0
        self.total_upload_bytes = 0
        self.success_upload_bytes = 0
        self.total_append_bytes = 0
        self.success_append_bytes = 0
        self.total_modify_bytes = 0
        self.success_modify_bytes = 0
        self.total_download_bytes = 0
        self.success_download_bytes = 0
        self.total_sync_in_bytes = 0
        self.success_sync_in_bytes = 0
        self.total_sync_out_bytes = 0
        self.success_sync_out_bytes = 0
        self.total_file_open_count = 0
        self.success_file_open_count = 0
        self.total_file_read_count = 0
        self.success_file_read_count = 0
        self.total_file_write_count = 0
        self.success_file_write_count = 0
        self.last_source_sync = datetime.fromtimestamp(0).isoformat()
        self.last_sync_update = datetime.fromtimestamp(0).isoformat()
        self.last_synced_time = datetime.fromtimestamp(0).isoformat()
        self.last_heartbeat_time = datetime.fromtimestamp(0).isoformat()
        self.if_trunk_server = ''
        # Layout (assumed from the unpack below -- confirm against
        # fdfs_protol): status(1) id(16) ipaddr(16) domain(128) srcid(16)
        # ver(6), 10 u64 fields, 3x 4-byte raw counters, 42 u64
        # counters/timestamps, trailing bool.
        self.fmt = '!B 16s 16s 128s 16s 6s 10Q 4s4s4s 42Q?'

    def set_info(self, bytes_stream):
        '''Populate all fields from one packed storage record.

        @bytes_stream: bytes of exactly get_fmt_size() length.
        Raises ResponseError when a disk size cannot be rendered.
        '''
        # NOTE: self.id keeps its raw NUL-padded bytes, as before.
        (self.status, self.id, ip_addr, domain_name, self.src_id, version, join_time, up_time, totalMB, freeMB,
         self.upload_prio, self.store_path_count, self.subdir_count_per_path, self.curr_write_path, self.storage_port,
         self.storage_http_port, self.alloc_count, self.current_count, self.max_count, self.total_upload_count,
         self.success_upload_count, self.total_append_count, self.success_append_count, self.total_modify_count,
         self.success_modify_count, self.total_truncate_count, self.success_truncate_count, self.total_setmeta_count,
         self.success_setmeta_count, self.total_del_count, self.success_del_count, self.total_download_count,
         self.success_download_count, self.total_getmeta_count, self.success_getmeta_count,
         self.total_create_link_count, self.success_create_link_count, self.total_del_link_count,
         self.success_del_link_count, self.total_upload_bytes, self.success_upload_bytes, self.total_append_bytes,
         # Bugfix: this slot previously re-assigned total_append_bytes,
         # leaving success_append_bytes永 unset and shifting nothing else.
         self.success_append_bytes, self.total_modify_bytes, self.success_modify_bytes, self.total_download_bytes,
         self.success_download_bytes, self.total_sync_in_bytes, self.success_sync_in_bytes, self.total_sync_out_bytes,
         self.success_sync_out_bytes, self.total_file_open_count, self.success_file_open_count,
         self.total_file_read_count, self.success_file_read_count, self.total_file_write_count,
         self.success_file_write_count, last_source_sync, last_sync_update, last_synced_time, last_heartbeat_time,
         self.if_trunk_server,) = struct.unpack(self.fmt, bytes_stream)
        try:
            self.ip_addr = ip_addr.strip(b'\x00')
            self.domain_name = domain_name.strip(b'\x00')
            self.version = version.strip(b'\x00')
            self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
            self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
        except ValueError as e:
            raise ResponseError('[-] Error: disk space overrun, can not represented it.')
        self.join_time = datetime.fromtimestamp(join_time).isoformat()
        self.up_time = datetime.fromtimestamp(up_time).isoformat()
        self.last_source_sync = datetime.fromtimestamp(last_source_sync).isoformat()
        self.last_sync_update = datetime.fromtimestamp(last_sync_update).isoformat()
        self.last_synced_time = datetime.fromtimestamp(last_synced_time).isoformat()
        self.last_heartbeat_time = datetime.fromtimestamp(last_heartbeat_time).isoformat()
        return True

    def __str__(self):
        '''Transform to readable string.'''
        s = 'Storage information:\n'
        s += '\tip_addr = %s (%s)\n' % (self.ip_addr, parse_storage_status(self.status))
        s += '\thttp domain = %s\n' % self.domain_name
        s += '\tversion = %s\n' % self.version
        s += '\tjoin time = %s\n' % self.join_time
        s += '\tup time = %s\n' % self.up_time
        s += '\ttotal storage = %s\n' % self.totalMB
        s += '\tfree storage = %s\n' % self.freeMB
        s += '\tupload priority = %d\n' % self.upload_prio
        s += '\tstore path count = %d\n' % self.store_path_count
        s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
        s += '\tstorage port = %d\n' % self.storage_port
        s += '\tstorage HTTP port = %d\n' % self.storage_http_port
        s += '\tcurrent write path = %d\n' % self.curr_write_path
        s += '\tsource ip_addr = %s\n' % self.ip_addr
        s += '\tif_trunk_server = %d\n' % self.if_trunk_server
        s += '\ttotal upload count = %ld\n' % self.total_upload_count
        s += '\tsuccess upload count = %ld\n' % self.success_upload_count
        s += '\ttotal download count = %ld\n' % self.total_download_count
        s += '\tsuccess download count = %ld\n' % self.success_download_count
        s += '\ttotal append count = %ld\n' % self.total_append_count
        s += '\tsuccess append count = %ld\n' % self.success_append_count
        s += '\ttotal modify count = %ld\n' % self.total_modify_count
        s += '\tsuccess modify count = %ld\n' % self.success_modify_count
        s += '\ttotal truncate count = %ld\n' % self.total_truncate_count
        s += '\tsuccess truncate count = %ld\n' % self.success_truncate_count
        s += '\ttotal delete count = %ld\n' % self.total_del_count
        s += '\tsuccess delete count = %ld\n' % self.success_del_count
        s += '\ttotal set_meta count = %ld\n' % self.total_setmeta_count
        s += '\tsuccess set_meta count = %ld\n' % self.success_setmeta_count
        s += '\ttotal get_meta count = %ld\n' % self.total_getmeta_count
        s += '\tsuccess get_meta count = %ld\n' % self.success_getmeta_count
        s += '\ttotal create link count = %ld\n' % self.total_create_link_count
        s += '\tsuccess create link count = %ld\n' % self.success_create_link_count
        s += '\ttotal delete link count = %ld\n' % self.total_del_link_count
        s += '\tsuccess delete link count = %ld\n' % self.success_del_link_count
        s += '\ttotal upload bytes = %ld\n' % self.total_upload_bytes
        s += '\tsuccess upload bytes = %ld\n' % self.success_upload_bytes
        s += '\ttotal download bytes = %ld\n' % self.total_download_bytes
        s += '\tsuccess download bytes = %ld\n' % self.success_download_bytes
        s += '\ttotal append bytes = %ld\n' % self.total_append_bytes
        s += '\tsuccess append bytes = %ld\n' % self.success_append_bytes
        s += '\ttotal modify bytes = %ld\n' % self.total_modify_bytes
        s += '\tsuccess modify bytes = %ld\n' % self.success_modify_bytes
        s += '\ttotal sync_in bytes = %ld\n' % self.total_sync_in_bytes
        s += '\tsuccess sync_in bytes = %ld\n' % self.success_sync_in_bytes
        s += '\ttotal sync_out bytes = %ld\n' % self.total_sync_out_bytes
        s += '\tsuccess sync_out bytes = %ld\n' % self.success_sync_out_bytes
        s += '\ttotal file open count = %ld\n' % self.total_file_open_count
        s += '\tsuccess file open count = %ld\n' % self.success_file_open_count
        s += '\ttotal file read count = %ld\n' % self.total_file_read_count
        s += '\tsuccess file read count = %ld\n' % self.success_file_read_count
        s += '\ttotal file write count = %ld\n' % self.total_file_write_count
        s += '\tsucess file write count = %ld\n' % self.success_file_write_count
        s += '\tlast heartbeat time = %s\n' % self.last_heartbeat_time
        s += '\tlast source update = %s\n' % self.last_source_sync
        s += '\tlast sync update = %s\n' % self.last_sync_update
        s += '\tlast synced time = %s\n' % self.last_synced_time
        return s

    def get_fmt_size(self):
        '''Return the packed size in bytes of one storage record.'''
        return struct.calcsize(self.fmt)
class Group_info(object):
    '''One storage-group record parsed from a tracker response.

    Attributes hold neutral defaults until set_info() fills them from a
    packed wire record.
    '''

    def __init__(self):
        self.group_name = ''
        self.totalMB = ''
        self.freeMB = ''
        self.trunk_freeMB = ''
        self.count = 0
        self.storage_port = 0
        self.store_http_port = 0
        self.active_count = 0
        self.curr_write_server = 0
        self.store_path_count = 0
        self.subdir_count_per_path = 0
        self.curr_trunk_file_id = 0
        # Wire layout: NUL-padded group name (FDFS_GROUP_NAME_MAX_LEN + 1
        # bytes) followed by 11 big-endian unsigned 64-bit integers.
        # (The explicit "return None" that used to end __init__ was
        # removed; it had no effect.)
        self.fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)

    def __str__(self):
        '''Render the group information as a human-readable report.'''
        s = 'Group information:\n'
        s += '\tgroup name = %s\n' % self.group_name
        s += '\ttotal disk space = %s\n' % self.totalMB
        s += '\tdisk free space = %s\n' % self.freeMB
        s += '\ttrunk free space = %s\n' % self.trunk_freeMB
        s += '\tstorage server count = %d\n' % self.count
        s += '\tstorage port = %d\n' % self.storage_port
        s += '\tstorage HTTP port = %d\n' % self.store_http_port
        s += '\tactive server count = %d\n' % self.active_count
        s += '\tcurrent write server index = %d\n' % self.curr_write_server
        s += '\tstore path count = %d\n' % self.store_path_count
        s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
        s += '\tcurrent trunk file id = %d\n' % self.curr_trunk_file_id
        return s

    def set_info(self, bytes_stream):
        '''Populate this record from one packed group entry.

        @bytes_stream: bytes of exactly get_fmt_size() length.
        Raises DataError when a size cannot be rendered by appromix.
        '''
        (group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, self.store_http_port,
         self.active_count, self.curr_write_server, self.store_path_count, self.subdir_count_per_path,
         self.curr_trunk_file_id) = struct.unpack(self.fmt, bytes_stream)
        try:
            # Strip the NUL padding from the fixed-width name field.
            self.group_name = group_name.strip(b'\x00')
            self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
            self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
            self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
        except ValueError:
            raise DataError('[-] Error disk space overrun, can not represented it.')

    def get_fmt_size(self):
        '''Return the packed size in bytes of one group record.'''
        return struct.calcsize(self.fmt)
class Tracker_client(object):
    '''Client for FastDFS tracker servers.

    Every public method borrows a connection from the pool, exchanges a
    Tracker_header (plus an optional packed body) with the tracker,
    parses the reply and always returns the connection to the pool.
    '''

    def __init__(self, pool):
        # pool must provide get_connection() and release(conn).
        self.pool = pool

    def tracker_list_servers(self, group_name, storage_ip=None):
        '''
        List servers in a storage group.

        arguments:
        @group_name: group whose servers are listed
        @storage_ip: optional storage address used to filter the listing
        @Return: dict with 'Group name' and 'Servers' (list of Storage_info)
        '''
        conn = self.pool.get_connection()
        th = Tracker_header()
        ip_len = len(storage_ip) if storage_ip else 0
        # The wire field holds at most IP_ADDRESS_SIZE - 1 characters.
        if ip_len >= IP_ADDRESS_SIZE:
            ip_len = IP_ADDRESS_SIZE - 1
        th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + ip_len
        th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
        group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
        store_ip_addr = storage_ip or ''
        storage_ip_fmt = '!%ds' % ip_len
        try:
            th.send_header(conn)
            send_buffer = struct.pack(group_fmt, group_name) + struct.pack(storage_ip_fmt, store_ip_addr)
            tcp_send_data(conn, send_buffer)
            th.recv_header(conn)
            if th.status != 0:
                raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
            recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
            si_fmt_size = Storage_info().get_fmt_size()
            recv_size = len(recv_buffer)
            # The body must contain a whole number of Storage_info records.
            if recv_size % si_fmt_size != 0:
                errinfo = '[-] Error: response size not match, expect: %d, actual: %d' % (th.pkg_len, recv_size)
                raise ResponseError(errinfo)
        except ConnectionError:
            raise
        finally:
            self.pool.release(conn)
        # Bugfix: floor division -- plain "/" yields a float on Python 3.
        num_storage = recv_size // si_fmt_size
        si_list = []
        for i in range(num_storage):
            si = Storage_info()
            si.set_info(recv_buffer[(i * si_fmt_size): ((i + 1) * si_fmt_size)])
            si_list.append(si)
        return {'Group name': group_name, 'Servers': si_list}

    def tracker_list_one_group(self, group_name):
        '''Fetch the Group_info record for a single group.'''
        conn = self.pool.get_connection()
        th = Tracker_header()
        th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
        th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
        # group_fmt: |-group_name(16)-|
        group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
        try:
            th.send_header(conn)
            send_buffer = struct.pack(group_fmt, group_name)
            tcp_send_data(conn, send_buffer)
            th.recv_header(conn)
            if th.status != 0:
                raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
            recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
            group_info = Group_info()
            group_info.set_info(recv_buffer)
        except ConnectionError:
            raise
        finally:
            self.pool.release(conn)
        return group_info

    def tracker_list_all_groups(self):
        '''List every storage group known to the tracker.

        @Return: dict with 'Groups count' (int) and 'Groups' (Group_info list)
        '''
        conn = self.pool.get_connection()
        th = Tracker_header()
        th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS
        try:
            th.send_header(conn)
            th.recv_header(conn)
            if th.status != 0:
                raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
            recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
        except ConnectionError:
            # Narrowed from a bare "except: raise" (which only re-raised).
            raise
        finally:
            self.pool.release(conn)
        gi_fmt_size = Group_info().get_fmt_size()
        if recv_size % gi_fmt_size != 0:
            errmsg = '[-] Error: Response size is mismatch, except: %d, actul: %d' % (th.pkg_len, recv_size)
            raise ResponseError(errmsg)
        # Bugfix: floor division so the reported count is an int, not a float.
        num_groups = recv_size // gi_fmt_size
        gi_list = []
        for i in range(num_groups):
            gi = Group_info()
            gi.set_info(recv_buffer[i * gi_fmt_size: (i + 1) * gi_fmt_size])
            gi_list.append(gi)
        return {'Groups count': num_groups, 'Groups': gi_list}

    def tracker_query_storage_stor_without_group(self):
        '''Query storage server for upload, without group name.
        Return: Storage_server object'''
        conn = self.pool.get_connection()
        th = Tracker_header()
        th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
        try:
            th.send_header(conn)
            th.recv_header(conn)
            if th.status != 0:
                raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
            recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
            if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invaild, '
                errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
                raise ResponseError(errmsg)
        except ConnectionError:
            raise
        finally:
            self.pool.release(conn)
        # recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
        recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
        store_serv = Storage_server()
        (group_name, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
        store_serv.group_name = group_name.strip(b'\x00')
        store_serv.ip_addr = ip_addr.strip(b'\x00')
        return store_serv

    def tracker_query_storage_stor_with_group(self, group_name):
        '''Query storage server for upload, based group name.
        arguments:
        @group_name: string
        @Return Storage_server object
        '''
        conn = self.pool.get_connection()
        th = Tracker_header()
        th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
        th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
        group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
        send_buffer = struct.pack(group_fmt, group_name)
        try:
            # Bugfix: send_header() moved inside try/finally so the pooled
            # connection is released even when the header write fails.
            th.send_header(conn)
            tcp_send_data(conn, send_buffer)
            th.recv_header(conn)
            if th.status != 0:
                raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
            recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
            if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invaild, '
                errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
                raise ResponseError(errmsg)
        except ConnectionError:
            raise
        finally:
            self.pool.release(conn)
        # recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
        recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
        store_serv = Storage_server()
        (group, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
        store_serv.group_name = group.strip(b'\x00')
        store_serv.ip_addr = ip_addr.strip(b'\x00')
        return store_serv

    def _tracker_do_query_storage(self, group_name, filename, cmd):
        '''
        core of query storage, based group name and filename.
        It is useful download, delete and set meta.
        arguments:
        @group_name: string
        @filename: string. remote file_id
        @Return: Storage_server object
        '''
        conn = self.pool.get_connection()
        th = Tracker_header()
        file_name_len = len(filename)
        th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
        th.cmd = cmd
        # query_fmt: |-group_name(16)-filename(file_name_len)-|
        query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
        send_buffer = struct.pack(query_fmt, group_name, filename)
        try:
            # Bugfix: send_header() moved inside try/finally so the pooled
            # connection is always released.
            th.send_header(conn)
            tcp_send_data(conn, send_buffer)
            th.recv_header(conn)
            if th.status != 0:
                raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
            recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
            if recv_size != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invaild, '
                errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
                raise ResponseError(errmsg)
        except ConnectionError:
            raise
        finally:
            self.pool.release(conn)
        # recv_fmt: |-group_name(16)-ip_addr(16-1)-port(8)-|
        recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
        store_serv = Storage_server()
        (group_name, ipaddr, store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
        store_serv.group_name = group_name.strip(b'\x00')
        store_serv.ip_addr = ipaddr.strip(b'\x00')
        return store_serv

    def tracker_query_storage_update(self, group_name, filename):
        '''
        Query storage server to update(delete and set_meta).
        '''
        return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)

    def tracker_query_storage_fetch(self, group_name, filename):
        '''
        Query storage server to download.
        '''
        return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE)
import os
import tempfile
from contextlib import contextmanager
from typing import Generator, Optional
from unittest.mock import patch, Mock, call
import pytest # type: ignore
from click.testing import CliRunner, Result
import purgeraw.main
from purgeraw.index_extraction import indexer
class TestMain:
    """CLI wiring tests for purgeraw.main.main.

    directory_walker, purge and the deleter factories are patched out,
    so these tests assert only how main() composes them. (Fix: the final
    assert line carried non-Python dataset residue, now removed.)
    """

    @contextmanager
    def make_test_dir(self) -> Generator[str, None, None]:
        """Yield a fresh temporary directory, removing it on exit."""
        tempdir: Optional[str] = None
        try:
            tempdir = tempfile.mkdtemp("_purgeraw")
            yield tempdir
        finally:
            if tempdir is not None:
                os.rmdir(tempdir)

    @pytest.fixture
    def runner(self) -> CliRunner:
        """Provide a fresh click CLI runner for each test."""
        return CliRunner()

    def test_when_missing_input_dir_then_fails(self, runner: CliRunner) -> None:
        """Invoking without the directory argument is a usage error."""
        result: Result = runner.invoke(purgeraw.main.main, [])
        assert result.exit_code == 2
        assert "Error: Missing argument '<directory>'." in result.output

    def test_when_input_dir_not_exists_then_fails(self, runner: CliRunner) -> None:
        """A nonexistent directory path is rejected by click validation."""
        result: Result = runner.invoke(purgeraw.main.main, ["/flibble1212"])
        assert result.exit_code == 2
        assert "Path '/flibble1212' does not exist." in result.output

    # Note: @patch decorators inject mocks bottom-up, so the parameter
    # order below is deleter, purger, walker.
    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    @patch.object(purgeraw.main, purgeraw.main.fake_deleter.__name__)
    def test_when_input_dir_present_then_walker_purger_and_fake_deleter_called(self,
                                                                               deleter_mock: Mock,
                                                                               purger_mock: Mock,
                                                                               walker_mock: Mock,
                                                                               runner: CliRunner) -> None:
        """Default run (no -d) wires walker -> purge -> fake_deleter."""
        # Each factory first returns itself (as the configured callable),
        # then the file list when that callable is invoked.
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname])
        assert result.exit_code == 0
        assert walker_mock.call_args_list == [call(["cr3", "xmp", "jpg"]),
                                              call(dirname)
                                              ]
        assert purger_mock.call_args_list == [call(["cr3", "xmp"], indexer),
                                              call(["/some/dir/fred.cr3"])
                                              ]
        assert deleter_mock.call_args.args[0] == ["/some/dir/fred.cr3"]

    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    @patch.object(purgeraw.main, purgeraw.main.deleter.__name__)
    def test_when_input_dir_present_with_delete_then_walker_purger_and_deleter_called(self,
                                                                                      deleter_mock: Mock,
                                                                                      purger_mock: Mock,
                                                                                      walker_mock: Mock,
                                                                                      runner: CliRunner) -> None:
        """With -d the real deleter is used instead of the fake one."""
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-d"])
        assert result.exit_code == 0
        assert walker_mock.call_args_list == [call(["cr3", "xmp", "jpg"]),
                                              call(dirname)
                                              ]
        assert purger_mock.call_args_list == [call(["cr3", "xmp"], indexer),
                                              call(["/some/dir/fred.cr3"])
                                              ]
        assert deleter_mock.call_args.args[0] == ["/some/dir/fred.cr3"]

    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    def test_when_input_dir_present_with_raw_extensions_then_purge_called(self,
                                                                          purger_mock: Mock,
                                                                          walker_mock: Mock,
                                                                          runner: CliRunner) -> None:
        """-r replaces the raw extension list passed to walker and purge."""
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-r", "cr2", "-r", "raw"])
        assert result.exit_code == 0
        assert walker_mock.call_args_list[0] == call(["cr2", "raw", "jpg"])
        assert purger_mock.call_args_list[0] == call(["cr2", "raw"], indexer)

    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    def test_when_input_dir_present_with_processed_extensions_then_purge_called(self,
                                                                               purger_mock: Mock,
                                                                               walker_mock: Mock,
                                                                               runner: CliRunner) -> None:
        """-p replaces the processed extension passed to the walker."""
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-p", "png"])
        assert result.exit_code == 0
        assert walker_mock.call_args_list[0] == call(["cr3", "xmp", "png"])
import tempfile
from contextlib import contextmanager
from typing import Generator, Optional
from unittest.mock import patch, Mock, call
import pytest # type: ignore
from click.testing import CliRunner, Result
import purgeraw.main
from purgeraw.index_extraction import indexer
class TestMain:
    """CLI wiring tests for purgeraw.main.main.

    directory_walker, purge and the deleter factories are patched out,
    so these tests assert only how main() composes them.
    """

    @contextmanager
    def make_test_dir(self) -> Generator[str, None, None]:
        """Yield a fresh temporary directory, removing it on exit."""
        tempdir: Optional[str] = None
        try:
            tempdir = tempfile.mkdtemp("_purgeraw")
            yield tempdir
        finally:
            if tempdir is not None:
                os.rmdir(tempdir)

    @pytest.fixture
    def runner(self) -> CliRunner:
        """Provide a fresh click CLI runner for each test."""
        return CliRunner()

    def test_when_missing_input_dir_then_fails(self, runner: CliRunner) -> None:
        """Invoking without the directory argument is a usage error."""
        result: Result = runner.invoke(purgeraw.main.main, [])
        assert result.exit_code == 2
        assert "Error: Missing argument '<directory>'." in result.output

    def test_when_input_dir_not_exists_then_fails(self, runner: CliRunner) -> None:
        """A nonexistent directory path is rejected by click validation."""
        result: Result = runner.invoke(purgeraw.main.main, ["/flibble1212"])
        assert result.exit_code == 2
        assert "Path '/flibble1212' does not exist." in result.output

    # Note: @patch decorators inject mocks bottom-up, so the parameter
    # order below is deleter, purger, walker.
    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    @patch.object(purgeraw.main, purgeraw.main.fake_deleter.__name__)
    def test_when_input_dir_present_then_walker_purger_and_fake_deleter_called(self,
                                                                               deleter_mock: Mock,
                                                                               purger_mock: Mock,
                                                                               walker_mock: Mock,
                                                                               runner: CliRunner) -> None:
        """Default run (no -d) wires walker -> purge -> fake_deleter."""
        # Each factory first returns itself (as the configured callable),
        # then the file list when that callable is invoked.
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname])
        assert result.exit_code == 0
        assert walker_mock.call_args_list == [call(["cr3", "xmp", "jpg"]),
                                              call(dirname)
                                              ]
        assert purger_mock.call_args_list == [call(["cr3", "xmp"], indexer),
                                              call(["/some/dir/fred.cr3"])
                                              ]
        assert deleter_mock.call_args.args[0] == ["/some/dir/fred.cr3"]

    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    @patch.object(purgeraw.main, purgeraw.main.deleter.__name__)
    def test_when_input_dir_present_with_delete_then_walker_purger_and_deleter_called(self,
                                                                                      deleter_mock: Mock,
                                                                                      purger_mock: Mock,
                                                                                      walker_mock: Mock,
                                                                                      runner: CliRunner) -> None:
        """With -d the real deleter is used instead of the fake one."""
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-d"])
        assert result.exit_code == 0
        assert walker_mock.call_args_list == [call(["cr3", "xmp", "jpg"]),
                                              call(dirname)
                                              ]
        assert purger_mock.call_args_list == [call(["cr3", "xmp"], indexer),
                                              call(["/some/dir/fred.cr3"])
                                              ]
        assert deleter_mock.call_args.args[0] == ["/some/dir/fred.cr3"]

    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    def test_when_input_dir_present_with_raw_extensions_then_purge_called(self,
                                                                          purger_mock: Mock,
                                                                          walker_mock: Mock,
                                                                          runner: CliRunner) -> None:
        """-r replaces the raw extension list passed to walker and purge."""
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-r", "cr2", "-r", "raw"])
        assert result.exit_code == 0
        assert walker_mock.call_args_list[0] == call(["cr2", "raw", "jpg"])
        assert purger_mock.call_args_list[0] == call(["cr2", "raw"], indexer)

    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    def test_when_input_dir_present_with_processed_extensions_then_purge_called(self,
                                                                               purger_mock: Mock,
                                                                               walker_mock: Mock,
                                                                               runner: CliRunner) -> None:
        """-p replaces the processed extension passed to the walker."""
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-p", "png"])
        assert result.exit_code == 0
        assert walker_mock.call_args_list[0] == call(["cr3", "xmp", "png"])
import cv2
import numpy as np
import re
# Importing our dependencies
import util as ut
import svm_train as st
import time
# create and train SVM model each time coz bug in opencv 3.1.0 svm.load() https://github.com/Itseez/opencv/issues/4969
model = st.trainSVM(9, 20, 'TrainData2')
move_text = {'1': 'GRAB', '2': 'Bless', '3': 'Rock', '4': 'Stop', '5': 'ThumbsUp', '6': 'Victory', '7': 'Stop2',
'8': 'Left', '9': 'Right'}
# Camera and font initialization
cam = int(input("Enter Camera Index : "))
cap = cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX
temp = 0
previouslabel = None
previousText = " "
label = None
text = ""
while (cap.isOpened()):
move = ''
t = time.time()
_, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, th1 = cv2.threshold(gray.copy(), 150, 255, cv2.THRESH_TOZERO)
cv2.imshow('thresh', th1)
_, contours, hierarchy = cv2.findContours(th1.copy(), cv2.RETR_EXTERNAL, 2)
cnt = ut.getMaxContour(contours, 4000)
if cnt.any() != None:
gesture, label = ut.getGestureImg(cnt, img, th1, model)
if label != None:
if temp == 0:
previouslabel = label
if previouslabel == label:
previouslabel = label
temp += 1
else:
temp = 0
if (temp == 40):
if (label == 'P'):
label = " "
text += label
if (label == 'Q'):
words = re.split(" +", text)
words.pop()
text = " ".join(words)
# text=previousText
print(text)
cv2.imshow('PredictedGesture', cv2.imread('TrainData2/' + label + '_1.jpg')) # showing the best match or prediction
cv2.putText(img, label, (50, 150), font, 8, (0, 125, 155),
2) # displaying the predicted letter on the main screen
cv2.putText(img, text, (50, 450), font, 3, (0, 0, 255), 2)
fps = int(1 / (time.time() - t))
cv2.putText(img, "FPS: " + str(fps) + move, (50, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('Frame', img)
k = 0xFF & cv2.waitKey(10)
if k == 27:
break
cap.release()
cv2.destroyAllWindows() | hand_pose.py | import cv2
import numpy as np
import re
# Importing our dependencies
import util as ut
import svm_train as st
import time
# create and train SVM model each time coz bug in opencv 3.1.0 svm.load() https://github.com/Itseez/opencv/issues/4969
model = st.trainSVM(9, 20, 'TrainData2')
move_text = {'1': 'GRAB', '2': 'Bless', '3': 'Rock', '4': 'Stop', '5': 'ThumbsUp', '6': 'Victory', '7': 'Stop2',
'8': 'Left', '9': 'Right'}
# Camera and font initialization
cam = int(input("Enter Camera Index : "))
cap = cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX
temp = 0
previouslabel = None
previousText = " "
label = None
text = ""
while (cap.isOpened()):
move = ''
t = time.time()
_, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, th1 = cv2.threshold(gray.copy(), 150, 255, cv2.THRESH_TOZERO)
cv2.imshow('thresh', th1)
_, contours, hierarchy = cv2.findContours(th1.copy(), cv2.RETR_EXTERNAL, 2)
cnt = ut.getMaxContour(contours, 4000)
if cnt.any() != None:
gesture, label = ut.getGestureImg(cnt, img, th1, model)
if label != None:
if temp == 0:
previouslabel = label
if previouslabel == label:
previouslabel = label
temp += 1
else:
temp = 0
if (temp == 40):
if (label == 'P'):
label = " "
text += label
if (label == 'Q'):
words = re.split(" +", text)
words.pop()
text = " ".join(words)
# text=previousText
print(text)
cv2.imshow('PredictedGesture', cv2.imread('TrainData2/' + label + '_1.jpg')) # showing the best match or prediction
cv2.putText(img, label, (50, 150), font, 8, (0, 125, 155),
2) # displaying the predicted letter on the main screen
cv2.putText(img, text, (50, 450), font, 3, (0, 0, 255), 2)
fps = int(1 / (time.time() - t))
cv2.putText(img, "FPS: " + str(fps) + move, (50, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('Frame', img)
k = 0xFF & cv2.waitKey(10)
if k == 27:
break
cap.release()
cv2.destroyAllWindows() | 0.241221 | 0.133613 |
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
PowerConfiguration,
)
from zigpy.zcl.clusters.lighting import Color
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
ARGS,
CLUSTER_ID,
COMMAND,
COMMAND_MOVE,
COMMAND_MOVE_ON_OFF,
COMMAND_RELEASE,
COMMAND_STEP,
COMMAND_STEP_ON_OFF,
COMMAND_TOGGLE,
DEVICE_TYPE,
DIM_DOWN,
DIM_UP,
ENDPOINT_ID,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PARAMS,
PROFILE_ID,
SHORT_PRESS,
TURN_ON,
)
from zhaquirks.lds import MANUFACTURER, LightLinkCluster
class CCTSwitch(CustomDevice):
"""Custom device representing CCTSwitch-D0001 remote control."""
signature = {
# <SimpleDescriptor endpoint = 1 profile = 260 device_type = 2048
# device_version = 1 input_clusters = [0, 1, 3, 4096, 64769]
# output_clusters = [3, 4, 6, 8, 25, 768, 4096] >
MODELS_INFO: [(MANUFACTURER, "ZBT-CCTSwitch-D0001")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
LightLink.cluster_id,
0xFD01,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
LightLink.cluster_id,
],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
LightLinkCluster,
0xFD01,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
LightLink.cluster_id,
],
}
}
}
device_automation_triggers = {
(SHORT_PRESS, TURN_ON): {
COMMAND: COMMAND_TOGGLE,
CLUSTER_ID: 6,
ENDPOINT_ID: 1,
},
(LONG_PRESS, TURN_ON): {
COMMAND: COMMAND_RELEASE,
CLUSTER_ID: 5,
ENDPOINT_ID: 1,
ARGS: [],
},
(SHORT_PRESS, DIM_UP): {
COMMAND: COMMAND_STEP_ON_OFF,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"step_mode": 0},
},
(LONG_PRESS, DIM_UP): {
COMMAND: COMMAND_MOVE_ON_OFF,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"move_mode": 0},
},
(SHORT_PRESS, DIM_DOWN): {
COMMAND: COMMAND_STEP,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"step_mode": 1},
},
(LONG_PRESS, DIM_DOWN): {
COMMAND: COMMAND_MOVE,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"move_mode": 1},
},
} | zhaquirks/lds/cctswitch.py | from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
PowerConfiguration,
)
from zigpy.zcl.clusters.lighting import Color
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
ARGS,
CLUSTER_ID,
COMMAND,
COMMAND_MOVE,
COMMAND_MOVE_ON_OFF,
COMMAND_RELEASE,
COMMAND_STEP,
COMMAND_STEP_ON_OFF,
COMMAND_TOGGLE,
DEVICE_TYPE,
DIM_DOWN,
DIM_UP,
ENDPOINT_ID,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PARAMS,
PROFILE_ID,
SHORT_PRESS,
TURN_ON,
)
from zhaquirks.lds import MANUFACTURER, LightLinkCluster
class CCTSwitch(CustomDevice):
"""Custom device representing CCTSwitch-D0001 remote control."""
signature = {
# <SimpleDescriptor endpoint = 1 profile = 260 device_type = 2048
# device_version = 1 input_clusters = [0, 1, 3, 4096, 64769]
# output_clusters = [3, 4, 6, 8, 25, 768, 4096] >
MODELS_INFO: [(MANUFACTURER, "ZBT-CCTSwitch-D0001")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
LightLink.cluster_id,
0xFD01,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
LightLink.cluster_id,
],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
LightLinkCluster,
0xFD01,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
LightLink.cluster_id,
],
}
}
}
device_automation_triggers = {
(SHORT_PRESS, TURN_ON): {
COMMAND: COMMAND_TOGGLE,
CLUSTER_ID: 6,
ENDPOINT_ID: 1,
},
(LONG_PRESS, TURN_ON): {
COMMAND: COMMAND_RELEASE,
CLUSTER_ID: 5,
ENDPOINT_ID: 1,
ARGS: [],
},
(SHORT_PRESS, DIM_UP): {
COMMAND: COMMAND_STEP_ON_OFF,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"step_mode": 0},
},
(LONG_PRESS, DIM_UP): {
COMMAND: COMMAND_MOVE_ON_OFF,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"move_mode": 0},
},
(SHORT_PRESS, DIM_DOWN): {
COMMAND: COMMAND_STEP,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"step_mode": 1},
},
(LONG_PRESS, DIM_DOWN): {
COMMAND: COMMAND_MOVE,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
PARAMS: {"move_mode": 1},
},
} | 0.575946 | 0.135861 |
from zope.interface import (
Attribute,
Interface,
)
# public API
class IHandler(Interface):
"""A Callback Handler for Zookeeper completion and watch callbacks
This object must implement several methods responsible for
determining how completion / watch callbacks are handled as well as
the method for calling :class:`IAsyncResult` callback functions.
These functions are used to abstract differences between a Python
threading environment and asynchronous single-threaded environments
like gevent. The minimum functionality needed for Kazoo to handle
these differences are encompassed in this interface.
The Handler should document how callbacks are called for:
* Zookeeper completion events
* Zookeeper watch events
"""
name = Attribute(
"""Human readable name of the Handler interface""")
timeout_exception = Attribute(
"""Exception class that should be thrown and captured if a
result is not available within the given time""")
sleep_func = Attribute(
"""Appropriate sleep function that can be called with a single
argument and sleep.""")
empty = Attribute(
"""Exception class that should be thrown and captured if the
queue is empty within the given time""")
def start():
"""Start the handler, used for setting up the handler."""
def stop():
"""Stop the handler. Should block until the handler is safely
stopped."""
def select():
"""A select method that implements Python's select.select
API"""
def socket():
"""A socket method that implements Python's socket.socket
API"""
def peekable_queue():
"""Return an appropriate object that implements Python's
Queue.Queue API with a .peek method"""
def event_object():
"""Return an appropriate object that implements Python's
threading.Event API"""
def lock_object():
"""Return an appropriate object that implements Python's
threading.Lock API"""
def rlock_object():
"""Return an appropriate object that implements Python's
threading.RLock API"""
def async_result():
"""Return an instance that conforms to the
:class:`~IAsyncResult` interface appropriate for this
handler"""
def spawn(func, *args, **kwargs):
"""Spawn a function to run asynchronously
:param args: args to call the function with.
:param kwargs: keyword args to call the function with.
This method should return immediately and execute the function
with the provided args and kwargs in an asynchronous manner.
"""
def dispatch_callback(callback):
"""Dispatch to the callback object
:param callback: A :class:`~kazoo.protocol.states.Callback`
object to be called.
"""
class IAsyncResult(Interface):
"""An Async Result object that can be queried for a value that has
been set asyncronously
This object is modeled on the ``gevent`` AsyncResult object.
The implementation must account for the fact that the :meth:`set`
and :meth:`set_exception` methods will be called from within the
Zookeeper thread which may require extra care under asynchronous
environments.
"""
value = Attribute(
"""Holds the value passed to :meth:`set` if :meth:`set` was
called. Otherwise `None`""")
exception = Attribute(
"""Holds the exception instance passed to :meth:`set_exception`
if :meth:`set_exception` was called. Otherwise `None`""")
def ready():
"""Return `True` if and only if it holds a value or an
exception"""
def successful():
"""Return `True` if and only if it is ready and holds a
value"""
def set(value=None):
"""Store the value. Wake up the waiters.
:param value: Value to store as the result.
Any waiters blocking on :meth:`get` or :meth:`wait` are woken
up. Sequential calls to :meth:`wait` and :meth:`get` will not
block at all."""
def set_exception(exception):
"""Store the exception. Wake up the waiters.
:param exception: Exception to raise when fetching the value.
Any waiters blocking on :meth:`get` or :meth:`wait` are woken
up. Sequential calls to :meth:`wait` and :meth:`get` will not
block at all."""
def get(block=True, timeout=None):
"""Return the stored value or raise the exception
:param block: Whether this method should block or return
immediately.
:type block: bool
:param timeout: How long to wait for a value when `block` is
`True`.
:type timeout: float
If this instance already holds a value / an exception, return /
raise it immediately. Otherwise, block until :meth:`set` or
:meth:`set_exception` has been called or until the optional
timeout occurs."""
def get_nowait():
"""Return the value or raise the exception without blocking.
If nothing is available, raise the Timeout exception class on
the associated :class:`IHandler` interface."""
def wait(timeout=None):
"""Block until the instance is ready.
:param timeout: How long to wait for a value when `block` is
`True`.
:type timeout: float
If this instance already holds a value / an exception, return /
raise it immediately. Otherwise, block until :meth:`set` or
:meth:`set_exception` has been called or until the optional
timeout occurs."""
def rawlink(callback):
"""Register a callback to call when a value or an exception is
set
:param callback:
A callback function to call after :meth:`set` or
:meth:`set_exception` has been called. This function will
be passed a single argument, this instance.
:type callback: func
"""
def unlink(callback):
"""Remove the callback set by :meth:`rawlink`
:param callback: A callback function to remove.
:type callback: func
""" | kazoo/interfaces.py | from zope.interface import (
Attribute,
Interface,
)
# public API
class IHandler(Interface):
"""A Callback Handler for Zookeeper completion and watch callbacks
This object must implement several methods responsible for
determining how completion / watch callbacks are handled as well as
the method for calling :class:`IAsyncResult` callback functions.
These functions are used to abstract differences between a Python
threading environment and asynchronous single-threaded environments
like gevent. The minimum functionality needed for Kazoo to handle
these differences are encompassed in this interface.
The Handler should document how callbacks are called for:
* Zookeeper completion events
* Zookeeper watch events
"""
name = Attribute(
"""Human readable name of the Handler interface""")
timeout_exception = Attribute(
"""Exception class that should be thrown and captured if a
result is not available within the given time""")
sleep_func = Attribute(
"""Appropriate sleep function that can be called with a single
argument and sleep.""")
empty = Attribute(
"""Exception class that should be thrown and captured if the
queue is empty within the given time""")
def start():
"""Start the handler, used for setting up the handler."""
def stop():
"""Stop the handler. Should block until the handler is safely
stopped."""
def select():
"""A select method that implements Python's select.select
API"""
def socket():
"""A socket method that implements Python's socket.socket
API"""
def peekable_queue():
"""Return an appropriate object that implements Python's
Queue.Queue API with a .peek method"""
def event_object():
"""Return an appropriate object that implements Python's
threading.Event API"""
def lock_object():
"""Return an appropriate object that implements Python's
threading.Lock API"""
def rlock_object():
"""Return an appropriate object that implements Python's
threading.RLock API"""
def async_result():
"""Return an instance that conforms to the
:class:`~IAsyncResult` interface appropriate for this
handler"""
def spawn(func, *args, **kwargs):
"""Spawn a function to run asynchronously
:param args: args to call the function with.
:param kwargs: keyword args to call the function with.
This method should return immediately and execute the function
with the provided args and kwargs in an asynchronous manner.
"""
def dispatch_callback(callback):
"""Dispatch to the callback object
:param callback: A :class:`~kazoo.protocol.states.Callback`
object to be called.
"""
class IAsyncResult(Interface):
"""An Async Result object that can be queried for a value that has
been set asyncronously
This object is modeled on the ``gevent`` AsyncResult object.
The implementation must account for the fact that the :meth:`set`
and :meth:`set_exception` methods will be called from within the
Zookeeper thread which may require extra care under asynchronous
environments.
"""
value = Attribute(
"""Holds the value passed to :meth:`set` if :meth:`set` was
called. Otherwise `None`""")
exception = Attribute(
"""Holds the exception instance passed to :meth:`set_exception`
if :meth:`set_exception` was called. Otherwise `None`""")
def ready():
"""Return `True` if and only if it holds a value or an
exception"""
def successful():
"""Return `True` if and only if it is ready and holds a
value"""
def set(value=None):
"""Store the value. Wake up the waiters.
:param value: Value to store as the result.
Any waiters blocking on :meth:`get` or :meth:`wait` are woken
up. Sequential calls to :meth:`wait` and :meth:`get` will not
block at all."""
def set_exception(exception):
"""Store the exception. Wake up the waiters.
:param exception: Exception to raise when fetching the value.
Any waiters blocking on :meth:`get` or :meth:`wait` are woken
up. Sequential calls to :meth:`wait` and :meth:`get` will not
block at all."""
def get(block=True, timeout=None):
"""Return the stored value or raise the exception
:param block: Whether this method should block or return
immediately.
:type block: bool
:param timeout: How long to wait for a value when `block` is
`True`.
:type timeout: float
If this instance already holds a value / an exception, return /
raise it immediately. Otherwise, block until :meth:`set` or
:meth:`set_exception` has been called or until the optional
timeout occurs."""
def get_nowait():
"""Return the value or raise the exception without blocking.
If nothing is available, raise the Timeout exception class on
the associated :class:`IHandler` interface."""
def wait(timeout=None):
"""Block until the instance is ready.
:param timeout: How long to wait for a value when `block` is
`True`.
:type timeout: float
If this instance already holds a value / an exception, return /
raise it immediately. Otherwise, block until :meth:`set` or
:meth:`set_exception` has been called or until the optional
timeout occurs."""
def rawlink(callback):
"""Register a callback to call when a value or an exception is
set
:param callback:
A callback function to call after :meth:`set` or
:meth:`set_exception` has been called. This function will
be passed a single argument, this instance.
:type callback: func
"""
def unlink(callback):
"""Remove the callback set by :meth:`rawlink`
:param callback: A callback function to remove.
:type callback: func
""" | 0.903341 | 0.530176 |
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.webdriver.firefox.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from config import Config
from utils.helpers import get_random_file_entry
class Driver:
config = Config()
extension_path = ""
@staticmethod
def chrome():
options = webdriver.ChromeOptions()
options.add_argument('--start-maximized')
options.add_argument('--disable-popup-blocking')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--disable-notifications')
options.add_argument('--verbose')
options.add_argument('—no-sandbox')
options.add_argument('—disable-gpu')
options.add_argument("user-data-dir=selenium")
options.add_argument(f'--proxy-server=')
# Load Chrome extension
# Reference: https://coreygoldberg.blogspot.com/2018/09/python-using-chrome-extensions-with.html
options.add_argument(
'--load-extension={}'.format(Driver.extension_path)
)
options.add_argument(
f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36')
options.add_experimental_option("prefs", {
'download.default_directory': Driver.config.DOWNLOAD_DIR,
'download.prompt_for_download': False,
'download.directory_upgrade': True,
'safebrowsing.enabled': True,
'useAutomationExtension': False,
'excludeSwitches': ['enable-automation'],
'disk-cache-size': 4096
})
driver = webdriver.Chrome(
ChromeDriverManager().install(),
options=options
)
driver.set_page_load_timeout(120)
driver.execute_script(
"var s=window.document.createElement('script'); s.src='javascript.js';window.document.head.appendChild(s);")
return driver
@staticmethod
def firefox():
profile = webdriver.FirefoxOptions()
profile.set_preference("dom.push.enabled", False)
profile.headless = Driver.config.INTERFACE
profile.set_preference("browser.download.panel.shown", False)
profile.set_preference(
"browser.helperApps.neverAsk.openFile", "text/csv,application/vnd.ms-excel")
profile.set_preference(
"browser.helperApps.neverAsk.saveToDisk", "text/csv,application/vnd.ms-excel")
profile.set_preference("browser.download.folderList", 2)
profile.set_preference(
"browser.download.dir", Driver.config.DOWNLOAD_DIR)
driver = webdriver.Firefox(
GeckoDriverManager().install(),
firefox_options=profile
)
driver.maximize_window()
driver.set_page_load_timeout(120)
#firefox_set_proxy()
return driver
def firefox_set_proxy(self):
driver = Driver.firefox()
proxy = get_random_file_entry(Driver.config.PROXY_FILE)
proxy = get_proxy()['proxy'].split(":")
host = proxy[0]
port = int(proxy[1])
driver.execute("SET_CONTEXT", {"context": "chrome"})
try:
driver.execute_script("""
Services.prefs.setIntPref('network.proxy.type', 1);
Services.prefs.setCharPref("network.proxy.http", arguments[0]);
Services.prefs.setIntPref("network.proxy.http_port", arguments[1]);
Services.prefs.setCharPref("network.proxy.ssl", arguments[0]);
Services.prefs.setIntPref("network.proxy.ssl_port", arguments[1]);
""", host, port)
finally:
driver.execute("SET_CONTEXT", {"context": "content"}) | {{cookiecutter.project_name}}/utils/Driver.py | from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.webdriver.firefox.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from config import Config
from utils.helpers import get_random_file_entry
class Driver:
config = Config()
extension_path = ""
@staticmethod
def chrome():
options = webdriver.ChromeOptions()
options.add_argument('--start-maximized')
options.add_argument('--disable-popup-blocking')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--disable-notifications')
options.add_argument('--verbose')
options.add_argument('—no-sandbox')
options.add_argument('—disable-gpu')
options.add_argument("user-data-dir=selenium")
options.add_argument(f'--proxy-server=')
# Load Chrome extension
# Reference: https://coreygoldberg.blogspot.com/2018/09/python-using-chrome-extensions-with.html
options.add_argument(
'--load-extension={}'.format(Driver.extension_path)
)
options.add_argument(
f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36')
options.add_experimental_option("prefs", {
'download.default_directory': Driver.config.DOWNLOAD_DIR,
'download.prompt_for_download': False,
'download.directory_upgrade': True,
'safebrowsing.enabled': True,
'useAutomationExtension': False,
'excludeSwitches': ['enable-automation'],
'disk-cache-size': 4096
})
driver = webdriver.Chrome(
ChromeDriverManager().install(),
options=options
)
driver.set_page_load_timeout(120)
driver.execute_script(
"var s=window.document.createElement('script'); s.src='javascript.js';window.document.head.appendChild(s);")
return driver
@staticmethod
def firefox():
profile = webdriver.FirefoxOptions()
profile.set_preference("dom.push.enabled", False)
profile.headless = Driver.config.INTERFACE
profile.set_preference("browser.download.panel.shown", False)
profile.set_preference(
"browser.helperApps.neverAsk.openFile", "text/csv,application/vnd.ms-excel")
profile.set_preference(
"browser.helperApps.neverAsk.saveToDisk", "text/csv,application/vnd.ms-excel")
profile.set_preference("browser.download.folderList", 2)
profile.set_preference(
"browser.download.dir", Driver.config.DOWNLOAD_DIR)
driver = webdriver.Firefox(
GeckoDriverManager().install(),
firefox_options=profile
)
driver.maximize_window()
driver.set_page_load_timeout(120)
#firefox_set_proxy()
return driver
def firefox_set_proxy(self):
driver = Driver.firefox()
proxy = get_random_file_entry(Driver.config.PROXY_FILE)
proxy = get_proxy()['proxy'].split(":")
host = proxy[0]
port = int(proxy[1])
driver.execute("SET_CONTEXT", {"context": "chrome"})
try:
driver.execute_script("""
Services.prefs.setIntPref('network.proxy.type', 1);
Services.prefs.setCharPref("network.proxy.http", arguments[0]);
Services.prefs.setIntPref("network.proxy.http_port", arguments[1]);
Services.prefs.setCharPref("network.proxy.ssl", arguments[0]);
Services.prefs.setIntPref("network.proxy.ssl_port", arguments[1]);
""", host, port)
finally:
driver.execute("SET_CONTEXT", {"context": "content"}) | 0.456652 | 0.044183 |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import config as cfg
class Lenet:
def __init__(self):
self.raw_input_image = tf.placeholder(tf.float32, [None, 784])
self.input_images = tf.reshape(self.raw_input_image, [-1, 28, 28, 1])
self.raw_input_label = tf.placeholder("float", [None, 10])
self.input_labels = tf.cast(self.raw_input_label, tf.int32)
self.dropout = cfg.KEEP_PROB
with tf.variable_scope("Lenet") as scope:
self.train_digits = self.construct_net(True)
scope.reuse_variables()
self.pred_digits = self.construct_net(False)
self.prediction = tf.argmax(self.pred_digits, 1)
self.correct_prediction = tf.equal(tf.argmax(self.pred_digits, 1), tf.argmax(self.input_labels, 1))
self.train_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))
self.loss = slim.losses.softmax_cross_entropy(self.train_digits, self.input_labels)
self.lr = cfg.LEARNING_RATE
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
def construct_net(self, is_trained=True):
with slim.arg_scope([slim.conv2d], padding='VALID',
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.conv2d(self.input_images, 6, [5, 5], 1, padding='SAME', scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.conv2d(net, 16, [5, 5], 1, scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.conv2d(net, 120, [5, 5], 1, scope='conv5')
net = slim.flatten(net, scope='flat6')
net = slim.fully_connected(net, 84, scope='fc7')
net = slim.dropout(net, self.dropout, is_training=is_trained, scope='dropout8')
digits = slim.fully_connected(net, 10, scope='fc9')
return digits | lenet.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import config as cfg
class Lenet:
def __init__(self):
self.raw_input_image = tf.placeholder(tf.float32, [None, 784])
self.input_images = tf.reshape(self.raw_input_image, [-1, 28, 28, 1])
self.raw_input_label = tf.placeholder("float", [None, 10])
self.input_labels = tf.cast(self.raw_input_label, tf.int32)
self.dropout = cfg.KEEP_PROB
with tf.variable_scope("Lenet") as scope:
self.train_digits = self.construct_net(True)
scope.reuse_variables()
self.pred_digits = self.construct_net(False)
self.prediction = tf.argmax(self.pred_digits, 1)
self.correct_prediction = tf.equal(tf.argmax(self.pred_digits, 1), tf.argmax(self.input_labels, 1))
self.train_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))
self.loss = slim.losses.softmax_cross_entropy(self.train_digits, self.input_labels)
self.lr = cfg.LEARNING_RATE
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
def construct_net(self, is_trained=True):
with slim.arg_scope([slim.conv2d], padding='VALID',
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.conv2d(self.input_images, 6, [5, 5], 1, padding='SAME', scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.conv2d(net, 16, [5, 5], 1, scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.conv2d(net, 120, [5, 5], 1, scope='conv5')
net = slim.flatten(net, scope='flat6')
net = slim.fully_connected(net, 84, scope='fc7')
net = slim.dropout(net, self.dropout, is_training=is_trained, scope='dropout8')
digits = slim.fully_connected(net, 10, scope='fc9')
return digits | 0.783077 | 0.330174 |
#------------------------------
import numpy as np
#------------------------------
def hist_values(nda) :
"""Depending on nda.dtype fills/returns 1-D 2^8(16)-bin histogram-array of 8(16)-bit values of input n-d array
"""
#print '%s for array dtype=%s'%(FR().f_code.co_name, str(nda.dtype))
if nda.dtype == np.uint8 :
return np.bincount(nda.flatten(), weights=None, minlength=1<<8)
elif nda.dtype == np.uint16 :
return np.bincount(nda.flatten(), weights=None, minlength=1<<16)
elif nda.dtype == np.int16 :
unda = nda.astype(np.uint16) # int16 (1,2,-3,0,4,-5,...) -> uint16 (1,2,0,65533,4,65531,...)
return np.bincount(unda.flatten(), weights=None, minlength=1<<16)
else :
sys.exit('method %s get unexpected nda dtype=%s. Use np.uint8 or np.(u)int16'%(FR().f_code.co_name, str(nda.dtype)))
#------------------------------
def hist_probabilities(nda) :
"""Returns histogram-array of probabilities for each of (u)int16, uint8 intensity
"""
#print('%s for array dtype=%s'%(FR().f_code.co_name, str(nda.dtype)))
nvals = nda.size
ph = np.array(hist_values(nda), dtype=np.float)
ph /= nvals
#print('Check sum of probabilities: %.6f for number of values in array = %d' % (ph.sum(), nvals))
return ph
#------------------------------
def entropy(nda) :
"""Evaluates n-d array entropy using formula from https://en.wikipedia.org/wiki/Entropy_%28information_theory%29
"""
unda = None # histogram array indexes must be unsigned
if nda.dtype == np.uint8 : unda = nda
elif nda.dtype == np.uint16: unda = nda
elif nda.dtype == np.int16 : unda = nda.astype(np.uint16) # int16 (1,2,-3,0,4,-5,...) -> uint16 (1,2,0,65533,4,65531,...)
prob_h = hist_probabilities(unda)
p_log2p_nda = [p*np.log2(p) for p in prob_h if p>0]
ent = -np.sum(p_log2p_nda)
#print_ndarr(hist_values(nda), name='Histogram of uint16 values', first=1500, last=1520)
#print_ndarr(prob_h, name='Histogram of probabilities', first=1500, last=1520)
#print_ndarr(prob_nda, name='per pixel array of probabilities\n', first=1000, last=1010)
#print_ndarr(p_log2p_nda, name='per pixel array of P*log2(P)\n', first=1000, last=1010)
return ent
#------------------------------
## formula in https://en.wikipedia.org/wiki/Entropy_%28information_theory%29
## sums over all (x_i) which is a set of possible values....
## this method sums over set (one entry) of probabilities
#------------------------------
def entropy_v1(nda) :
"""The same as entropy(nda) in a single place.
"""
#print('%s for array dtype=%s'%(FR().f_code.co_name, str(nda.dtype)))
unda = nda
if nda.dtype == np.uint8 : unda = nda
elif nda.dtype == np.uint16 : unda = nda
elif nda.dtype == np.int16 : unda = nda.astype(np.uint16) # int16 (1,2,-3,0,4,-5,...) -> uint16 (1,2,0,65533,4,65531,...)
else : sys.exit('method %s get unexpected nda dtype=%s. Use np.uint8 or np.(u)int16'%(FR().f_code.co_name, str(nda.dtype)))
hsize = (1<<8) if nda.dtype == np.uint8 else (1<<16)
vals_h = np.bincount(unda.flatten(), weights=None, minlength=hsize)
prob_h = np.array(vals_h, dtype=np.float) / unda.size
#prob_nda = prob_h[unda]
#p_log2p_nda = prob_nda * np.log2(prob_nda)
#ent = -p_log2p_nda.sum()
p_log2p_nda = [p*np.log2(p) for p in prob_h if p>0]
ent = -np.sum(p_log2p_nda)
return ent
#------------------------------
def entropy_cpo(signal):
'''Entropy evaluation method found by cpo on web
Function returns entropy of a signal, which is 1-D numpy array
'''
lensig=signal.size
symset=list(set(signal))
numsym=len(symset)
propab=[np.size(signal[signal==i])/(1.0*lensig) for i in symset]
ent=np.sum([p*np.log2(1.0/p) for p in propab])
return ent
#------------------------------
#------------------------------
#------------------------------
def test_entropy():
print('In %s' % sys._getframe().f_code.co_name)
from psana.pyalgos.generic.NDArrGenerators import random_standard
from psana.pyalgos.generic.NDArrUtils import print_ndarr
from time import time
arr_float = random_standard(shape=(100000,), mu=200, sigma=25, dtype=np.float)
arr_int16 = arr_float.astype(np.int16)
print_ndarr(arr_int16, name='arr_int16', first=0, last=10)
t0_sec = time()
ent1 = entropy(arr_int16); t1_sec = time()
ent2 = entropy_v1(arr_int16); t2_sec = time()
ent3 = entropy_cpo(arr_int16); t3_sec = time()
print('entropy(arr_int16) = %.6f, time=%.6f sec' % (ent1, t1_sec-t0_sec))
print('entropy_v1(arr_int16) = %.6f, time=%.6f sec' % (ent2, t2_sec-t1_sec))
print('entropy_cpo(arr_int16) = %.6f, time=%.6f sec' % (ent3, t3_sec-t2_sec))
#------------------------------
def unitest_entropy():
import sys
from psana.pyalgos.generic.NDArrGenerators import random_standard
print('In %s' % sys._getframe().f_code.co_name)
np.random.seed(42)
arr_int16 = random_standard(shape=(100000,), mu=200, sigma=25, dtype=np.int16)
ent1 = entropy(arr_int16);
print('entropy(arr_int16) = %.6f' % ent1)
assert('%.6f'%ent1 == '6.690948')
#------------------------------
if __name__ == "__main__" :
import sys; global sys
tname = sys.argv[1] if len(sys.argv) > 1 else '0'
print(50*'_', '\nTest %s' % tname)
if tname == '0': test_entropy()
elif tname == '1': test_entropy()
elif tname == '2': unitest_entropy()
else : sys.exit('Test %s is not implemented' % tname)
sys.exit('End of Test %s' % tname)
#------------------------------ | psana/psana/pyalgos/generic/Entropy.py | #------------------------------
import numpy as np
#------------------------------
def hist_values(nda) :
"""Depending on nda.dtype fills/returns 1-D 2^8(16)-bin histogram-array of 8(16)-bit values of input n-d array
"""
#print '%s for array dtype=%s'%(FR().f_code.co_name, str(nda.dtype))
if nda.dtype == np.uint8 :
return np.bincount(nda.flatten(), weights=None, minlength=1<<8)
elif nda.dtype == np.uint16 :
return np.bincount(nda.flatten(), weights=None, minlength=1<<16)
elif nda.dtype == np.int16 :
unda = nda.astype(np.uint16) # int16 (1,2,-3,0,4,-5,...) -> uint16 (1,2,0,65533,4,65531,...)
return np.bincount(unda.flatten(), weights=None, minlength=1<<16)
else :
sys.exit('method %s get unexpected nda dtype=%s. Use np.uint8 or np.(u)int16'%(FR().f_code.co_name, str(nda.dtype)))
#------------------------------
def hist_probabilities(nda) :
"""Returns histogram-array of probabilities for each of (u)int16, uint8 intensity
"""
#print('%s for array dtype=%s'%(FR().f_code.co_name, str(nda.dtype)))
nvals = nda.size
ph = np.array(hist_values(nda), dtype=np.float)
ph /= nvals
#print('Check sum of probabilities: %.6f for number of values in array = %d' % (ph.sum(), nvals))
return ph
#------------------------------
def entropy(nda) :
"""Evaluates n-d array entropy using formula from https://en.wikipedia.org/wiki/Entropy_%28information_theory%29
"""
unda = None # histogram array indexes must be unsigned
if nda.dtype == np.uint8 : unda = nda
elif nda.dtype == np.uint16: unda = nda
elif nda.dtype == np.int16 : unda = nda.astype(np.uint16) # int16 (1,2,-3,0,4,-5,...) -> uint16 (1,2,0,65533,4,65531,...)
prob_h = hist_probabilities(unda)
p_log2p_nda = [p*np.log2(p) for p in prob_h if p>0]
ent = -np.sum(p_log2p_nda)
#print_ndarr(hist_values(nda), name='Histogram of uint16 values', first=1500, last=1520)
#print_ndarr(prob_h, name='Histogram of probabilities', first=1500, last=1520)
#print_ndarr(prob_nda, name='per pixel array of probabilities\n', first=1000, last=1010)
#print_ndarr(p_log2p_nda, name='per pixel array of P*log2(P)\n', first=1000, last=1010)
return ent
#------------------------------
## formula in https://en.wikipedia.org/wiki/Entropy_%28information_theory%29
## sums over all (x_i) which is a set of possible values....
## this method sums over set (one entry) of probabilities
#------------------------------
def entropy_v1(nda) :
"""The same as entropy(nda) in a single place.
"""
#print('%s for array dtype=%s'%(FR().f_code.co_name, str(nda.dtype)))
unda = nda
if nda.dtype == np.uint8 : unda = nda
elif nda.dtype == np.uint16 : unda = nda
elif nda.dtype == np.int16 : unda = nda.astype(np.uint16) # int16 (1,2,-3,0,4,-5,...) -> uint16 (1,2,0,65533,4,65531,...)
else : sys.exit('method %s get unexpected nda dtype=%s. Use np.uint8 or np.(u)int16'%(FR().f_code.co_name, str(nda.dtype)))
hsize = (1<<8) if nda.dtype == np.uint8 else (1<<16)
vals_h = np.bincount(unda.flatten(), weights=None, minlength=hsize)
prob_h = np.array(vals_h, dtype=np.float) / unda.size
#prob_nda = prob_h[unda]
#p_log2p_nda = prob_nda * np.log2(prob_nda)
#ent = -p_log2p_nda.sum()
p_log2p_nda = [p*np.log2(p) for p in prob_h if p>0]
ent = -np.sum(p_log2p_nda)
return ent
#------------------------------
def entropy_cpo(signal):
'''Entropy evaluation method found by cpo on web
Function returns entropy of a signal, which is 1-D numpy array
'''
lensig=signal.size
symset=list(set(signal))
numsym=len(symset)
propab=[np.size(signal[signal==i])/(1.0*lensig) for i in symset]
ent=np.sum([p*np.log2(1.0/p) for p in propab])
return ent
#------------------------------
#------------------------------
#------------------------------
def test_entropy():
print('In %s' % sys._getframe().f_code.co_name)
from psana.pyalgos.generic.NDArrGenerators import random_standard
from psana.pyalgos.generic.NDArrUtils import print_ndarr
from time import time
arr_float = random_standard(shape=(100000,), mu=200, sigma=25, dtype=np.float)
arr_int16 = arr_float.astype(np.int16)
print_ndarr(arr_int16, name='arr_int16', first=0, last=10)
t0_sec = time()
ent1 = entropy(arr_int16); t1_sec = time()
ent2 = entropy_v1(arr_int16); t2_sec = time()
ent3 = entropy_cpo(arr_int16); t3_sec = time()
print('entropy(arr_int16) = %.6f, time=%.6f sec' % (ent1, t1_sec-t0_sec))
print('entropy_v1(arr_int16) = %.6f, time=%.6f sec' % (ent2, t2_sec-t1_sec))
print('entropy_cpo(arr_int16) = %.6f, time=%.6f sec' % (ent3, t3_sec-t2_sec))
#------------------------------
def unitest_entropy():
import sys
from psana.pyalgos.generic.NDArrGenerators import random_standard
print('In %s' % sys._getframe().f_code.co_name)
np.random.seed(42)
arr_int16 = random_standard(shape=(100000,), mu=200, sigma=25, dtype=np.int16)
ent1 = entropy(arr_int16);
print('entropy(arr_int16) = %.6f' % ent1)
assert('%.6f'%ent1 == '6.690948')
#------------------------------
if __name__ == "__main__" :
import sys; global sys
tname = sys.argv[1] if len(sys.argv) > 1 else '0'
print(50*'_', '\nTest %s' % tname)
if tname == '0': test_entropy()
elif tname == '1': test_entropy()
elif tname == '2': unitest_entropy()
else : sys.exit('Test %s is not implemented' % tname)
sys.exit('End of Test %s' % tname)
#------------------------------ | 0.448426 | 0.463809 |
def style_transfer(content_image, style_image,
content_layer_ids, style_layer_ids,
weight_content=1.5, weight_style=10.0,
weight_denoise=0.3,
num_iterations=120, step_size=10.0):
# operations to the graph so it can grow very large
# and run out of RAM if we keep using the same instance.
model = vgg16.VGG16()
# Create a TensorFlow-session.
session = tf.InteractiveSession(graph=model.graph)
# Print the names of the content-layers.
print("Content layers:")
print(model.get_layer_names(content_layer_ids))
print()
# Print the names of the style-layers.
print("Style layers:")
print(model.get_layer_names(style_layer_ids))
print()
# Create the loss-function for the content-layers and -image.
loss_content = create_content_loss(session=session,
model=model,
content_image=content_image,
layer_ids=content_layer_ids)
# Create the loss-function for the style-layers and -image.
loss_style = create_style_loss(session=session,
model=model,
style_image=style_image,
layer_ids=style_layer_ids)
# Create the loss-function for the denoising of the mixed-image.
loss_denoise = create_denoise_loss(model)
# Create TensorFlow variables for adjusting the values of
# the loss-functions. This is explained below.
adj_content = tf.Variable(1e-10, name='adj_content')
adj_style = tf.Variable(1e-10, name='adj_style')
adj_denoise = tf.Variable(1e-10, name='adj_denoise')
# Initialize the adjustment values for the loss-functions.
session.run([adj_content.initializer,
adj_style.initializer,
adj_denoise.initializer])
# Create TensorFlow operations for updating the adjustment values.
# These are basically just the reciprocal values of the
# loss-functions, with a small value 1e-10 added to avoid the
# possibility of division by zero.
update_adj_content = adj_content.assign(1.0 / (loss_content + 1e-10))
update_adj_style = adj_style.assign(1.0 / (loss_style + 1e-10))
update_adj_denoise = adj_denoise.assign(1.0 / (loss_denoise + 1e-10))
# This is the weighted loss-function that we will minimize
# below in order to generate the mixed-image.
# Because we multiply the loss-values with their reciprocal
# adjustment values, we can use relative weights for the
# loss-functions that are easier to select, as they are
# independent of the exact choice of style- and content-layers.
loss_combined = weight_content * adj_content * loss_content + \
weight_style * adj_style * loss_style + \
weight_denoise * adj_denoise * loss_denoise
# Use TensorFlow to get the mathematical function for the
# gradient of the combined loss-function with regard to
# the input image.
gradient = tf.gradients(loss_combined, model.input)
# List of tensors that we will run in each optimization iteration.
run_list = [gradient, update_adj_content, update_adj_style, \
update_adj_denoise]
# The mixed-image is initialized with random noise.
# It is the same size as the content-image.
mixed_image = np.random.rand(*content_image.shape) + 128
for i in range(num_iterations):
# Create a feed-dict with the mixed-image.
feed_dict = model.create_feed_dict(image=mixed_image)
# Use TensorFlow to calculate the value of the
# gradient, as well as updating the adjustment values.
grad, adj_content_val, adj_style_val, adj_denoise_val \
= session.run(run_list, feed_dict=feed_dict)
# Reduce the dimensionality of the gradient.
grad = np.squeeze(grad)
# Scale the step-size according to the gradient-values.
step_size_scaled = step_size / (np.std(grad) + 1e-8)
# Update the image by following the gradient.
mixed_image -= grad * step_size_scaled
# Ensure the image has valid pixel-values between 0 and 255.
mixed_image = np.clip(mixed_image, 0.0, 255.0)
# Print a little progress-indicator.
print(". ", end="")
# Display status once every 10 iterations, and the last.
if (i % 10 == 0) or (i == num_iterations - 1):
print()
print("Iteration:", i)
# Print adjustment weights for loss-functions.
msg = "Weight Adj. for Content: {0:.2e}, Style: {1:.2e}, Denoise: {2:.2e}"
print(msg.format(adj_content_val, adj_style_val, adj_denoise_val))
# Plot the content-, style- and mixed-images.
plot_images(content_image=content_image,
style_image=style_image,
mixed_image=mixed_image)
print()
print("Final image:")
plot_imagebig(mixed_image)
# Close the TensorFlow session to release its resources.
session.close()
# Return the mixed-image.
return mixed_image | Style.py | def style_transfer(content_image, style_image,
content_layer_ids, style_layer_ids,
weight_content=1.5, weight_style=10.0,
weight_denoise=0.3,
num_iterations=120, step_size=10.0):
# operations to the graph so it can grow very large
# and run out of RAM if we keep using the same instance.
model = vgg16.VGG16()
# Create a TensorFlow-session.
session = tf.InteractiveSession(graph=model.graph)
# Print the names of the content-layers.
print("Content layers:")
print(model.get_layer_names(content_layer_ids))
print()
# Print the names of the style-layers.
print("Style layers:")
print(model.get_layer_names(style_layer_ids))
print()
# Create the loss-function for the content-layers and -image.
loss_content = create_content_loss(session=session,
model=model,
content_image=content_image,
layer_ids=content_layer_ids)
# Create the loss-function for the style-layers and -image.
loss_style = create_style_loss(session=session,
model=model,
style_image=style_image,
layer_ids=style_layer_ids)
# Create the loss-function for the denoising of the mixed-image.
loss_denoise = create_denoise_loss(model)
# Create TensorFlow variables for adjusting the values of
# the loss-functions. This is explained below.
adj_content = tf.Variable(1e-10, name='adj_content')
adj_style = tf.Variable(1e-10, name='adj_style')
adj_denoise = tf.Variable(1e-10, name='adj_denoise')
# Initialize the adjustment values for the loss-functions.
session.run([adj_content.initializer,
adj_style.initializer,
adj_denoise.initializer])
# Create TensorFlow operations for updating the adjustment values.
# These are basically just the reciprocal values of the
# loss-functions, with a small value 1e-10 added to avoid the
# possibility of division by zero.
update_adj_content = adj_content.assign(1.0 / (loss_content + 1e-10))
update_adj_style = adj_style.assign(1.0 / (loss_style + 1e-10))
update_adj_denoise = adj_denoise.assign(1.0 / (loss_denoise + 1e-10))
# This is the weighted loss-function that we will minimize
# below in order to generate the mixed-image.
# Because we multiply the loss-values with their reciprocal
# adjustment values, we can use relative weights for the
# loss-functions that are easier to select, as they are
# independent of the exact choice of style- and content-layers.
loss_combined = weight_content * adj_content * loss_content + \
weight_style * adj_style * loss_style + \
weight_denoise * adj_denoise * loss_denoise
# Use TensorFlow to get the mathematical function for the
# gradient of the combined loss-function with regard to
# the input image.
gradient = tf.gradients(loss_combined, model.input)
# List of tensors that we will run in each optimization iteration.
run_list = [gradient, update_adj_content, update_adj_style, \
update_adj_denoise]
# The mixed-image is initialized with random noise.
# It is the same size as the content-image.
mixed_image = np.random.rand(*content_image.shape) + 128
for i in range(num_iterations):
# Create a feed-dict with the mixed-image.
feed_dict = model.create_feed_dict(image=mixed_image)
# Use TensorFlow to calculate the value of the
# gradient, as well as updating the adjustment values.
grad, adj_content_val, adj_style_val, adj_denoise_val \
= session.run(run_list, feed_dict=feed_dict)
# Reduce the dimensionality of the gradient.
grad = np.squeeze(grad)
# Scale the step-size according to the gradient-values.
step_size_scaled = step_size / (np.std(grad) + 1e-8)
# Update the image by following the gradient.
mixed_image -= grad * step_size_scaled
# Ensure the image has valid pixel-values between 0 and 255.
mixed_image = np.clip(mixed_image, 0.0, 255.0)
# Print a little progress-indicator.
print(". ", end="")
# Display status once every 10 iterations, and the last.
if (i % 10 == 0) or (i == num_iterations - 1):
print()
print("Iteration:", i)
# Print adjustment weights for loss-functions.
msg = "Weight Adj. for Content: {0:.2e}, Style: {1:.2e}, Denoise: {2:.2e}"
print(msg.format(adj_content_val, adj_style_val, adj_denoise_val))
# Plot the content-, style- and mixed-images.
plot_images(content_image=content_image,
style_image=style_image,
mixed_image=mixed_image)
print()
print("Final image:")
plot_imagebig(mixed_image)
# Close the TensorFlow session to release its resources.
session.close()
# Return the mixed-image.
return mixed_image | 0.858837 | 0.681432 |
import traceback
import base
import dbutils
import extensions
from textutils import json_encode, json_decode
from operation.basictypes import (OperationResult, OperationError,
OperationFailure, OperationFailureMustLogin)
from operation.typechecker import (Optional, Request, RestrictedString, SHA1,
RestrictedInteger, NonNegativeInteger,
PositiveInteger, Review, Repository, Commit,
File, User, Extension)
class Operation(object):
"""
Base class for operation implementations.
Sub-classes must call Operation.__init__() to define the structure of
expected input data.
An operation accepts input in the form of a JSON object literal and returns
a result in the form of a JSON object literal. The object contains a
property named "status" whose value should be "ok" or "error". If it is
"error", the object contains a property named "error" whose value is an
error message. If the HTTP request method is POST, the input is the request
body (this is the usual case) otherwise, if the HTTP request method is GET,
the input is the value of the "data" URI query parameter (this is supported
to simplify ad-hoc testing).
Operation implementations should inherit this class and implement the
process() method. This method is called with two positional arguments, 'db'
and 'user', and one keyword argument per property in the input value. The
process() method should return an OperationResult object or either return or
raise an OperationError object. Any other raised exceptions are caught and
converted to OperationError objects.
"""
def __init__(self, parameter_types, accept_anonymous_user=False):
"""
Initialize input data type checker.
The parameter_types argument must be a dict object. See TypeChecker and
sub-classes for details on how it works. A parameter types argument of
{ "name": str,
"points": [{"x": int, "y": int }],
"what": Optional(str) }
would for instance represents an input object with two required
properties named "name" and "points", and an optional property named
"what". The "name" and "what" property values should be a strings. The
"points" property value should be an array of objects, each with two
properties named "x" and "y", whose values should be integer.
The operation's process() method would be called with the keyword
arguments "name", "points" and "what".
"""
from operation.typechecker import TypeChecker
if not type(parameter_types) is dict:
raise base.ImplementationError("invalid source type")
self.__checker = TypeChecker.make(parameter_types)
self.__accept_anonymous_user = accept_anonymous_user
def __call__(self, req, db, user):
import auth
from operation.typechecker import TypeCheckerContext
if user.isAnonymous() and not self.__accept_anonymous_user:
return OperationFailureMustLogin()
if req.method == "POST": data = req.read()
else: data = req.getParameter("data")
if not data: raise OperationError("no input")
try: value = json_decode(data)
except ValueError as error: raise OperationError("invalid input: %s" % str(error))
try:
self.__checker(value, TypeCheckerContext(req, db, user))
return self.process(db, user, **value)
except OperationError as error:
return error
except OperationFailure as failure:
return failure
except dbutils.NoSuchUser as error:
return OperationFailure(code="nosuchuser",
title="Who is '%s'?" % error.name,
message="There is no user in Critic's database named that.")
except dbutils.NoSuchReview as error:
return OperationFailure(code="nosuchreview",
title="Invalid review ID",
message="The review ID r/%d is not valid." % error.id)
except auth.AccessDenied as error:
return OperationFailure(code="accessdenied",
title="Access denied",
message=error.message)
except dbutils.TransactionRollbackError:
return OperationFailure(code="transactionrollback",
title="Transaction rolled back",
message="Your database transaction rolled back, probably due to a deadlock. Please try again.")
except extensions.extension.ExtensionError as error:
return OperationFailure(
code="invalidextension",
title="Invalid extension",
message=error.message)
except:
# Decode value again since the type checkers might have modified it.
value = json_decode(data)
error_message = ("User: %s\nReferrer: %s\nData: %s\n\n%s"
% (user.name,
req.getReferrer(),
json_encode(self.sanitize(value), indent=2),
traceback.format_exc()))
db.rollback()
import mailutils
import configuration
if not user.hasRole(db, "developer"):
mailutils.sendExceptionMessage(db, "wsgi[%s]" % req.path, error_message)
if configuration.debug.IS_DEVELOPMENT or user.hasRole(db, "developer"):
return OperationError(error_message)
else:
return OperationError("An unexpected error occurred. " +
"A message has been sent to the system administrator(s) " +
"with details about the problem.")
def process(self, *args, **kwargs):
raise OperationError("not implemented!?!")
def sanitize(self, value):
"""Sanitize arguments value for use in error messages or logs."""
return value
@staticmethod
def requireRole(db, role, user):
if not user.hasRole(db, role):
raise OperationFailure(
code="notallowed",
title="Not allowed!",
message="Operation not permitted, user that lacks role '%s'." % role) | src/operation/__init__.py |
import traceback
import base
import dbutils
import extensions
from textutils import json_encode, json_decode
from operation.basictypes import (OperationResult, OperationError,
OperationFailure, OperationFailureMustLogin)
from operation.typechecker import (Optional, Request, RestrictedString, SHA1,
RestrictedInteger, NonNegativeInteger,
PositiveInteger, Review, Repository, Commit,
File, User, Extension)
class Operation(object):
"""
Base class for operation implementations.
Sub-classes must call Operation.__init__() to define the structure of
expected input data.
An operation accepts input in the form of a JSON object literal and returns
a result in the form of a JSON object literal. The object contains a
property named "status" whose value should be "ok" or "error". If it is
"error", the object contains a property named "error" whose value is an
error message. If the HTTP request method is POST, the input is the request
body (this is the usual case) otherwise, if the HTTP request method is GET,
the input is the value of the "data" URI query parameter (this is supported
to simplify ad-hoc testing).
Operation implementations should inherit this class and implement the
process() method. This method is called with two positional arguments, 'db'
and 'user', and one keyword argument per property in the input value. The
process() method should return an OperationResult object or either return or
raise an OperationError object. Any other raised exceptions are caught and
converted to OperationError objects.
"""
def __init__(self, parameter_types, accept_anonymous_user=False):
"""
Initialize input data type checker.
The parameter_types argument must be a dict object. See TypeChecker and
sub-classes for details on how it works. A parameter types argument of
{ "name": str,
"points": [{"x": int, "y": int }],
"what": Optional(str) }
would for instance represents an input object with two required
properties named "name" and "points", and an optional property named
"what". The "name" and "what" property values should be a strings. The
"points" property value should be an array of objects, each with two
properties named "x" and "y", whose values should be integer.
The operation's process() method would be called with the keyword
arguments "name", "points" and "what".
"""
from operation.typechecker import TypeChecker
if not type(parameter_types) is dict:
raise base.ImplementationError("invalid source type")
self.__checker = TypeChecker.make(parameter_types)
self.__accept_anonymous_user = accept_anonymous_user
def __call__(self, req, db, user):
import auth
from operation.typechecker import TypeCheckerContext
if user.isAnonymous() and not self.__accept_anonymous_user:
return OperationFailureMustLogin()
if req.method == "POST": data = req.read()
else: data = req.getParameter("data")
if not data: raise OperationError("no input")
try: value = json_decode(data)
except ValueError as error: raise OperationError("invalid input: %s" % str(error))
try:
self.__checker(value, TypeCheckerContext(req, db, user))
return self.process(db, user, **value)
except OperationError as error:
return error
except OperationFailure as failure:
return failure
except dbutils.NoSuchUser as error:
return OperationFailure(code="nosuchuser",
title="Who is '%s'?" % error.name,
message="There is no user in Critic's database named that.")
except dbutils.NoSuchReview as error:
return OperationFailure(code="nosuchreview",
title="Invalid review ID",
message="The review ID r/%d is not valid." % error.id)
except auth.AccessDenied as error:
return OperationFailure(code="accessdenied",
title="Access denied",
message=error.message)
except dbutils.TransactionRollbackError:
return OperationFailure(code="transactionrollback",
title="Transaction rolled back",
message="Your database transaction rolled back, probably due to a deadlock. Please try again.")
except extensions.extension.ExtensionError as error:
return OperationFailure(
code="invalidextension",
title="Invalid extension",
message=error.message)
except:
# Decode value again since the type checkers might have modified it.
value = json_decode(data)
error_message = ("User: %s\nReferrer: %s\nData: %s\n\n%s"
% (user.name,
req.getReferrer(),
json_encode(self.sanitize(value), indent=2),
traceback.format_exc()))
db.rollback()
import mailutils
import configuration
if not user.hasRole(db, "developer"):
mailutils.sendExceptionMessage(db, "wsgi[%s]" % req.path, error_message)
if configuration.debug.IS_DEVELOPMENT or user.hasRole(db, "developer"):
return OperationError(error_message)
else:
return OperationError("An unexpected error occurred. " +
"A message has been sent to the system administrator(s) " +
"with details about the problem.")
def process(self, *args, **kwargs):
raise OperationError("not implemented!?!")
def sanitize(self, value):
"""Sanitize arguments value for use in error messages or logs."""
return value
@staticmethod
def requireRole(db, role, user):
if not user.hasRole(db, role):
raise OperationFailure(
code="notallowed",
title="Not allowed!",
message="Operation not permitted, user that lacks role '%s'." % role) | 0.756537 | 0.229438 |
import os
from pyfakefs.fake_filesystem_unittest import TestCase
import backend
class TestDatasets(TestCase):
def setUp(self):
self.setUpPyfakefs()
self.fs.create_dir(backend.DATASETS_PATH)
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', 'img2.nii'))
self.fs.create_dir(os.path.join(backend.DATASETS_PATH, 'dataset1', 'img3'))
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', 'abc.jpg'))
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', '.dotfile'))
self.fs.create_dir(os.path.join(backend.DATASETS_PATH, 'dataset2'))
self.fs.create_dir(os.path.join(backend.DATASETS_PATH, 'dataset3'))
def test_get_datasets_length(self):
datasets = backend.get_datasets()
num_datasets = len(datasets)
self.assertEqual(num_datasets, 3)
def test_get_datasets_names(self):
datasets = backend.get_datasets()
self.assertEqual(datasets[0].name, 'dataset1')
self.assertEqual(datasets[1].name, 'dataset2')
self.assertEqual(datasets[2].name, 'dataset3')
def test_get_datasets_paths(self):
datasets = backend.get_datasets()
self.assertEqual(datasets[0].path, os.path.join(backend.DATASETS_PATH, 'dataset1'))
self.assertEqual(datasets[1].path, os.path.join(backend.DATASETS_PATH, 'dataset2'))
self.assertEqual(datasets[2].path, os.path.join(backend.DATASETS_PATH, 'dataset3'))
def test_get_dataset_name(self):
dataset = backend.get_dataset('dataset1')
self.assertEqual(dataset.name, 'dataset1')
def test_get_dataset_path(self):
dataset = backend.get_dataset('dataset1')
self.assertEqual(dataset.path, os.path.join(backend.DATASETS_PATH, 'dataset1'))
def test_get_dataset_non_existent(self):
non_existent_dataset = backend.get_dataset('dataset_non_existent')
self.assertIsNone(non_existent_dataset)
def test_is_image_path(self):
dataset_path = os.path.join(backend.DATASETS_PATH, 'dataset1')
self.assertTrue(backend.is_image_path(os.path.join(dataset_path, 'img1.nii.gz')))
self.assertTrue(backend.is_image_path(os.path.join(dataset_path, 'img2.nii')))
self.assertTrue(backend.is_image_path(os.path.join(dataset_path, 'img3')))
self.assertFalse(backend.is_image_path(os.path.join(dataset_path, 'abc.jpg')))
self.assertFalse(backend.is_image_path(os.path.join(dataset_path, '.dotfile')))
def test_get_images_length(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
num_images = len(images)
self.assertEqual(num_images, 3)
def test_get_images_datasets(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
self.assertEqual(images[0].dataset, dataset)
self.assertEqual(images[1].dataset, dataset)
self.assertEqual(images[2].dataset, dataset)
def test_get_images_names(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
self.assertEqual(images[0].name, 'img1.nii.gz')
self.assertEqual(images[1].name, 'img2.nii')
self.assertEqual(images[2].name, 'img3')
def test_get_images_paths(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
self.assertEqual(images[0].path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
self.assertEqual(images[1].path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img2.nii'))
self.assertEqual(images[2].path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img3'))
def test_get_image_dataset(self):
dataset = backend.get_dataset('dataset1')
image = backend.get_image(dataset, 'img1.nii.gz')
self.assertEqual(image.dataset, dataset)
def test_get_image_name(self):
dataset = backend.get_dataset('dataset1')
image = backend.get_image(dataset, 'img1.nii.gz')
self.assertEqual(image.name, 'img1.nii.gz')
def test_get_image_path(self):
dataset = backend.get_dataset('dataset1')
image = backend.get_image(dataset, 'img1.nii.gz')
self.assertEqual(image.path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
def test_get_image_non_existent(self):
dataset = backend.get_dataset('dataset1')
with self.assertRaises(AssertionError):
backend.get_image(dataset, 'non_existent_image.nii.gz')
def test_get_image_by_index_image_count(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(num_images, 3)
def test_get_image_by_index_image_count_empty(self):
dataset = backend.get_dataset('dataset2')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(num_images, 0)
def test_get_image_by_index_dataset(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(image.dataset, dataset)
def test_get_image_by_index_name(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(image.name, 'img1.nii.gz')
def test_get_image_by_index_path(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(image.path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
def test_get_image_by_index_out_of_bounds_lower(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, -1)
self.assertIsNone(image)
def test_get_image_by_index_out_of_bounds_upper(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 3)
self.assertIsNone(image)
class TestDatasetsEmpty(TestCase):
def setUp(self):
self.setUpPyfakefs()
self.fs.create_dir(backend.DATASETS_PATH)
def test_get_datasets_empty(self):
datasets = backend.get_datasets()
num_datasets = len(datasets)
self.assertEqual(num_datasets, 0) | tests/test_backend.py | import os
from pyfakefs.fake_filesystem_unittest import TestCase
import backend
class TestDatasets(TestCase):
def setUp(self):
self.setUpPyfakefs()
self.fs.create_dir(backend.DATASETS_PATH)
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', 'img2.nii'))
self.fs.create_dir(os.path.join(backend.DATASETS_PATH, 'dataset1', 'img3'))
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', 'abc.jpg'))
self.fs.create_file(os.path.join(backend.DATASETS_PATH, 'dataset1', '.dotfile'))
self.fs.create_dir(os.path.join(backend.DATASETS_PATH, 'dataset2'))
self.fs.create_dir(os.path.join(backend.DATASETS_PATH, 'dataset3'))
def test_get_datasets_length(self):
datasets = backend.get_datasets()
num_datasets = len(datasets)
self.assertEqual(num_datasets, 3)
def test_get_datasets_names(self):
datasets = backend.get_datasets()
self.assertEqual(datasets[0].name, 'dataset1')
self.assertEqual(datasets[1].name, 'dataset2')
self.assertEqual(datasets[2].name, 'dataset3')
def test_get_datasets_paths(self):
datasets = backend.get_datasets()
self.assertEqual(datasets[0].path, os.path.join(backend.DATASETS_PATH, 'dataset1'))
self.assertEqual(datasets[1].path, os.path.join(backend.DATASETS_PATH, 'dataset2'))
self.assertEqual(datasets[2].path, os.path.join(backend.DATASETS_PATH, 'dataset3'))
def test_get_dataset_name(self):
dataset = backend.get_dataset('dataset1')
self.assertEqual(dataset.name, 'dataset1')
def test_get_dataset_path(self):
dataset = backend.get_dataset('dataset1')
self.assertEqual(dataset.path, os.path.join(backend.DATASETS_PATH, 'dataset1'))
def test_get_dataset_non_existent(self):
non_existent_dataset = backend.get_dataset('dataset_non_existent')
self.assertIsNone(non_existent_dataset)
def test_is_image_path(self):
dataset_path = os.path.join(backend.DATASETS_PATH, 'dataset1')
self.assertTrue(backend.is_image_path(os.path.join(dataset_path, 'img1.nii.gz')))
self.assertTrue(backend.is_image_path(os.path.join(dataset_path, 'img2.nii')))
self.assertTrue(backend.is_image_path(os.path.join(dataset_path, 'img3')))
self.assertFalse(backend.is_image_path(os.path.join(dataset_path, 'abc.jpg')))
self.assertFalse(backend.is_image_path(os.path.join(dataset_path, '.dotfile')))
def test_get_images_length(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
num_images = len(images)
self.assertEqual(num_images, 3)
def test_get_images_datasets(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
self.assertEqual(images[0].dataset, dataset)
self.assertEqual(images[1].dataset, dataset)
self.assertEqual(images[2].dataset, dataset)
def test_get_images_names(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
self.assertEqual(images[0].name, 'img1.nii.gz')
self.assertEqual(images[1].name, 'img2.nii')
self.assertEqual(images[2].name, 'img3')
def test_get_images_paths(self):
dataset = backend.get_dataset('dataset1')
images = backend.get_images(dataset)
self.assertEqual(images[0].path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
self.assertEqual(images[1].path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img2.nii'))
self.assertEqual(images[2].path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img3'))
def test_get_image_dataset(self):
dataset = backend.get_dataset('dataset1')
image = backend.get_image(dataset, 'img1.nii.gz')
self.assertEqual(image.dataset, dataset)
def test_get_image_name(self):
dataset = backend.get_dataset('dataset1')
image = backend.get_image(dataset, 'img1.nii.gz')
self.assertEqual(image.name, 'img1.nii.gz')
def test_get_image_path(self):
dataset = backend.get_dataset('dataset1')
image = backend.get_image(dataset, 'img1.nii.gz')
self.assertEqual(image.path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
def test_get_image_non_existent(self):
dataset = backend.get_dataset('dataset1')
with self.assertRaises(AssertionError):
backend.get_image(dataset, 'non_existent_image.nii.gz')
def test_get_image_by_index_image_count(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(num_images, 3)
def test_get_image_by_index_image_count_empty(self):
dataset = backend.get_dataset('dataset2')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(num_images, 0)
def test_get_image_by_index_dataset(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(image.dataset, dataset)
def test_get_image_by_index_name(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(image.name, 'img1.nii.gz')
def test_get_image_by_index_path(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 0)
self.assertEqual(image.path, os.path.join(backend.DATASETS_PATH, 'dataset1', 'img1.nii.gz'))
def test_get_image_by_index_out_of_bounds_lower(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, -1)
self.assertIsNone(image)
def test_get_image_by_index_out_of_bounds_upper(self):
dataset = backend.get_dataset('dataset1')
image, num_images = backend.get_image_by_index(dataset, 3)
self.assertIsNone(image)
class TestDatasetsEmpty(TestCase):
def setUp(self):
self.setUpPyfakefs()
self.fs.create_dir(backend.DATASETS_PATH)
def test_get_datasets_empty(self):
datasets = backend.get_datasets()
num_datasets = len(datasets)
self.assertEqual(num_datasets, 0) | 0.464902 | 0.379005 |
import argparse
import time
import util
import azureutils
import sys
config_set_name = "apache-test-application"
def parse_args ():
# Ugly but expedient conversion of ansible-playbook to a parameterized python script
parser = argparse.ArgumentParser()
parser.add_argument("--NODE_IP", type=str, required=True)
parser.add_argument("--SUBSCRIPTION_ID", type=str, required=True)
parser.add_argument("--SERVICE_PRINCIPAL_SECRET", type=str, required=True)
parser.add_argument("--TENANT_ID", type=str, required=True)
parser.add_argument("--CLIENT_ID", type=str, required=True)
return parser.parse_args()
def get_ssg_reference ():
return util.req(
"http://localhost:8100/cm/cloud/service-scaling-groups/",
None
).json()
def poll_for_ssg_present (timeout=1200):
count = 0
while True:
result = get_ssg_reference()
if count >= timeout:
sys.exit(-1)
break
if not result["items"]:
time.sleep(1)
else:
break
util.print_partial(".")
count += 1
def poll_for_ssg_ready (ssg_id, timeout=1200):
url = "http://localhost:8100/cm/cloud/service-scaling-groups/" + ssg_id
count = 0
while True:
if count >= timeout:
sys.exit(-1)
break
result = util.req(url, None)
status = result.json()["status"]
if status == "READY":
break
else:
time.sleep(1)
util.print_partial(".")
count += 1
def deploy_application (ssg_id, node_ip, alb_dns_name):
util.req(
"http://localhost:8100/cm/global/tasks/apply-template",
None,
method="POST",
json={
"resources": {
"ltm:virtual:90735960bf4b": [
{
"parameters": {
"name": "default_vs"
},
"parametersToRemove": [],
"subcollectionResources": {
"profiles:78b1bcfdafad": [
{
"parameters": {},
"parametersToRemove": []
}
],
"profiles:2f52acac9fde": [
{
"parameters": {},
"parametersToRemove": []
}
],
"profiles:9448fe71611e": [
{
"parameters": {},
"parametersToRemove": []
}
]
}
}
],
"ltm:pool:8bc5b256f9d1": [
{
"parameters": {
"name": "pool_0"
},
"parametersToRemove": [],
"subcollectionResources": {
"members:dec6d24dc625": [
{
"parameters": {
"port": "80",
"nodeReference": {
"link": "#/resources/ltm:node:c072248f8e6a/" + node_ip,
"fullPath": "# " + node_ip
}
},
"parametersToRemove": []
}
]
}
}
],
"ltm:node:c072248f8e6a": [
{
"parameters": {
"name": node_ip,
"address": node_ip
},
"parametersToRemove": []
}
],
"ltm:monitor:http:18765a198150": [
{
"parameters": {
"name": "monitor-http"
},
"parametersToRemove": []
}
],
"ltm:profile:client-ssl:78b1bcfdafad": [
{
"parameters": {
"name": "clientssl"
},
"parametersToRemove": []
}
],
"ltm:profile:http:2f52acac9fde": [
{
"parameters": {
"name": "profile_http"
},
"parametersToRemove": []
}
]
},
"addAnalytics": True,
"domains": [
{
"domainName": alb_dns_name
}
],
"configSetName": config_set_name,
"ssgReference": {
"link": "https://localhost/mgmt/cm/cloud/service-scaling-groups/" + ssg_id
},
"azureLoadBalancer": {
"listeners": [
{
"loadBalancerPort": 443,
"instancePort": 443
},
{
"loadBalancerPort": 80,
"instancePort": 80
}
]
},
"subPath": config_set_name,
"templateReference": {
"link": "https://localhost/mgmt/cm/global/templates/10e8d657-ed1c-3cc9-962d-f291ef02512e"
},
"mode": "CREATE"
}
)
# supplement method to construct fqdn for given alb_dns_name
def sanitizeAndGetDnsName(resource_group_name = "", alb_dns_name = ""):
sanitized_dns_name = ""
try:
if alb_dns_name.startswith(resource_group_name):
index_of = alb_dns_name.index(".")
sanitized_dns_name = alb_dns_name[:index_of]
sanitized_dns_name = sanitized_dns_name + '-' + config_set_name + alb_dns_name[index_of:]
except Exception as e:
util.print_partial("Error occured while fetching dns Name " + resource_group_name + "," + alb_dns_name+" \n error:"+str(e))
return sanitized_dns_name
# Method to get FQDN to access demo application on SSG
def getDnsName(args):
resource_group_name = ""
alb_dns_name = ""
try:
resource_group_name = azureutils.getContentsOfResourceGroupLockFile()
credentials = azureutils.getCredentials(args.TENANT_ID, args.CLIENT_ID, args.SERVICE_PRINCIPAL_SECRET)
client = azureutils.getResourceClient(credentials , args.SUBSCRIPTION_ID)
alb_dns_name = azureutils.getDnsName(client, resource_group_name, args.SUBSCRIPTION_ID)
# Since public ip is created dynamically after deploy application python script execution
# Fetching the sample public ip name without demo application and sanitizing it accordingly
# eg: Dns name associated with alb initially would be azure-f5-ssg.eastus.cloudapp.azure.com
# After below method invocation:o/p would be azure-f5-ssg-apache-test-application.eastus.cloudapp.azure.com
alb_dns_name = sanitizeAndGetDnsName(resource_group_name , alb_dns_name)
util.print_partial('Application can be accessible through https on dns Name:' + alb_dns_name)
except Exception as e:
util.print_partial("Exception occurred while fetching azure dns name associated with ssg's resource group "+resource_group_name+" ,failed with error:"+str(e))
return alb_dns_name
def main():
args = parse_args()
util.print_partial("Waiting for SSG to be present...")
poll_for_ssg_present()
util.complete()
util.print_partial("Getting SSG reference...")
ssgs = get_ssg_reference()
util.complete()
# Let this reference be unsafe and tacky so that this fails loudly if the SSG is not present
ssg_id = ssgs["items"][0]["id"]
util.print_partial("Waiting for SSG to be ready...")
poll_for_ssg_ready(ssg_id)
util.complete()
time.sleep(180) # Three minute wait for SSG to settle down
util.print_partial("Getting ALB DNS Name reference...")
alb_dns_name = getDnsName(args)
util.complete()
#TODO: can delete file here azureutils.deleteLockFile()
util.print_partial("Deploying application...")
deploy_application(ssg_id, args.NODE_IP, alb_dns_name)
util.complete()
if __name__ == '__main__':
main() | azure/scripts/deploy-application.py | import argparse
import time
import util
import azureutils
import sys
config_set_name = "apache-test-application"
def parse_args ():
# Ugly but expedient conversion of ansible-playbook to a parameterized python script
parser = argparse.ArgumentParser()
parser.add_argument("--NODE_IP", type=str, required=True)
parser.add_argument("--SUBSCRIPTION_ID", type=str, required=True)
parser.add_argument("--SERVICE_PRINCIPAL_SECRET", type=str, required=True)
parser.add_argument("--TENANT_ID", type=str, required=True)
parser.add_argument("--CLIENT_ID", type=str, required=True)
return parser.parse_args()
def get_ssg_reference ():
return util.req(
"http://localhost:8100/cm/cloud/service-scaling-groups/",
None
).json()
def poll_for_ssg_present (timeout=1200):
count = 0
while True:
result = get_ssg_reference()
if count >= timeout:
sys.exit(-1)
break
if not result["items"]:
time.sleep(1)
else:
break
util.print_partial(".")
count += 1
def poll_for_ssg_ready (ssg_id, timeout=1200):
url = "http://localhost:8100/cm/cloud/service-scaling-groups/" + ssg_id
count = 0
while True:
if count >= timeout:
sys.exit(-1)
break
result = util.req(url, None)
status = result.json()["status"]
if status == "READY":
break
else:
time.sleep(1)
util.print_partial(".")
count += 1
def deploy_application (ssg_id, node_ip, alb_dns_name):
util.req(
"http://localhost:8100/cm/global/tasks/apply-template",
None,
method="POST",
json={
"resources": {
"ltm:virtual:90735960bf4b": [
{
"parameters": {
"name": "default_vs"
},
"parametersToRemove": [],
"subcollectionResources": {
"profiles:78b1bcfdafad": [
{
"parameters": {},
"parametersToRemove": []
}
],
"profiles:2f52acac9fde": [
{
"parameters": {},
"parametersToRemove": []
}
],
"profiles:9448fe71611e": [
{
"parameters": {},
"parametersToRemove": []
}
]
}
}
],
"ltm:pool:8bc5b256f9d1": [
{
"parameters": {
"name": "pool_0"
},
"parametersToRemove": [],
"subcollectionResources": {
"members:dec6d24dc625": [
{
"parameters": {
"port": "80",
"nodeReference": {
"link": "#/resources/ltm:node:c072248f8e6a/" + node_ip,
"fullPath": "# " + node_ip
}
},
"parametersToRemove": []
}
]
}
}
],
"ltm:node:c072248f8e6a": [
{
"parameters": {
"name": node_ip,
"address": node_ip
},
"parametersToRemove": []
}
],
"ltm:monitor:http:18765a198150": [
{
"parameters": {
"name": "monitor-http"
},
"parametersToRemove": []
}
],
"ltm:profile:client-ssl:78b1bcfdafad": [
{
"parameters": {
"name": "clientssl"
},
"parametersToRemove": []
}
],
"ltm:profile:http:2f52acac9fde": [
{
"parameters": {
"name": "profile_http"
},
"parametersToRemove": []
}
]
},
"addAnalytics": True,
"domains": [
{
"domainName": alb_dns_name
}
],
"configSetName": config_set_name,
"ssgReference": {
"link": "https://localhost/mgmt/cm/cloud/service-scaling-groups/" + ssg_id
},
"azureLoadBalancer": {
"listeners": [
{
"loadBalancerPort": 443,
"instancePort": 443
},
{
"loadBalancerPort": 80,
"instancePort": 80
}
]
},
"subPath": config_set_name,
"templateReference": {
"link": "https://localhost/mgmt/cm/global/templates/10e8d657-ed1c-3cc9-962d-f291ef02512e"
},
"mode": "CREATE"
}
)
# supplement method to construct fqdn for given alb_dns_name
def sanitizeAndGetDnsName(resource_group_name = "", alb_dns_name = ""):
sanitized_dns_name = ""
try:
if alb_dns_name.startswith(resource_group_name):
index_of = alb_dns_name.index(".")
sanitized_dns_name = alb_dns_name[:index_of]
sanitized_dns_name = sanitized_dns_name + '-' + config_set_name + alb_dns_name[index_of:]
except Exception as e:
util.print_partial("Error occured while fetching dns Name " + resource_group_name + "," + alb_dns_name+" \n error:"+str(e))
return sanitized_dns_name
# Method to get FQDN to access demo application on SSG
def getDnsName(args):
resource_group_name = ""
alb_dns_name = ""
try:
resource_group_name = azureutils.getContentsOfResourceGroupLockFile()
credentials = azureutils.getCredentials(args.TENANT_ID, args.CLIENT_ID, args.SERVICE_PRINCIPAL_SECRET)
client = azureutils.getResourceClient(credentials , args.SUBSCRIPTION_ID)
alb_dns_name = azureutils.getDnsName(client, resource_group_name, args.SUBSCRIPTION_ID)
# Since public ip is created dynamically after deploy application python script execution
# Fetching the sample public ip name without demo application and sanitizing it accordingly
# eg: Dns name associated with alb initially would be azure-f5-ssg.eastus.cloudapp.azure.com
# After below method invocation:o/p would be azure-f5-ssg-apache-test-application.eastus.cloudapp.azure.com
alb_dns_name = sanitizeAndGetDnsName(resource_group_name , alb_dns_name)
util.print_partial('Application can be accessible through https on dns Name:' + alb_dns_name)
except Exception as e:
util.print_partial("Exception occurred while fetching azure dns name associated with ssg's resource group "+resource_group_name+" ,failed with error:"+str(e))
return alb_dns_name
def main():
args = parse_args()
util.print_partial("Waiting for SSG to be present...")
poll_for_ssg_present()
util.complete()
util.print_partial("Getting SSG reference...")
ssgs = get_ssg_reference()
util.complete()
# Let this reference be unsafe and tacky so that this fails loudly if the SSG is not present
ssg_id = ssgs["items"][0]["id"]
util.print_partial("Waiting for SSG to be ready...")
poll_for_ssg_ready(ssg_id)
util.complete()
time.sleep(180) # Three minute wait for SSG to settle down
util.print_partial("Getting ALB DNS Name reference...")
alb_dns_name = getDnsName(args)
util.complete()
#TODO: can delete file here azureutils.deleteLockFile()
util.print_partial("Deploying application...")
deploy_application(ssg_id, args.NODE_IP, alb_dns_name)
util.complete()
if __name__ == '__main__':
main() | 0.22051 | 0.176778 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import onnx.onnx_cpp2py_export.shape_inference as C
from onnx import ModelProto
from typing import Text, Union
def infer_shapes(model: Union[ModelProto, bytes], check_type: bool = False, strict_mode: bool = False, data_prop: bool = False) -> ModelProto:
    """Apply shape inference to the provided ModelProto.

    Inferred shapes are added to the value_info field of the graph.

    If the inferred values conflict with values already provided in the
    graph, that means that the provided values are invalid (or there is a
    bug in shape inference), and the result is unspecified.

    Arguments:
        model (Union[ModelProto, bytes]): ModelProto instance, or its serialized bytes
        check_type (bool): Checks the type-equality for input and output
        strict_mode (bool): Stricter shape inference, it will throw errors if any;
            Otherwise, simply stop if any error
        data_prop (bool): Enables data propagation for limited operators to perform shape computation

    Returns:
        (ModelProto) model with inferred shape information

    Raises:
        TypeError: if *model* is neither a ModelProto nor bytes.
    """
    if isinstance(model, (ModelProto, bytes)):
        # Serialize if needed, run the C++ inference, then deserialize the result.
        model_str = model if isinstance(model, bytes) else model.SerializeToString()
        inferred_model_str = C.infer_shapes(model_str, check_type, strict_mode, data_prop)
        return onnx.load_from_string(inferred_model_str)
    elif isinstance(model, str):
        # Fixed: the concatenated message previously lacked a space after the comma.
        raise TypeError('infer_shapes only accepts ModelProto or bytes, '
                        'you can use infer_shapes_path for the model path (String).')
    else:
        raise TypeError('infer_shapes only accepts ModelProto or bytes, '
                        'incorrect type: {}'.format(type(model)))
def infer_shapes_path(model_path: Text, output_path: Text = '', check_type: bool = False, strict_mode: bool = False, data_prop: bool = False) -> None:
    """
    Take a model path for shape inference, same as infer_shapes; supports >2GB models.

    The inferred model is written directly to *output_path*; by default that is
    the original model path (the input file is overwritten).

    Raises:
        TypeError: if *model_path* is not a string path.
    """
    if isinstance(model_path, ModelProto):
        # Fixed: the concatenated message previously lacked a space after the comma.
        raise TypeError('infer_shapes_path only accepts model Path (String), '
                        'you can use infer_shapes for the ModelProto.')
    # Directly output the inferred model into the specified path, return nothing
    elif isinstance(model_path, str):
        # If output_path is not defined, default output_path would be the original model path
        if output_path == '':
            output_path = model_path
        C.infer_shapes_path(model_path, output_path, check_type, strict_mode, data_prop)
    else:
        raise TypeError('infer_shapes_path only accepts model path (String), '
                        'incorrect type: {}'.format(type(model_path)))
# Re-export the C++ shape-inference error type at module level for callers.
InferenceError = C.InferenceError
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import onnx.onnx_cpp2py_export.shape_inference as C
from onnx import ModelProto
from typing import Text, Union
def infer_shapes(model: Union[ModelProto, bytes], check_type: bool = False, strict_mode: bool = False, data_prop: bool = False) -> ModelProto:
    """Apply shape inference to the provided ModelProto.

    Inferred shapes are added to the value_info field of the graph.

    If the inferred values conflict with values already provided in the
    graph, that means that the provided values are invalid (or there is a
    bug in shape inference), and the result is unspecified.

    Arguments:
        model (Union[ModelProto, bytes]): ModelProto instance, or its serialized bytes
        check_type (bool): Checks the type-equality for input and output
        strict_mode (bool): Stricter shape inference, it will throw errors if any;
            Otherwise, simply stop if any error
        data_prop (bool): Enables data propagation for limited operators to perform shape computation

    Returns:
        (ModelProto) model with inferred shape information

    Raises:
        TypeError: if *model* is neither a ModelProto nor bytes.
    """
    if isinstance(model, (ModelProto, bytes)):
        # Serialize if needed, run the C++ inference, then deserialize the result.
        model_str = model if isinstance(model, bytes) else model.SerializeToString()
        inferred_model_str = C.infer_shapes(model_str, check_type, strict_mode, data_prop)
        return onnx.load_from_string(inferred_model_str)
    elif isinstance(model, str):
        # Fixed: the concatenated message previously lacked a space after the comma.
        raise TypeError('infer_shapes only accepts ModelProto or bytes, '
                        'you can use infer_shapes_path for the model path (String).')
    else:
        raise TypeError('infer_shapes only accepts ModelProto or bytes, '
                        'incorrect type: {}'.format(type(model)))
def infer_shapes_path(model_path: Text, output_path: Text = '', check_type: bool = False, strict_mode: bool = False, data_prop: bool = False) -> None:
    """
    Take a model path for shape inference, same as infer_shapes; supports >2GB models.

    The inferred model is written directly to *output_path*; by default that is
    the original model path (the input file is overwritten).

    Raises:
        TypeError: if *model_path* is not a string path.
    """
    if isinstance(model_path, ModelProto):
        # Fixed: the concatenated message previously lacked a space after the comma.
        raise TypeError('infer_shapes_path only accepts model Path (String), '
                        'you can use infer_shapes for the ModelProto.')
    # Directly output the inferred model into the specified path, return nothing
    elif isinstance(model_path, str):
        # If output_path is not defined, default output_path would be the original model path
        if output_path == '':
            output_path = model_path
        C.infer_shapes_path(model_path, output_path, check_type, strict_mode, data_prop)
    else:
        raise TypeError('infer_shapes_path only accepts model path (String), '
                        'incorrect type: {}'.format(type(model_path)))
# Re-export the C++ shape-inference error type at module level for callers.
InferenceError = C.InferenceError
import datetime
import json
import re
from rdr_service import config
from rdr_service.dao.bigquery_sync_dao import BigQuerySyncDao, BigQueryGenerator
from rdr_service.model.bq_base import BQRecord
from rdr_service.model.bq_participant_summary import BQParticipantSummarySchema, BQParticipantSummary
from rdr_service.model.bq_pdr_participant_summary import BQPDRParticipantSummary
from rdr_service.resource.generators import ParticipantSummaryGenerator
# helpers to map from the resource participant summary to the bigquery participant summary.
# Prefix prepended to every field of the named sub-table when restoring the
# BigQuery field names (consumed by BQParticipantSummaryGenerator._fix_prefixes).
SUB_PREFIXES = {
    'modules': 'mod_',
    'pm': 'pm_',
    'samples': 'bbs_',
    'biobank_orders': 'bbo_'
}
# Fields whose names are remapped outright instead of being prefixed.
SUB_FIELD_MAP = {
    'module_authored': 'mod_authored',
    'module_created': 'mod_created',
}
class BQParticipantSummaryGenerator(BigQueryGenerator):
    """
    Generate a Participant Summary BQRecord object
    """
    # Read-only DAO placeholder; not populated in this class body.
    ro_dao = None
    # Retrieve module and sample test lists from config.
    _baseline_modules = [mod.replace('questionnaireOn', '')
                         for mod in config.getSettingList('baseline_ppi_questionnaire_fields')]
    _baseline_sample_test_codes = config.getSettingList('baseline_sample_test_codes')
    _dna_sample_test_codes = config.getSettingList('dna_sample_test_codes')

    def _fix_prefixes(self, st_name, st_data):
        """
        Fix sub-table prefixes, this is a recursive function.
        :param st_name: sub-table key (must be a key of SUB_PREFIXES)
        :param st_data: sub-table dict
        :return: dict with prefixed/remapped field names
        """
        data = dict()
        for k, v in st_data.items():
            # Add prefixes to each field unless it is a sub-table.
            nk = f'{SUB_PREFIXES[st_name]}{k}' if k not in SUB_FIELD_MAP else SUB_FIELD_MAP[k]
            if k not in SUB_PREFIXES:
                data[nk] = v
            else:
                # Recursively process the next sub-table. IE: Biobank order samples.
                data[nk] = [self._fix_prefixes(k, r) for r in st_data[k]]
        return data

    def make_bqrecord(self, p_id, convert_to_enum=False):
        """
        Build a Participant Summary BQRecord object for the given participant id.
        :param p_id: participant id
        :param convert_to_enum: If schema field description includes Enum class info, convert value to Enum.
        :return: BQRecord object
        """
        # NOTE: Generator code is now only in 'rdr_service/resource/generators/participant.py'.
        res = ParticipantSummaryGenerator().make_resource(p_id)
        summary = res.get_data()
        # Add sub-table field prefixes back in and map a few other fields.
        for k, v in SUB_PREFIXES.items():  # pylint: disable=unused-variable
            if k not in summary:
                continue
            summary[k] = [self._fix_prefixes(k, r) for r in summary[k]]
        # Convert participant id to an integer
        if 'participant_id' in summary and summary['participant_id']:
            summary['participant_id'] = int(re.sub("[^0-9]", "", str(summary['participant_id'])))
        return BQRecord(schema=BQParticipantSummarySchema, data=summary, convert_to_enum=convert_to_enum)

    def patch_bqrecord(self, p_id, data):
        """
        Upsert data into an existing resource. Warning: No data recalculation is performed in this method.
        Note: This method uses the MySQL JSON_SET function to update the resource field in the backend.
        It does not return the full resource record here.
        https://dev.mysql.com/doc/refman/5.7/en/json-modification-functions.html#function_json-set
        :param p_id: participant id
        :param data: dict of resource field name -> new value
        :return: BQRecord built from the patched resource, or None if no record matched.
        """
        # NOTE(review): keys of `data` are interpolated directly into the SQL
        # ('$.{k}' paths and :p_{k} bind names); only the values are bound
        # parameters, so callers must pass trusted keys only — confirm callers.
        sql_json_set_values = ', '.join([f"'$.{k}', :p_{k}" for k, v in data.items()])
        args = {'pid': p_id, 'table_id': 'participant_summary', 'modified': datetime.datetime.utcnow()}
        for k, v in data.items():
            args[f'p_{k}'] = v
        sql = f"""
            update bigquery_sync
               set modified = :modified, resource = json_set(resource, {sql_json_set_values})
               where pk_id = :pid and table_id = :table_id
        """
        dao = BigQuerySyncDao(backup=False)
        with dao.session() as session:
            session.execute(sql, args)
            # Read the patched resource back so the caller receives a BQRecord.
            sql = 'select resource from bigquery_sync where pk_id = :pid and table_id = :table_id limit 1'
            rec = session.execute(sql, args).first()
            if rec:
                return BQRecord(schema=BQParticipantSummarySchema, data=json.loads(rec.resource),
                                convert_to_enum=False)
        return None
def rebuild_bq_participant(p_id, ps_bqgen=None, pdr_bqgen=None, project_id=None, patch_data=None):
    """
    Rebuild a BQ record for a specific participant
    :param p_id: participant id
    :param ps_bqgen: BQParticipantSummaryGenerator object
    :param pdr_bqgen: BQPDRParticipantSummaryGenerator object
    :param project_id: Project ID override value.
    :param patch_data: dict of resource values to update/insert (partial update when given).
    :return: participant summary BQRecord.
    """
    # Allow for batch requests to rebuild participant summary data.
    if not ps_bqgen:
        ps_bqgen = BQParticipantSummaryGenerator()
    if not pdr_bqgen:
        # Imported here (not at module top) to avoid a circular import.
        from rdr_service.dao.bq_pdr_participant_summary_dao import BQPDRParticipantSummaryGenerator
        pdr_bqgen = BQPDRParticipantSummaryGenerator()

    # See if this is a partial update.
    is_patch = bool(patch_data) and isinstance(patch_data, dict)
    if is_patch:
        ps_bqr = ps_bqgen.patch_bqrecord(p_id, patch_data)
    else:
        ps_bqr = ps_bqgen.make_bqrecord(p_id)

    # Since the PDR participant summary is primarily a subset of the Participant Summary, call the full
    # Participant Summary generator and take what we need from it.
    pdr_bqr = pdr_bqgen.make_bqrecord(p_id, ps_bqr=ps_bqr)

    w_dao = BigQuerySyncDao()
    with w_dao.session() as w_session:
        # Save the participant summary record only on a full rebuild.
        # BUG FIX: the previous condition (`not patch_data and isinstance(patch_data, dict)`)
        # was only true for an empty dict, so full rebuilds were never saved here.
        if not is_patch:
            ps_bqgen.save_bqrecord(p_id, ps_bqr, bqtable=BQParticipantSummary, w_dao=w_dao, w_session=w_session,
                                   project_id=project_id)
        # save the PDR participant summary record
        pdr_bqgen.save_bqrecord(p_id, pdr_bqr, bqtable=BQPDRParticipantSummary, w_dao=w_dao, w_session=w_session,
                                project_id=project_id)
        w_session.flush()

    return ps_bqr
def bq_participant_summary_update_task(p_id):
    """
    Cloud task entry point: rebuild the Participant Summary record for one participant.

    :param p_id: Participant ID
    """
    # Full rebuild (no patch data); the returned BQRecord is not needed here.
    rebuild_bq_participant(p_id)
import json
import re
from rdr_service import config
from rdr_service.dao.bigquery_sync_dao import BigQuerySyncDao, BigQueryGenerator
from rdr_service.model.bq_base import BQRecord
from rdr_service.model.bq_participant_summary import BQParticipantSummarySchema, BQParticipantSummary
from rdr_service.model.bq_pdr_participant_summary import BQPDRParticipantSummary
from rdr_service.resource.generators import ParticipantSummaryGenerator
# helpers to map from the resource participant summary to the bigquery participant summary.
# Prefix prepended to every field of the named sub-table when restoring the
# BigQuery field names (consumed by BQParticipantSummaryGenerator._fix_prefixes).
SUB_PREFIXES = {
    'modules': 'mod_',
    'pm': 'pm_',
    'samples': 'bbs_',
    'biobank_orders': 'bbo_'
}
# Fields whose names are remapped outright instead of being prefixed.
SUB_FIELD_MAP = {
    'module_authored': 'mod_authored',
    'module_created': 'mod_created',
}
class BQParticipantSummaryGenerator(BigQueryGenerator):
"""
Generate a Participant Summary BQRecord object
"""
ro_dao = None
# Retrieve module and sample test lists from config.
_baseline_modules = [mod.replace('questionnaireOn', '')
for mod in config.getSettingList('baseline_ppi_questionnaire_fields')]
_baseline_sample_test_codes = config.getSettingList('baseline_sample_test_codes')
_dna_sample_test_codes = config.getSettingList('dna_sample_test_codes')
    def _fix_prefixes(self, st_name, st_data):
        """
        Fix sub-table prefixes, this is a recursive function.
        :param st_name: sub-table key (must be a key of SUB_PREFIXES)
        :param st_data: sub-table dict
        :return: dict with prefixed/remapped field names
        """
        data = dict()
        for k, v in st_data.items():
            # Add prefixes to each field unless it is a sub-table.
            nk = f'{SUB_PREFIXES[st_name]}{k}' if k not in SUB_FIELD_MAP else SUB_FIELD_MAP[k]
            if k not in SUB_PREFIXES:
                data[nk] = v
            else:
                # Recursively process the next sub-table. IE: Biobank order samples.
                data[nk] = [self._fix_prefixes(k, r) for r in st_data[k]]
        return data
def make_bqrecord(self, p_id, convert_to_enum=False):
"""
Build a Participant Summary BQRecord object for the given participant id.
:param p_id: participant id
:param convert_to_enum: If schema field description includes Enum class info, convert value to Enum.
:return: BQRecord object
"""
# NOTE: Generator code is now only in 'rdr_service/resource/generators/participant.py'.
res = ParticipantSummaryGenerator().make_resource(p_id)
summary = res.get_data()
# Add sub-table field prefixes back in and map a few other fields.
for k, v in SUB_PREFIXES.items(): # pylint: disable=unused-variable
if k not in summary:
continue
summary[k] = [self._fix_prefixes(k, r) for r in summary[k]]
# Convert participant id to an integer
if 'participant_id' in summary and summary['participant_id']:
summary['participant_id'] = int(re.sub("[^0-9]", "", str(summary['participant_id'])))
return BQRecord(schema=BQParticipantSummarySchema, data=summary, convert_to_enum=convert_to_enum)
def patch_bqrecord(self, p_id, data):
"""
Upsert data into an existing resource. Warning: No data recalculation is performed in this method.
Note: This method uses the MySQL JSON_SET function to update the resource field in the backend.
It does not return the full resource record here.
https://dev.mysql.com/doc/refman/5.7/en/json-modification-functions.html#function_json-set
:param p_id: participant id
:param data: dict object
:return: dict
"""
sql_json_set_values = ', '.join([f"'$.{k}', :p_{k}" for k, v in data.items()])
args = {'pid': p_id, 'table_id': 'participant_summary', 'modified': datetime.datetime.utcnow()}
for k, v in data.items():
args[f'p_{k}'] = v
sql = f"""
update bigquery_sync
set modified = :modified, resource = json_set(resource, {sql_json_set_values})
where pk_id = :pid and table_id = :table_id
"""
dao = BigQuerySyncDao(backup=False)
with dao.session() as session:
session.execute(sql, args)
sql = 'select resource from bigquery_sync where pk_id = :pid and table_id = :table_id limit 1'
rec = session.execute(sql, args).first()
if rec:
return BQRecord(schema=BQParticipantSummarySchema, data=json.loads(rec.resource),
convert_to_enum=False)
return None
def rebuild_bq_participant(p_id, ps_bqgen=None, pdr_bqgen=None, project_id=None, patch_data=None):
    """
    Rebuild a BQ record for a specific participant.
    :param p_id: participant id
    :param ps_bqgen: BQParticipantSummaryGenerator object
    :param pdr_bqgen: BQPDRParticipantSummaryGenerator object
    :param project_id: Project ID override value.
    :param patch_data: dict of resource values to update/insert (partial update).
    :return: participant summary BQRecord (or None when a patch matched no record).
    """
    # Allow for batch requests to rebuild participant summary data.
    if not ps_bqgen:
        ps_bqgen = BQParticipantSummaryGenerator()
    if not pdr_bqgen:
        # Imported here to avoid a circular import with the PDR summary DAO module.
        from rdr_service.dao.bq_pdr_participant_summary_dao import BQPDRParticipantSummaryGenerator
        pdr_bqgen = BQPDRParticipantSummaryGenerator()
    # Partial update only when patch_data is a non-empty dict; otherwise full rebuild.
    is_patch = bool(patch_data) and isinstance(patch_data, dict)
    if is_patch:
        ps_bqr = ps_bqgen.patch_bqrecord(p_id, patch_data)
    else:
        ps_bqr = ps_bqgen.make_bqrecord(p_id)
    # Since the PDR participant summary is primarily a subset of the Participant Summary, call the full
    # Participant Summary generator and take what we need from it.
    pdr_bqr = pdr_bqgen.make_bqrecord(p_id, ps_bqr=ps_bqr)
    w_dao = BigQuerySyncDao()
    with w_dao.session() as w_session:
        # Save the participant summary record only on a full rebuild.
        # BUG FIX: the previous condition `not patch_data and isinstance(patch_data, dict)`
        # was True only for an *empty* dict, so full rebuilds (patch_data=None)
        # never saved this record. Intended: save when NOT a partial update.
        if not is_patch:
            ps_bqgen.save_bqrecord(p_id, ps_bqr, bqtable=BQParticipantSummary, w_dao=w_dao, w_session=w_session,
                                   project_id=project_id)
        # Save the PDR participant summary record.
        pdr_bqgen.save_bqrecord(p_id, pdr_bqr, bqtable=BQPDRParticipantSummary, w_dao=w_dao, w_session=w_session,
                                project_id=project_id)
        w_session.flush()
    return ps_bqr
def bq_participant_summary_update_task(p_id):
    """
    Cloud task to update the Participant Summary record for the given participant.
    :param p_id: Participant ID
    """
    # BUG FIX: removed stray dataset residue ('| 0.602062 | 0.22642') that had
    # been fused onto this line, which turned the call into a bitwise-or
    # expression on the returned record (TypeError at runtime).
    rebuild_bq_participant(p_id)
import planckStyle as s
from getdist import types
import six
g = s.getSinglePlotter()
pars = ['omegabh2', 'omegach2', 'theta', 'tau', 'logA', 'ns', 'omegamh2', 'H0', 'omegam', 'age', 'sigma8', 'S8', 'zrei',
'thetastar', 'rdrag']
lines = []
heading = ''
formatter = types.NoLineTableFormatter()
class col(object):
def __init__(self, datatag, title, samples=None, bestfit=False):
if datatag is not None:
if isinstance(datatag,six.string_types): datatag = [datatag]
samples=[]
for tag in datatag:
root = g.getRoot('', tag)
samples += [g.sampleAnalyser.samplesForRoot(root)]
samples[-1].paramNames.setLabelsAndDerivedFromParamNames(g.settings.param_names_for_labels)
self.samples = samples
self.title = title
if bestfit:
self.bestfit = samples[0].getBestFit()
self.marge = samples[0].getMargeStats()
self.marge.addBestFit(self.bestfit)
else:
self.bestfit = None
items = []
items += [col('plikHM_TTTEEE_lowl_lowE_lensing', 'Best fit', bestfit=True)]
plik = col('plikHM_TTTEEE_lowl_lowE_lensing', 'Marginalized')
items += [plik]
camspec = col('CamSpecHM_TTTEEE_lowl_lowE_lensing', '\\camsepc')
items += [camspec]
diff= col(['CamSpecHM_TTTEEE_lowl_lowE_lensing','plikHM_TTTEEE_lowl_lowE_lensing'], r'(\camspec-\plik)/$\sigma_{\rm \plik}$')
items += [diff]
joint = plik.samples[0].getCombinedSamplesWithSamples(camspec.samples[0])
items += [col(None, 'Combined', samples=[joint])]
for i, par in enumerate(pars):
param = plik.samples[0].paramNames.parWithName(par)
line = '$' + param.getLabel() + '$ &'
# line = '\\hline\n' + line
for j, item in enumerate(items):
param = item.samples[0].paramNames.parWithName(par)
if item.bestfit:
latex = item.marge.texValues(formatter, param, limit=1)[1]
else:
if len(item.samples)>1:
diff = (item.samples[0].mean(param)-item.samples[1].mean(param))/item.samples[1].std(param)
latex = r"%+.1f"%diff
else:
latex = item.samples[0].getLatex(params=[par], limit=1)[1][0]
if j: line += ' & '
line += ' $ ' + latex + ' $ '
lines.append(line)
print heading + '\\\\\n\\hline\n' + '\\\\\n'.join(lines) + '\\\\\n' | batch3/outputs/lcdm_default_table.py | import planckStyle as s
from getdist import types
import six
# Single-plot plotter; gives access to the Planck chain sample analyser.
g = s.getSinglePlotter()
# Cosmological parameters to tabulate, in row order.
pars = ['omegabh2', 'omegach2', 'theta', 'tau', 'logA', 'ns', 'omegamh2', 'H0', 'omegam', 'age', 'sigma8', 'S8', 'zrei',
        'thetastar', 'rdrag']
lines = []  # accumulated LaTeX table rows
heading = ''  # optional heading prepended to the printed table
formatter = types.NoLineTableFormatter()
class col(object):
    """One table column: a set of loaded chain samples plus display metadata.

    :param datatag: chain tag (str) or list of tags to load from disk; None
        when pre-built samples are supplied via ``samples``.
    :param title: LaTeX heading for the column.
    :param samples: pre-loaded sample objects (only used when datatag is None).
    :param bestfit: if True, also load the best-fit point and marginalized
        statistics for the first sample set.
    """
    def __init__(self, datatag, title, samples=None, bestfit=False):
        if datatag is not None:
            if isinstance(datatag,six.string_types): datatag = [datatag]
            # Load one sample set per tag, replacing any passed-in 'samples'.
            samples=[]
            for tag in datatag:
                root = g.getRoot('', tag)
                samples += [g.sampleAnalyser.samplesForRoot(root)]
                samples[-1].paramNames.setLabelsAndDerivedFromParamNames(g.settings.param_names_for_labels)
        self.samples = samples
        self.title = title
        if bestfit:
            self.bestfit = samples[0].getBestFit()
            self.marge = samples[0].getMargeStats()
            self.marge.addBestFit(self.bestfit)
        else:
            self.bestfit = None
items = []
# Column 1: best-fit values from the baseline plik likelihood chain.
items += [col('plikHM_TTTEEE_lowl_lowE_lensing', 'Best fit', bestfit=True)]
plik = col('plikHM_TTTEEE_lowl_lowE_lensing', 'Marginalized')
items += [plik]
# NOTE(review): '\\camsepc' looks like a typo for the '\\camspec' macro —
# confirm against the LaTeX preamble before changing.
camspec = col('CamSpecHM_TTTEEE_lowl_lowE_lensing', '\\camsepc')
items += [camspec]
# Difference column: (CamSpec - plik) parameter means in units of plik's sigma.
diff= col(['CamSpecHM_TTTEEE_lowl_lowE_lensing','plikHM_TTTEEE_lowl_lowE_lensing'], r'(\camspec-\plik)/$\sigma_{\rm \plik}$')
items += [diff]
# Combined column: plik and CamSpec samples merged into one set.
joint = plik.samples[0].getCombinedSamplesWithSamples(camspec.samples[0])
items += [col(None, 'Combined', samples=[joint])]
# Build one LaTeX table row per parameter, one cell per column.
for i, par in enumerate(pars):
    param = plik.samples[0].paramNames.parWithName(par)
    line = '$' + param.getLabel() + '$ &'
    # line = '\\hline\n' + line
    for j, item in enumerate(items):
        param = item.samples[0].paramNames.parWithName(par)
        if item.bestfit:
            # Best-fit column: take the formatted value from the marge stats.
            latex = item.marge.texValues(formatter, param, limit=1)[1]
        else:
            if len(item.samples)>1:
                # Two sample sets => this is the difference column.
                # NOTE(review): this rebinds the module-level 'diff' column
                # object; harmless (it is already stored in 'items') but confusing.
                diff = (item.samples[0].mean(param)-item.samples[1].mean(param))/item.samples[1].std(param)
                latex = r"%+.1f"%diff
            else:
                latex = item.samples[0].getLatex(params=[par], limit=1)[1][0]
        if j: line += ' & '
        line += ' $ ' + latex + ' $ '
    lines.append(line)
# Python 2 print statement: emit the assembled LaTeX table body.
print heading + '\\\\\n\\hline\n' + '\\\\\n'.join(lines) + '\\\\\n'
import argparse
import os
import re
import json
from pathlib import Path
def parse_args():
"""
Parse command line arguments
:returns: object -- Object containing command line options
"""
parser = argparse.ArgumentParser(description="Link each minimized structure with the corresponding QM conformations")
parser.add_argument("pdb_input_file", type=str, help="Original pdb file to be linked.")
parser.add_argument("pdb_to_smarts", type=str, help="JSON file connecting pdbs to SMARTS")
parser.add_argument("ids_to_smarts", type=str, help="JSON file connecting ids to SMARTS.")
args = parser.parse_args()
return args.pdb_input_file,args.pdb_to_smarts, args.ids_to_smarts
def parse_json_file(file):
"""
Parsing the date from a json file into a dictionary.
"""
with open(file, 'r') as json_file:
data = json.load(json_file)
return data
def get_ids(mydict,smarts_label):
"""
Given a dictionary and a value for an item returns all the keys with that value.
"""
items = mydict.items()
ids_list = []
for ids,smarts in items:
if smarts == smarts_label: ids_list.append(ids)
return ids_list
def get_path(ids):
"""
Returns a list of the paths of the corresponding conformations in /QM/ of the given ligand.
"""
PATH_FOLDER = 'QM'
path_list = []
for id in ids:
path = os.path.join('/home/lauramalo/repos/peleffy-benchmarks/benchmarks/data/SMIRNOFF_coverage_set_1',PATH_FOLDER, id + '.xyz')
path_list.append(path)
return path_list
def main(pdb_file, pdb_to_smarts, ids_to_smarts):
"""
Command line:
----------
>>> python link_structures.py [ligand.pdb] [pdbs_to_smarts.json] [ids_to_smarts.json]
Example:
----------
>>> python link_structures.py 2.pdb pdbs_to_smarts.json ids_to_smarts.json
PATH of the json files:
----------
pdbs_to_smarts.json -> /home/lauramalo/repos/peleffy-benchmarks/benchmarks/data/SMIRNOFF_coverage_set_1/pdb/pdbs_to_smarts.json
ids_to_smarts.json -> /home/lauramalo/repos/peleffy-benchmarks/benchmarks/data/SMIRNOFF_coverage_set_1/ids/ids_to_smarts.json
"""
args = parse_args()
p = Path(pdb_file)
label = p.name
label = label.replace('.pdb','')
dict_pdb = parse_json_file(pdb_to_smarts)
dict_ids = parse_json_file(ids_to_smarts)
ids = get_ids(dict_ids,dict_pdb.get(label))
return get_path(ids)
if __name__ == "__main__":
pdb_file, pdb_to_smarts, ids_to_smarts = parse_args()
main(pdb_file, pdb_to_smarts, ids_to_smarts) | benchmarks/geometry/link_structures.py | import argparse
import os
import re
import json
from pathlib import Path
def parse_args():
    """Parse the command-line arguments.

    :returns: tuple -- (pdb_input_file, pdb_to_smarts, ids_to_smarts)
    """
    cli = argparse.ArgumentParser(
        description="Link each minimized structure with the corresponding QM conformations")
    cli.add_argument("pdb_input_file", type=str, help="Original pdb file to be linked.")
    cli.add_argument("pdb_to_smarts", type=str, help="JSON file connecting pdbs to SMARTS")
    cli.add_argument("ids_to_smarts", type=str, help="JSON file connecting ids to SMARTS.")
    parsed = cli.parse_args()
    return parsed.pdb_input_file, parsed.pdb_to_smarts, parsed.ids_to_smarts
def parse_json_file(file):
    """Load a JSON file and return its contents as a Python object."""
    with open(file, 'r') as handle:
        return json.load(handle)
def get_ids(mydict, smarts_label):
    """Return every key in *mydict* whose value equals *smarts_label*.

    Keys are returned in the dictionary's insertion order.
    """
    return [key for key, value in mydict.items() if value == smarts_label]
def get_path(ids):
    """
    Returns a list of the paths of the corresponding conformations in /QM/ of the given ligand.
    """
    qm_folder = os.path.join(
        '/home/lauramalo/repos/peleffy-benchmarks/benchmarks/data/SMIRNOFF_coverage_set_1',
        'QM')
    # Avoid shadowing the builtin 'id' used by the original loop variable.
    return [os.path.join(qm_folder, entry + '.xyz') for entry in ids]
def main(pdb_file, pdb_to_smarts, ids_to_smarts):
    """
    Return the QM conformation paths linked to the given minimized structure.
    Command line:
    ----------
    >>> python link_structures.py [ligand.pdb] [pdbs_to_smarts.json] [ids_to_smarts.json]
    Example:
    ----------
    >>> python link_structures.py 2.pdb pdbs_to_smarts.json ids_to_smarts.json
    PATH of the json files:
    ----------
    pdbs_to_smarts.json -> /home/lauramalo/repos/peleffy-benchmarks/benchmarks/data/SMIRNOFF_coverage_set_1/pdb/pdbs_to_smarts.json
    ids_to_smarts.json -> /home/lauramalo/repos/peleffy-benchmarks/benchmarks/data/SMIRNOFF_coverage_set_1/ids/ids_to_smarts.json
    :param pdb_file: path to the ligand pdb file
    :param pdb_to_smarts: path to the JSON file mapping pdb labels to SMARTS
    :param ids_to_smarts: path to the JSON file mapping QM ids to SMARTS
    :return: list of QM conformation file paths for the ligand's SMARTS
    """
    # BUG FIX: the original called parse_args() here and discarded the result,
    # which re-parsed sys.argv (exiting on bad/missing CLI args) even when
    # main() was invoked programmatically with explicit arguments.
    label = Path(pdb_file).name.replace('.pdb', '')
    dict_pdb = parse_json_file(pdb_to_smarts)
    dict_ids = parse_json_file(ids_to_smarts)
    # Find all QM ids sharing the ligand's SMARTS pattern.
    ids = get_ids(dict_ids, dict_pdb.get(label))
    return get_path(ids)
# Script entry point: parse the CLI arguments and run the linker.
if __name__ == "__main__":
    pdb_file, pdb_to_smarts, ids_to_smarts = parse_args()
    main(pdb_file, pdb_to_smarts, ids_to_smarts)
from __future__ import annotations
import dataclasses
import inspect
import pathlib
import types
from typing import Any, Optional, Type, Union
from . import module
from . import package
from . import traits
@dataclasses.dataclass
class Inspector(object):
"""Inspector factory which returns the appropraite Inspector subclass.
Args:
item (Any): unknown item to examine.
"""
item: Any
""" Initialization Methods """
def __new__(cls, item: Any, *args: Any, **kwargs: Any) -> None:
"""Returns Inspector subclass based on type of 'item'."""
if isinstance(item, types.ModuleType):
return ModuleInspector(module = item, *args, **kwargs)
elif isinstance(item, (pathlib.Path, str)):
return PackageInspector(folder = item, *args, **kwargs)
elif inspect.isclass(item):
return ClassInspector(item = item, *args, **kwargs)
elif isinstance(item, object):
return InstanceInspector(item = item, *args, **kwargs)
else:
raise TypeError(f'item must be a module, path, class, or object')
""" Properties """
@property
def attributes(self) -> dict[str, Any]:
"""dict of attribute names and values in 'item'.
Returns:
dict[str, Any]: keys are attribute names and values are attribute
values.
"""
return traits.get_attributes(
item = self.item,
include_private = self.include_private)
@property
def contains(self) -> Optional[Union[
tuple[Type[Any], ...],
tuple[tuple[Type[Any], ...], tuple[Type[Any], ...]]]]:
"""Types that 'item' contains.
Returns:
Optional[Union[tuple[Type[Any], ...], tuple[tuple[Type[Any], ...],
tuple[Type[Any], ...]]]]:: returns the types of things contained
in 'item'. Returns None if 'item' is not a container.
"""
return traits.has_types(item = self.item)
@property
def name(self) -> Optional[str]:
"""str name of 'item'.
Returns:
str: inferred name of the stored 'item'.
"""
return traits.get_name(item = self.item)
@property
def type(self) -> Type[Any]:
"""Data type of 'item'.
Returns:
Type[Any]: type of the stored 'item'.
"""
return type(self.item)
@property
def variables(self) -> dict[str, Any]:
"""dict of variable names and variable values in 'item'.
'variables' are all attributes that are neither methods nor properties.
Returns:
dict[str, Any]: keys are variable names and values are variable
values.
"""
return traits.get_variables(
item = self.item,
include_private = self.include_private)
@dataclasses.dataclass
class ClassInspector(Inspector):
"""Inspector for accessing class information from 'item'.
Args:
item (Type[Any]): class to examine.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (False). Defauls to True.
"""
item: Type[Any]
include_private: bool = True
""" Properties """
@property
def annotations(self) -> dict[str, Type[Any]]:
"""dict of parameters and annotated type hints in 'item'.
Returns:
dict[str, Type[Any]]: keys are parameter/attribute names and values
are type hints.
"""
return traits.get_annotations(
item = self.item,
include_private = self.include_private)
@property
def attributes(self) -> list[str]:
"""Attribute names in 'item'.
Returns:
list[str]: names of attributes.
"""
return traits.name_attributes(
item = self.item,
include_private = self.include_private)
@property
def methods(self) -> dict[str, types.MethodType]:
"""dict of method names and methods in 'item'.
Returns:
dict[str, types.MethodType]: keys are method names and values are
methods.
"""
return traits.get_methods(
item = self.item,
include_private = self.include_private)
@property
def parameters(self) -> list[str]:
"""Names of parameters from a dataclass: 'item'.
Returns:
list[str]: names of parameters for a dataclass.
"""
return traits.name_parameters(item = self.item)
@property
def properties(self) -> list[str]:
"""Property names in 'item'.
Returns:
list[str]: names of properties.
"""
return traits.get_properties(
item = self.item,
include_private = self.include_private)
@property
def signatures(self) -> dict[str, inspect.Signature]:
"""dict of method names and signatures in 'item'.
Returns:
dict[str, inspect.Signature]: keys are method names and values are
signatures for those methods.
"""
return traits.get_signatures(
item = self.item,
include_private = self.include_private)
@property
def variables(self) -> list[str]:
"""Variable names in 'item'.
'variables' are all attributes that are neither methods nor properties.
Returns:
list[str]: names of variables in 'item'.
"""
return traits.get_variables(
item = self.item,
include_private = self.include_private)
@dataclasses.dataclass
class InstanceInspector(Inspector):
"""Inspector for accessing instance information from 'item'.
Args:
item (Type[Any]): instance to examine.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (False). Defauls to True.
"""
item: object
include_private: bool = True
""" Properties """
@property
def annotations(self) -> dict[str, Type[Any]]:
"""dict of parameters and annotated type hints in 'item'.
Returns:
dict[str, Type[Any]]: keys are parameter/attribute names and values
are type hints.
"""
return traits.get_annotations(
item = self.item,
include_private = self.include_private)
@property
def methods(self) -> dict[str, types.MethodType]:
"""dict of method names and methods in 'item'.
Returns:
dict[str, types.MethodType]: keys are method names and values are
methods.
"""
return traits.get_methods(
item = self.item,
include_private = self.include_private)
@property
def parameters(self) -> list[str]:
"""Names of parameters from a dataclass in 'item'.
Returns:
list[str]: names of parameters for a dataclass.
"""
return traits.name_parameters(item = self.item)
@property
def properties(self) -> dict[str, Any]:
"""dict of property names and property values in 'item'.
Returns:
dict[str, Any]: keys are property names and values are property
values.
"""
return traits.get_properties(
item = self.item,
include_private = self.include_private)
@property
def signatures(self) -> dict[str, inspect.Signature]:
"""dict of method names and signatures in 'item'.
Returns:
dict[str, inspect.Signature]: keys are method names and values are
signatures for those methods.
"""
return traits.get_signatures(
item = self.item,
include_private = self.include_private)
@dataclasses.dataclass
class ModuleInspector(Inspector):
"""Inspector for accessing module information from 'item'.
Args:
item (types.ModuleType): module to inspect.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (True). Defauls to False.
"""
item: types.ModuleType
include_private: bool = True
""" Properties """
@property
def classes(self) -> dict[str, Type[Any]]:
"""dict of class names and classes in 'item'.
Returns:
dict[str, Type[Any]: keys are class names and values are classes.
"""
return module.get_classes(
item = self.item,
include_private = self.include_private)
@property
def functions(self) -> dict[str, types.FunctionType]:
"""dict of function names and functions in 'item'.
Returns:
dict[str, types.FunctionType]: keys are function names and values
are functions.
"""
return module.get_functions(
item = self.item,
include_private = self.include_private)
@property
def signatures(self) -> dict[str, inspect.Signature]:
"""dict of method names and method signatures in 'item'.
Returns:
dict[str, inspect.Signature]: keys are method names and values are
signatures for those methods.
"""
return traits.get_signatures(
item = self.item,
include_private = self.include_private)
@dataclasses.dataclass
class PackageInspector(Inspector):
"""Inspector for accessing package information from 'item'.
Attributes:
item (Union[pathlib.Path, str]): folder for which information should
be made available.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (False). Defauls to True.
include_subfolders (bool): whether to include subitems in the package.
Defaults to True.
"""
item: Union[pathlib.Path, str]
include_private: bool = True
include_subfolders: bool = True
""" Initialization Methods """
def __post_init__(self) -> None:
"""Initializes class instance attributes."""
self.item = convert.pathlibify(item = self.item)
""" Properties """
@property
def files(self) -> list[pathlib.Path]:
"""Non-python-module file paths in 'item'.
Returns:
list[pathlib.Path]: list of non-python-module file paths.
"""
return package.get_file_paths(
item = self.item,
recursive = self.include_subfolders)
@property
def folders(self) -> list[pathlib.Path]:
"""Folder paths in 'item'.
Returns:
list[pathlib.Path]: list of folder paths.
"""
return package.get_folder_paths(
item = self.item,
recursive = self.include_subfolders)
@property
def modules(self) -> dict[str, types.ModuleType]:
"""dict of python module names and modules in 'item'.
Args:
item (Union[str, pathlib.Path]): path of folder to examine.
Returns:
dict[str, types.ModuleType]: dict with str key names of python
modules and values as the corresponding modules.
"""
return package.get_modules(
item = self.item,
recursive = self.include_subfolders)
@property
def module_paths(self) -> list[pathlib.Path]:
"""Python module file paths in 'item'.
Returns:
list[pathlib.Path]: list of python-module file paths.
"""
return package.get_module_paths(
item = self.item,
recursive = self.include_subfolders)
@property
def paths(self) -> list[pathlib.Path]:
"""All paths in 'item'.
Returns:
list[pathlib.Path]: list of all paths.
"""
return package.get_paths(
item = self.item,
recursive = self.include_subfolders) | amos/observe/examine.py | from __future__ import annotations
import dataclasses
import inspect
import pathlib
import types
from typing import Any, Optional, Type, Union
from . import module
from . import package
from . import traits
@dataclasses.dataclass
class Inspector(object):
    """Inspector factory which returns the appropriate Inspector subclass.

    Args:
        item (Any): unknown item to examine.

    """
    item: Any

    """ Initialization Methods """

    def __new__(cls, item: Any, *args: Any, **kwargs: Any) -> Inspector:
        """Returns Inspector subclass based on type of 'item'."""
        # Subclasses inherit this __new__; only dispatch when Inspector itself
        # is constructed, otherwise the factory would recurse on itself.
        if cls is not Inspector:
            return super().__new__(cls)
        # BUG FIX: the subclasses store the examined object in their 'item'
        # dataclass field; the previous keywords ('module=', 'folder=')
        # raised TypeError on construction.
        if isinstance(item, types.ModuleType):
            return ModuleInspector(item, *args, **kwargs)
        elif isinstance(item, (pathlib.Path, str)):
            return PackageInspector(item, *args, **kwargs)
        elif inspect.isclass(item):
            return ClassInspector(item, *args, **kwargs)
        elif isinstance(item, object):
            return InstanceInspector(item, *args, **kwargs)
        else:
            # Unreachable in practice (everything is an 'object'); kept for
            # interface parity with the original.
            raise TypeError('item must be a module, path, class, or object')

    """ Properties """

    @property
    def attributes(self) -> dict[str, Any]:
        """dict of attribute names and values in 'item'.

        Returns:
            dict[str, Any]: keys are attribute names and values are attribute
                values.

        """
        # NOTE(review): 'include_private' is declared by the subclasses, not
        # on Inspector itself — confirm direct Inspector use is unsupported.
        return traits.get_attributes(
            item = self.item,
            include_private = self.include_private)

    @property
    def contains(self) -> Optional[Union[
        tuple[Type[Any], ...],
        tuple[tuple[Type[Any], ...], tuple[Type[Any], ...]]]]:
        """Types that 'item' contains.

        Returns:
            Optional[Union[tuple[Type[Any], ...], tuple[tuple[Type[Any], ...],
                tuple[Type[Any], ...]]]]: returns the types of things contained
                in 'item'. Returns None if 'item' is not a container.

        """
        return traits.has_types(item = self.item)

    @property
    def name(self) -> Optional[str]:
        """str name of 'item'.

        Returns:
            str: inferred name of the stored 'item'.

        """
        return traits.get_name(item = self.item)

    @property
    def type(self) -> Type[Any]:
        """Data type of 'item'.

        Returns:
            Type[Any]: type of the stored 'item'.

        """
        return type(self.item)

    @property
    def variables(self) -> dict[str, Any]:
        """dict of variable names and variable values in 'item'.

        'variables' are all attributes that are neither methods nor properties.

        Returns:
            dict[str, Any]: keys are variable names and values are variable
                values.

        """
        return traits.get_variables(
            item = self.item,
            include_private = self.include_private)
@dataclasses.dataclass
class ClassInspector(Inspector):
    """Inspector for accessing class information from 'item'.

    All lookups are delegated to the 'traits' helper module.

    Args:
        item (Type[Any]): class to examine.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to True.

    """
    item: Type[Any]
    include_private: bool = True
    """ Properties """
    @property
    def annotations(self) -> dict[str, Type[Any]]:
        """dict of parameters and annotated type hints in 'item'.

        Returns:
            dict[str, Type[Any]]: keys are parameter/attribute names and values
                are type hints.

        """
        return traits.get_annotations(
            item = self.item,
            include_private = self.include_private)
    @property
    def attributes(self) -> list[str]:
        """Attribute names in 'item'.

        Returns:
            list[str]: names of attributes.

        """
        return traits.name_attributes(
            item = self.item,
            include_private = self.include_private)
    @property
    def methods(self) -> dict[str, types.MethodType]:
        """dict of method names and methods in 'item'.

        Returns:
            dict[str, types.MethodType]: keys are method names and values are
                methods.

        """
        return traits.get_methods(
            item = self.item,
            include_private = self.include_private)
    @property
    def parameters(self) -> list[str]:
        """Names of parameters from a dataclass: 'item'.

        Returns:
            list[str]: names of parameters for a dataclass.

        """
        return traits.name_parameters(item = self.item)
    @property
    def properties(self) -> list[str]:
        """Property names in 'item'.

        Returns:
            list[str]: names of properties.

        """
        return traits.get_properties(
            item = self.item,
            include_private = self.include_private)
    @property
    def signatures(self) -> dict[str, inspect.Signature]:
        """dict of method names and signatures in 'item'.

        Returns:
            dict[str, inspect.Signature]: keys are method names and values are
                signatures for those methods.

        """
        return traits.get_signatures(
            item = self.item,
            include_private = self.include_private)
    @property
    def variables(self) -> list[str]:
        """Variable names in 'item'.

        'variables' are all attributes that are neither methods nor properties.

        Returns:
            list[str]: names of variables in 'item'.

        """
        return traits.get_variables(
            item = self.item,
            include_private = self.include_private)
@dataclasses.dataclass
class InstanceInspector(Inspector):
    """Inspector for accessing instance information from 'item'.

    All lookups are delegated to the 'traits' helper module.

    Args:
        item (object): instance to examine.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to True.

    """
    item: object
    include_private: bool = True
    """ Properties """
    @property
    def annotations(self) -> dict[str, Type[Any]]:
        """dict of parameters and annotated type hints in 'item'.

        Returns:
            dict[str, Type[Any]]: keys are parameter/attribute names and values
                are type hints.

        """
        return traits.get_annotations(
            item = self.item,
            include_private = self.include_private)
    @property
    def methods(self) -> dict[str, types.MethodType]:
        """dict of method names and methods in 'item'.

        Returns:
            dict[str, types.MethodType]: keys are method names and values are
                methods.

        """
        return traits.get_methods(
            item = self.item,
            include_private = self.include_private)
    @property
    def parameters(self) -> list[str]:
        """Names of parameters from a dataclass in 'item'.

        Returns:
            list[str]: names of parameters for a dataclass.

        """
        return traits.name_parameters(item = self.item)
    @property
    def properties(self) -> dict[str, Any]:
        """dict of property names and property values in 'item'.

        Returns:
            dict[str, Any]: keys are property names and values are property
                values.

        """
        return traits.get_properties(
            item = self.item,
            include_private = self.include_private)
    @property
    def signatures(self) -> dict[str, inspect.Signature]:
        """dict of method names and signatures in 'item'.

        Returns:
            dict[str, inspect.Signature]: keys are method names and values are
                signatures for those methods.

        """
        return traits.get_signatures(
            item = self.item,
            include_private = self.include_private)
@dataclasses.dataclass
class ModuleInspector(Inspector):
    """Inspector for accessing module information from 'item'.

    Lookups are delegated to the 'module' and 'traits' helper modules.

    Args:
        item (types.ModuleType): module to inspect.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to True.

    """
    item: types.ModuleType
    include_private: bool = True
    """ Properties """
    @property
    def classes(self) -> dict[str, Type[Any]]:
        """dict of class names and classes in 'item'.

        Returns:
            dict[str, Type[Any]]: keys are class names and values are classes.

        """
        return module.get_classes(
            item = self.item,
            include_private = self.include_private)
    @property
    def functions(self) -> dict[str, types.FunctionType]:
        """dict of function names and functions in 'item'.

        Returns:
            dict[str, types.FunctionType]: keys are function names and values
                are functions.

        """
        return module.get_functions(
            item = self.item,
            include_private = self.include_private)
    @property
    def signatures(self) -> dict[str, inspect.Signature]:
        """dict of method names and method signatures in 'item'.

        Returns:
            dict[str, inspect.Signature]: keys are method names and values are
                signatures for those methods.

        """
        return traits.get_signatures(
            item = self.item,
            include_private = self.include_private)
@dataclasses.dataclass
class PackageInspector(Inspector):
    """Inspector for accessing package information from 'item'.

    Lookups are delegated to the 'package' helper module.

    Attributes:
        item (Union[pathlib.Path, str]): folder for which information should
            be made available.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to True.
        include_subfolders (bool): whether to include subitems in the package.
            Defaults to True.

    """
    item: Union[pathlib.Path, str]
    include_private: bool = True
    include_subfolders: bool = True

    """ Initialization Methods """

    def __post_init__(self) -> None:
        """Coerces 'item' to a pathlib.Path.

        BUG FIX: the original called convert.pathlibify, but 'convert' is
        never imported in this module and raised NameError. pathlib.Path is
        a no-op for Path instances and converts str paths.
        """
        self.item = pathlib.Path(self.item)

    """ Properties """

    @property
    def files(self) -> list[pathlib.Path]:
        """Non-python-module file paths in 'item'.

        Returns:
            list[pathlib.Path]: list of non-python-module file paths.

        """
        return package.get_file_paths(
            item = self.item,
            recursive = self.include_subfolders)

    @property
    def folders(self) -> list[pathlib.Path]:
        """Folder paths in 'item'.

        Returns:
            list[pathlib.Path]: list of folder paths.

        """
        return package.get_folder_paths(
            item = self.item,
            recursive = self.include_subfolders)

    @property
    def modules(self) -> dict[str, types.ModuleType]:
        """dict of python module names and modules in 'item'.

        Returns:
            dict[str, types.ModuleType]: dict with str key names of python
                modules and values as the corresponding modules.

        """
        return package.get_modules(
            item = self.item,
            recursive = self.include_subfolders)

    @property
    def module_paths(self) -> list[pathlib.Path]:
        """Python module file paths in 'item'.

        Returns:
            list[pathlib.Path]: list of python-module file paths.

        """
        return package.get_module_paths(
            item = self.item,
            recursive = self.include_subfolders)

    @property
    def paths(self) -> list[pathlib.Path]:
        """All paths in 'item'.

        Returns:
            list[pathlib.Path]: list of all paths.

        """
        return package.get_paths(
            item = self.item,
            recursive = self.include_subfolders)
import smtplib
import pymongo
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
with open('/home/idong-gi/VINEDING/email_information') as data_file:
email_data = json.load(data_file)
MAIL_ACCOUNT = email_data["id"]
MAIL_PASSWORD = email_data["pw"]
TITLE = "VINE-DING 에서 당신의 검색어가 준비 되었음을 알려드립니다."
CONTENT = "저희 VINE-DING 을 이용해 주셔서 감사합니다. 다음 링크를 클릭하여 당신의 검색어를 확인해 주세요 http://vineding.tk/emotion?search_word="
MONGODB_PORT = 26543
MONGODB_IP = "localhost"
def sending_email_using_gmail(to, title, description):
from_address = MAIL_ACCOUNT
msg = get_message_formatted(from_address, to, title, description)
try:
smtp = smtplib.SMTP('smtp.gmail.com:587')
smtp.starttls()
smtp.login(MAIL_ACCOUNT, MAIL_PASSWORD)
smtp.sendmail(from_address, to, msg.as_string())
except Exception as e:
print(e)
print("email_send_error")
def get_message_formatted(from_address, to, title, description):
msg = MIMEMultipart('localhost')
msg['Subject'] = title
msg['From'] = from_address
content = MIMEText(description, 'plain', _charset="utf-8")
msg.attach(content)
return msg
def confirm_search_list(search_word):
try:
client = pymongo.MongoClient(MONGODB_IP, MONGODB_PORT)
database_name = "email_list"
database = client[database_name]
collection = database.email_list
list = collection.find({"search_word": search_word})
for i in list:
print(i['email'])
sending_email_using_gmail(i['email'], TITLE, CONTENT+search_word)
collection.remove({"search_word": search_word})
except Exception as e:
print(e)
while (True):
try:
client = pymongo.MongoClient(MONGODB_IP, MONGODB_PORT)
database_name = "twitter_api"
database = client[database_name]
collection = database.collection_names(include_system_collections=False)
for collect in collection:
if "mapReduce" in collect:
confirm_search_list(collect.replace('mapReduce', ''))
except Exception as e:
print(e)
print("wait 120 second")
time.sleep(120) | ProcessInformation/python/email_transfer/email_transfer/EmailTransfer.py | import smtplib
import pymongo
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
with open('/home/idong-gi/VINEDING/email_information') as data_file:
email_data = json.load(data_file)
MAIL_ACCOUNT = email_data["id"]
MAIL_PASSWORD = email_data["pw"]
TITLE = "VINE-DING 에서 당신의 검색어가 준비 되었음을 알려드립니다."
CONTENT = "저희 VINE-DING 을 이용해 주셔서 감사합니다. 다음 링크를 클릭하여 당신의 검색어를 확인해 주세요 http://vineding.tk/emotion?search_word="
MONGODB_PORT = 26543
MONGODB_IP = "localhost"
def sending_email_using_gmail(to, title, description):
from_address = MAIL_ACCOUNT
msg = get_message_formatted(from_address, to, title, description)
try:
smtp = smtplib.SMTP('smtp.gmail.com:587')
smtp.starttls()
smtp.login(MAIL_ACCOUNT, MAIL_PASSWORD)
smtp.sendmail(from_address, to, msg.as_string())
except Exception as e:
print(e)
print("email_send_error")
def get_message_formatted(from_address, to, title, description):
msg = MIMEMultipart('localhost')
msg['Subject'] = title
msg['From'] = from_address
content = MIMEText(description, 'plain', _charset="utf-8")
msg.attach(content)
return msg
def confirm_search_list(search_word):
try:
client = pymongo.MongoClient(MONGODB_IP, MONGODB_PORT)
database_name = "email_list"
database = client[database_name]
collection = database.email_list
list = collection.find({"search_word": search_word})
for i in list:
print(i['email'])
sending_email_using_gmail(i['email'], TITLE, CONTENT+search_word)
collection.remove({"search_word": search_word})
except Exception as e:
print(e)
while (True):
try:
client = pymongo.MongoClient(MONGODB_IP, MONGODB_PORT)
database_name = "twitter_api"
database = client[database_name]
collection = database.collection_names(include_system_collections=False)
for collect in collection:
if "mapReduce" in collect:
confirm_search_list(collect.replace('mapReduce', ''))
except Exception as e:
print(e)
print("wait 120 second")
time.sleep(120) | 0.099815 | 0.132683 |
from __future__ import print_function
import argparse
# [START dlp_list_jobs]
def list_dlp_jobs(project, filter_string=None, job_type=None):
"""Uses the Data Loss Prevention API to lists DLP jobs that match the
specified filter in the request.
Args:
project: The project id to use as a parent resource.
filter: (Optional) Allows filtering.
Supported syntax:
* Filter expressions are made up of one or more restrictions.
* Restrictions can be combined by 'AND' or 'OR' logical operators.
A sequence of restrictions implicitly uses 'AND'.
* A restriction has the form of '<field> <operator> <value>'.
* Supported fields/values for inspect jobs:
- `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED
- `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY
- `trigger_name` - The resource name of the trigger that
created job.
* Supported fields for risk analysis jobs:
- `state` - RUNNING|CANCELED|FINISHED|FAILED
* The operator must be '=' or '!='.
Examples:
* inspected_storage = cloud_storage AND state = done
* inspected_storage = cloud_storage OR inspected_storage = bigquery
* inspected_storage = cloud_storage AND
(state = done OR state = canceled)
type: (Optional) The type of job. Defaults to 'INSPECT'.
Choices:
DLP_JOB_TYPE_UNSPECIFIED
INSPECT_JOB: The job inspected content for sensitive data.
RISK_ANALYSIS_JOB: The job executed a Risk Analysis computation.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp.DlpServiceClient()
# Convert the project id into a full resource id.
parent = dlp.project_path(project)
# Job type dictionary
job_type_to_int = {
'DLP_JOB_TYPE_UNSPECIFIED':
google.cloud.dlp.enums.DlpJobType.DLP_JOB_TYPE_UNSPECIFIED,
'INSPECT_JOB': google.cloud.dlp.enums.DlpJobType.INSPECT_JOB,
'RISK_ANALYSIS_JOB':
google.cloud.dlp.enums.DlpJobType.RISK_ANALYSIS_JOB
}
# If job type is specified, convert job type to number through enums.
if job_type:
job_type = job_type_to_int[job_type]
# Call the API to get a list of jobs.
response = dlp.list_dlp_jobs(
parent,
filter_=filter_string,
type_=job_type)
# Iterate over results.
for job in response:
print('Job: %s; status: %s' % (job.name, job.JobState.Name(job.state)))
# [END dlp_list_jobs]
# [START dlp_delete_job]
def delete_dlp_job(project, job_name):
"""Uses the Data Loss Prevention API to delete a long-running DLP job.
Args:
project: The project id to use as a parent resource.
job_name: The name of the DlpJob resource to be deleted.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp.DlpServiceClient()
# Convert the project id and job name into a full resource id.
name = dlp.dlp_job_path(project, job_name)
# Call the API to delete job.
dlp.delete_dlp_job(name)
print('Successfully deleted %s' % job_name)
# [END dlp_delete_job]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(
dest='content', help='Select how to submit content to the API.')
subparsers.required = True
list_parser = subparsers.add_parser(
'list',
help='List Data Loss Prevention API jobs corresponding to a given '
'filter.')
list_parser.add_argument(
'project',
help='The project id to use as a parent resource.')
list_parser.add_argument(
'-f', '--filter',
help='Filter expressions are made up of one or more restrictions.')
list_parser.add_argument(
'-t', '--type',
choices=['DLP_JOB_TYPE_UNSPECIFIED', 'INSPECT_JOB',
'RISK_ANALYSIS_JOB'],
help='The type of job. API defaults to "INSPECT"')
delete_parser = subparsers.add_parser(
'delete',
help='Delete results of a Data Loss Prevention API job.')
delete_parser.add_argument(
'project',
help='The project id to use as a parent resource.')
delete_parser.add_argument(
'job_name',
help='The name of the DlpJob resource to be deleted. '
'Example: X-#####')
args = parser.parse_args()
if args.content == 'list':
list_dlp_jobs(
args.project,
filter_string=args.filter,
job_type=args.type)
elif args.content == 'delete':
delete_dlp_job(args.project, args.job_name) | dlp/jobs.py | from __future__ import print_function
import argparse
# [START dlp_list_jobs]
def list_dlp_jobs(project, filter_string=None, job_type=None):
"""Uses the Data Loss Prevention API to lists DLP jobs that match the
specified filter in the request.
Args:
project: The project id to use as a parent resource.
filter: (Optional) Allows filtering.
Supported syntax:
* Filter expressions are made up of one or more restrictions.
* Restrictions can be combined by 'AND' or 'OR' logical operators.
A sequence of restrictions implicitly uses 'AND'.
* A restriction has the form of '<field> <operator> <value>'.
* Supported fields/values for inspect jobs:
- `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED
- `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY
- `trigger_name` - The resource name of the trigger that
created job.
* Supported fields for risk analysis jobs:
- `state` - RUNNING|CANCELED|FINISHED|FAILED
* The operator must be '=' or '!='.
Examples:
* inspected_storage = cloud_storage AND state = done
* inspected_storage = cloud_storage OR inspected_storage = bigquery
* inspected_storage = cloud_storage AND
(state = done OR state = canceled)
type: (Optional) The type of job. Defaults to 'INSPECT'.
Choices:
DLP_JOB_TYPE_UNSPECIFIED
INSPECT_JOB: The job inspected content for sensitive data.
RISK_ANALYSIS_JOB: The job executed a Risk Analysis computation.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp.DlpServiceClient()
# Convert the project id into a full resource id.
parent = dlp.project_path(project)
# Job type dictionary
job_type_to_int = {
'DLP_JOB_TYPE_UNSPECIFIED':
google.cloud.dlp.enums.DlpJobType.DLP_JOB_TYPE_UNSPECIFIED,
'INSPECT_JOB': google.cloud.dlp.enums.DlpJobType.INSPECT_JOB,
'RISK_ANALYSIS_JOB':
google.cloud.dlp.enums.DlpJobType.RISK_ANALYSIS_JOB
}
# If job type is specified, convert job type to number through enums.
if job_type:
job_type = job_type_to_int[job_type]
# Call the API to get a list of jobs.
response = dlp.list_dlp_jobs(
parent,
filter_=filter_string,
type_=job_type)
# Iterate over results.
for job in response:
print('Job: %s; status: %s' % (job.name, job.JobState.Name(job.state)))
# [END dlp_list_jobs]
# [START dlp_delete_job]
def delete_dlp_job(project, job_name):
"""Uses the Data Loss Prevention API to delete a long-running DLP job.
Args:
project: The project id to use as a parent resource.
job_name: The name of the DlpJob resource to be deleted.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp.DlpServiceClient()
# Convert the project id and job name into a full resource id.
name = dlp.dlp_job_path(project, job_name)
# Call the API to delete job.
dlp.delete_dlp_job(name)
print('Successfully deleted %s' % job_name)
# [END dlp_delete_job]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(
dest='content', help='Select how to submit content to the API.')
subparsers.required = True
list_parser = subparsers.add_parser(
'list',
help='List Data Loss Prevention API jobs corresponding to a given '
'filter.')
list_parser.add_argument(
'project',
help='The project id to use as a parent resource.')
list_parser.add_argument(
'-f', '--filter',
help='Filter expressions are made up of one or more restrictions.')
list_parser.add_argument(
'-t', '--type',
choices=['DLP_JOB_TYPE_UNSPECIFIED', 'INSPECT_JOB',
'RISK_ANALYSIS_JOB'],
help='The type of job. API defaults to "INSPECT"')
delete_parser = subparsers.add_parser(
'delete',
help='Delete results of a Data Loss Prevention API job.')
delete_parser.add_argument(
'project',
help='The project id to use as a parent resource.')
delete_parser.add_argument(
'job_name',
help='The name of the DlpJob resource to be deleted. '
'Example: X-#####')
args = parser.parse_args()
if args.content == 'list':
list_dlp_jobs(
args.project,
filter_string=args.filter,
job_type=args.type)
elif args.content == 'delete':
delete_dlp_job(args.project, args.job_name) | 0.87444 | 0.405625 |
import logging
import typing
from enum import Enum
from ..exceptions import KeyboardException
from vk.constants import JSON_LIBRARY
logger = logging.getLogger(__name__)
# Keyboards: https://vk.com/dev/bots_docs_3
class ButtonColor(Enum):
PRIMARY = "primary" # blue
SECONDARY = "secondary" # white
NEGATIVE = "negative" # red
POSITIVE = "positive" # green
class ButtonType(Enum):
TEXT = "text"
LOCATION = "location"
VKPAY = "vkpay"
VKAPPS = "open_app"
class Keyboard:
def __init__(self, one_time: bool, inline: bool = False):
"""
Create a keyboard object
:param one_time:
"""
self.one_time = one_time
self.buttons = [[]]
self.keyboard = {
"one_time": one_time,
"buttons": self.buttons,
"inline": inline,
}
@staticmethod
def generate_payload(payload: dict) -> str:
if payload is None:
payload = ""
return payload
def add_row(self):
"""
:return:
"""
if len(self.buttons) >= 10:
raise KeyboardException("Max 10 rows")
self.buttons.append([])
def _add_button(self, action: dict) -> None:
"""
:param action:
:return:
"""
current_row = self.buttons[-1]
current_row.append(action)
def add_text_button(
self,
text: str,
color: ButtonColor = ButtonColor.PRIMARY,
payload: dict = None,
):
"""
:param text:
:param color:
:param payload:
:return:
"""
payload = self.generate_payload(payload)
if not isinstance(color, ButtonColor):
logger.warning("Invalid button color. Used 'PRIMARY'")
color = ButtonColor.PRIMARY.value
else:
color = color.value
if isinstance(text, str):
if len(text) < 1:
raise KeyboardException("Invalid text")
else:
raise KeyboardException("Invalid text")
action = {
"action": {
"type": ButtonType.TEXT.value,
"payload": payload,
"label": text,
},
"color": color,
}
self._add_button(action)
def add_location_button(self, payload: dict = None):
"""
:param payload:
:return:
"""
payload = self.generate_payload(payload)
action = {
"action": {"type": ButtonType.LOCATION.value, "payload": payload}
}
self._add_button(action)
def add_vkpay_button(self, hash: str, payload: dict = None): # noqa
"""
:param hash:
:param payload:
:return:
"""
payload = self.generate_payload(payload)
action = {
"action": {
"type": ButtonType.VKPAY.value,
"payload": payload,
"hash": hash,
}
}
self._add_button(action)
def add_vkapps_button(
self, app_id: int, owner_id: int, label: str, payload: dict = None
):
"""
:param app_id:
:param owner_id:
:param payload:
:return:
"""
payload = self.generate_payload(payload)
action = {
"action": {
"type": ButtonType.VKAPPS.value,
"app_id": app_id,
"owner_id": owner_id,
"payload": payload,
"label": label,
}
}
self._add_button(action)
def get_keyboard(self) -> typing.AnyStr:
"""
Get keyboard json to send.
If keyboard is 'static', you can generate json once and send it every time.
:return:
"""
return JSON_LIBRARY.dumps(self.keyboard)
@classmethod
def get_empty_keyboard(cls: "Keyboard") -> typing.AnyStr:
"""
:return:
"""
keyboard = cls(one_time=True) # noqa
keyboard.keyboard["buttons"] = []
return keyboard.get_keyboard() | vk/keyboards/keyboard.py | import logging
import typing
from enum import Enum
from ..exceptions import KeyboardException
from vk.constants import JSON_LIBRARY
logger = logging.getLogger(__name__)
# Keyboards: https://vk.com/dev/bots_docs_3
class ButtonColor(Enum):
PRIMARY = "primary" # blue
SECONDARY = "secondary" # white
NEGATIVE = "negative" # red
POSITIVE = "positive" # green
class ButtonType(Enum):
TEXT = "text"
LOCATION = "location"
VKPAY = "vkpay"
VKAPPS = "open_app"
class Keyboard:
def __init__(self, one_time: bool, inline: bool = False):
"""
Create a keyboard object
:param one_time:
"""
self.one_time = one_time
self.buttons = [[]]
self.keyboard = {
"one_time": one_time,
"buttons": self.buttons,
"inline": inline,
}
@staticmethod
def generate_payload(payload: dict) -> str:
if payload is None:
payload = ""
return payload
def add_row(self):
"""
:return:
"""
if len(self.buttons) >= 10:
raise KeyboardException("Max 10 rows")
self.buttons.append([])
def _add_button(self, action: dict) -> None:
"""
:param action:
:return:
"""
current_row = self.buttons[-1]
current_row.append(action)
def add_text_button(
self,
text: str,
color: ButtonColor = ButtonColor.PRIMARY,
payload: dict = None,
):
"""
:param text:
:param color:
:param payload:
:return:
"""
payload = self.generate_payload(payload)
if not isinstance(color, ButtonColor):
logger.warning("Invalid button color. Used 'PRIMARY'")
color = ButtonColor.PRIMARY.value
else:
color = color.value
if isinstance(text, str):
if len(text) < 1:
raise KeyboardException("Invalid text")
else:
raise KeyboardException("Invalid text")
action = {
"action": {
"type": ButtonType.TEXT.value,
"payload": payload,
"label": text,
},
"color": color,
}
self._add_button(action)
def add_location_button(self, payload: dict = None):
"""
:param payload:
:return:
"""
payload = self.generate_payload(payload)
action = {
"action": {"type": ButtonType.LOCATION.value, "payload": payload}
}
self._add_button(action)
def add_vkpay_button(self, hash: str, payload: dict = None): # noqa
"""
:param hash:
:param payload:
:return:
"""
payload = self.generate_payload(payload)
action = {
"action": {
"type": ButtonType.VKPAY.value,
"payload": payload,
"hash": hash,
}
}
self._add_button(action)
def add_vkapps_button(
self, app_id: int, owner_id: int, label: str, payload: dict = None
):
"""
:param app_id:
:param owner_id:
:param payload:
:return:
"""
payload = self.generate_payload(payload)
action = {
"action": {
"type": ButtonType.VKAPPS.value,
"app_id": app_id,
"owner_id": owner_id,
"payload": payload,
"label": label,
}
}
self._add_button(action)
def get_keyboard(self) -> typing.AnyStr:
"""
Get keyboard json to send.
If keyboard is 'static', you can generate json once and send it every time.
:return:
"""
return JSON_LIBRARY.dumps(self.keyboard)
@classmethod
def get_empty_keyboard(cls: "Keyboard") -> typing.AnyStr:
"""
:return:
"""
keyboard = cls(one_time=True) # noqa
keyboard.keyboard["buttons"] = []
return keyboard.get_keyboard() | 0.714728 | 0.207696 |
from poi.infer.base import BaseModel
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
# a helper to host device storage
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem, shape):
self.host = host_mem
self.device = device_mem
self.shape = shape
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
class TRTModel(BaseModel):
def __init__(self, trt_path, ctx_id,
batch_size=None, transforms=None, input_names=None,
base_record=None, logger=None):
BaseModel.__init__(self, batch_size, transforms, input_names, base_record, logger)
self.name = "TensorRT"
self.init_model(trt_path, ctx_id)
def init_model(self, trt_path, ctx_id):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
cuda.init()
device = cuda.Device(ctx_id)
self.ctx = device.make_context()
with open(trt_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
self.input_buffs = {}
self.output_buffs = {}
self.bindings = []
self.stream = cuda.Stream()
for name in engine:
shape = engine.get_binding_shape(name)
size = trt.volume(shape) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(name))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
self.bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(name):
self.input_buffs[name] = HostDeviceMem(host_mem, device_mem, shape)
else:
self.output_buffs[name] = HostDeviceMem(host_mem, device_mem, shape)
self.model = engine.create_execution_context()
self.logger.info("Warmup up...")
self.inference_loops(10)
def inference(self):
for name in self.inputs:
# Transfer input data to page locked memory
np.copyto(self.input_buffs[name].host, self.inputs[name].flatten())
# Transfer input data to the GPU.
cuda.memcpy_htod_async(self.input_buffs[name].device,
self.input_buffs[name].host, self.stream)
# Run inference.
self.model.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
for name in self.output_buffs:
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(self.output_buffs[name].host,
self.output_buffs[name].device, self.stream)
# Synchronize the stream
self.stream.synchronize()
# Return only the host outputs.
return {name: np.reshape(self.output_buffs[name].host, self.output_buffs[name].shape)
for name in self.output_buffs}
def close(self):
self.ctx.pop() | poi/infer/trt.py | from poi.infer.base import BaseModel
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
# a helper to host device storage
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem, shape):
self.host = host_mem
self.device = device_mem
self.shape = shape
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
class TRTModel(BaseModel):
def __init__(self, trt_path, ctx_id,
batch_size=None, transforms=None, input_names=None,
base_record=None, logger=None):
BaseModel.__init__(self, batch_size, transforms, input_names, base_record, logger)
self.name = "TensorRT"
self.init_model(trt_path, ctx_id)
def init_model(self, trt_path, ctx_id):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
cuda.init()
device = cuda.Device(ctx_id)
self.ctx = device.make_context()
with open(trt_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
self.input_buffs = {}
self.output_buffs = {}
self.bindings = []
self.stream = cuda.Stream()
for name in engine:
shape = engine.get_binding_shape(name)
size = trt.volume(shape) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(name))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
self.bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(name):
self.input_buffs[name] = HostDeviceMem(host_mem, device_mem, shape)
else:
self.output_buffs[name] = HostDeviceMem(host_mem, device_mem, shape)
self.model = engine.create_execution_context()
self.logger.info("Warmup up...")
self.inference_loops(10)
def inference(self):
for name in self.inputs:
# Transfer input data to page locked memory
np.copyto(self.input_buffs[name].host, self.inputs[name].flatten())
# Transfer input data to the GPU.
cuda.memcpy_htod_async(self.input_buffs[name].device,
self.input_buffs[name].host, self.stream)
# Run inference.
self.model.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
for name in self.output_buffs:
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(self.output_buffs[name].host,
self.output_buffs[name].device, self.stream)
# Synchronize the stream
self.stream.synchronize()
# Return only the host outputs.
return {name: np.reshape(self.output_buffs[name].host, self.output_buffs[name].shape)
for name in self.output_buffs}
def close(self):
self.ctx.pop() | 0.783947 | 0.11187 |
import sys
import time
import logging
import schedule
from nemapi import NetEase
from charter import Charter
from utils.cover import CoverToolkits
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(level=logging.DEBUG)
logger.addHandler(stream_handler)
class NemRoutine(Charter):
n = NetEase()
charts = {
'nem_rise': 0, # 云音乐飙升榜
'nem_new': 3779629, # 云音乐新歌榜
'nem_original': 2884035, # 网易原创歌曲榜
'nem_hot': 3778678, # 云音乐热歌榜
}
nem_bucket_list = CoverToolkits.listBucket('nem')
def getChart(self, key) -> list:
logger.debug(f'getChart getting {key}...')
time.sleep(1)
chart = self.n.top_songlist(_id=self.charts[key])
return self.regularize(chart)
def getSongTitleList(self, key):
return [i['name'] for i in self.__dict__[key]]
def buildHashedList(self, query: list):
'''
:param query: <list> search result from MongoDB
'''
return hash([i['name'] for i in query].__str__())
def regularize(self, chart):
'''
'''
logger.debug('entering regularize...')
pos = 0
for item in chart:
pos += 1
logger.debug(f'pos: {pos}')
album_id = item['album']['id']
item['album_id'] = album_id
song_id = item['id']
item['song_id'] = song_id
item['album_url'] = f'https://music.163.com/album?id={album_id}'
item['album_cover_source'] = item['album']['picUrl']
# storeInCOS will do judge, so just call storeInCOS here
key = CoverToolkits.storeInCOS(
nem_id=album_id,
nem_url=item['album_cover_source'],
)
item['album_cover'] = key
item['release_date'] = item['album']['publishTime']
item['album_title'] = item['album']['name']
item['song_pos'] = pos
item['song_url'] = f'https://music.163.com/song?id={song_id}'
item['song_title'] = item['name']
item['artists'] = []
for a in item['album']['artists']:
artist = {'name': a['name']}
artist['url'] = f'https://music.163.com/artist?id={a["id"]}'
item['artists'].append(artist)
item['company'] = [item['album']['company']]
return chart
if __name__ == "__main__":
NemRoutine()
schedule.every().day.at("00:01").do(NemRoutine)
logger.info(schedule.jobs)
logger.info('schedule 安排上了')
while True:
schedule.run_pending()
time.sleep(1) | charts/nembee.py | import sys
import time
import logging
import schedule
from nemapi import NetEase
from charter import Charter
from utils.cover import CoverToolkits
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(level=logging.DEBUG)
logger.addHandler(stream_handler)
class NemRoutine(Charter):
n = NetEase()
charts = {
'nem_rise': 0, # 云音乐飙升榜
'nem_new': 3779629, # 云音乐新歌榜
'nem_original': 2884035, # 网易原创歌曲榜
'nem_hot': 3778678, # 云音乐热歌榜
}
nem_bucket_list = CoverToolkits.listBucket('nem')
def getChart(self, key) -> list:
logger.debug(f'getChart getting {key}...')
time.sleep(1)
chart = self.n.top_songlist(_id=self.charts[key])
return self.regularize(chart)
def getSongTitleList(self, key):
return [i['name'] for i in self.__dict__[key]]
def buildHashedList(self, query: list):
'''
:param query: <list> search result from MongoDB
'''
return hash([i['name'] for i in query].__str__())
def regularize(self, chart):
'''
'''
logger.debug('entering regularize...')
pos = 0
for item in chart:
pos += 1
logger.debug(f'pos: {pos}')
album_id = item['album']['id']
item['album_id'] = album_id
song_id = item['id']
item['song_id'] = song_id
item['album_url'] = f'https://music.163.com/album?id={album_id}'
item['album_cover_source'] = item['album']['picUrl']
# storeInCOS will do judge, so just call storeInCOS here
key = CoverToolkits.storeInCOS(
nem_id=album_id,
nem_url=item['album_cover_source'],
)
item['album_cover'] = key
item['release_date'] = item['album']['publishTime']
item['album_title'] = item['album']['name']
item['song_pos'] = pos
item['song_url'] = f'https://music.163.com/song?id={song_id}'
item['song_title'] = item['name']
item['artists'] = []
for a in item['album']['artists']:
artist = {'name': a['name']}
artist['url'] = f'https://music.163.com/artist?id={a["id"]}'
item['artists'].append(artist)
item['company'] = [item['album']['company']]
return chart
if __name__ == "__main__":
NemRoutine()
schedule.every().day.at("00:01").do(NemRoutine)
logger.info(schedule.jobs)
logger.info('schedule 安排上了')
while True:
schedule.run_pending()
time.sleep(1) | 0.193376 | 0.086864 |
import numpy as np
import sys
from .telescope_functions import *
from .usefuls import *
from . import conv
from . import cosmology as cm
from . import smoothing as sm
import scipy
from glob import glob
from time import time, sleep
import pickle
from joblib import Parallel, delayed
from tqdm import tqdm
def noise_map(ncells, z, depth_mhz, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None, verbose=True, fft_wrap=False):
    """
    @ Ghara et al. (2017), Giri et al. (2018b)

    It creates a noise map by simulating the radio observation strategy.

    Parameters
    ----------
    z: float
        Redshift.
    ncells: int
        The grid size.
    depth_mhz: float
        The bandwidth in MHz.
    obs_time: float
        The observation time in hours.
    total_int_time: float
        Total observation per day time in hours
    int_time: float
        Intergration time in seconds
    declination: float
        Declination angle in deg
    uv_map: ndarray
        numpy array containing gridded uv coverage. If nothing given, then the uv map
        will be simulated
    N_ant: int
        Number of antennae
    filename: str
        The path to the file containing the telescope configuration.

            - As a default, it takes the SKA-Low configuration from Sept 2016
            - It is not used if uv_map and N_ant is provided
    boxsize: float
        Boxsize in Mpc
    verbose: bool
        If True, verbose is shown
    fft_wrap: bool
        If True, use the periodic-wrapping inverse FFT (ifft2_wrap) when
        transforming back to the image plane. Defaults to False.

    Returns
    -------
    noise_map: ndarray
        A 2D slice of the interferometric noise at that frequency (in muJy).
    """
    if not filename:
        N_ant = SKA1_LowConfig_Sept2016().shape[0]
    if not uv_map.size:
        # Simulate the gridded uv coverage when it is not supplied.
        # Forward `verbose` so the caller's choice is respected (it was
        # previously ignored and the default was always used).
        uv_map, N_ant = get_uv_map(ncells, z, filename=filename,
                                   total_int_time=total_int_time,
                                   int_time=int_time, boxsize=boxsize,
                                   declination=declination, verbose=verbose)
    if not N_ant:
        N_ant = np.loadtxt(filename, dtype=str).shape[0]
    # Per-visibility rms noise; kept quiet here because the uv-map step
    # already reports progress when verbose is requested.
    sigma, rms_noi = kanan_noise_image_ska(z, uv_map, depth_mhz, obs_time,
                                           int_time, N_ant_ska=N_ant,
                                           verbose=False)
    # Independent Gaussian realisations for the real and imaginary parts.
    noise_real = np.random.normal(loc=0.0, scale=rms_noi, size=(ncells, ncells))
    noise_imag = np.random.normal(loc=0.0, scale=rms_noi, size=(ncells, ncells))
    noise_arr = noise_real + 1.j*noise_imag
    # Scale by the uv response, then transform back to the image plane.
    # The sqrt factor rescales per-integration noise to the full obs_time.
    noise_four = apply_uv_response_noise(noise_arr, uv_map)
    scale = np.sqrt(int_time/3600./obs_time)
    if fft_wrap:
        noise_img = ifft2_wrap(noise_four)*scale
    else:
        noise_img = np.fft.ifft2(noise_four)*scale
    return np.real(noise_img)
def apply_uv_response_noise(noise, uv_map):
    '''
    Apply the effect of uv coverage on the noise array.

    Each Fourier cell is divided by sqrt(uv_map) (the rms noise decreases
    with the square root of the number of measurements in that cell), and
    unsampled cells (uv_map == 0) are set to zero.

    Parameters
    ----------
    noise: ndarray
        Gridded (possibly complex) noise realisation in the uv-plane.
    uv_map: ndarray
        Gridded uv coverage; same shape as `noise`.

    Returns
    -------
    ndarray
        Noise scaled by the uv response, zero where there is no coverage.
    '''
    sampled = uv_map != 0
    out = np.zeros_like(noise)
    # Divide only in sampled cells; the previous noise/np.sqrt(uv_map)
    # divided by zero in empty cells, raising RuntimeWarnings and creating
    # inf/nan values before they were overwritten with zero.
    np.divide(noise, np.sqrt(uv_map), out=out, where=sampled)
    return out
def ifft2_wrap(nn1):
    '''
    Inverse 2D FFT with periodic wrapping.

    The input grid is tiled 2x2 (rolled by half a grid in each direction so
    the original map sits at the centre), inverse transformed, and the
    central quarter of the result is returned.

    Parameters
    ----------
    nn1: ndarray
        2D array; each dimension should be even so the half-grid rolls and
        the central crop are exact.

    Returns
    -------
    ndarray
        Complex inverse FFT of the wrapped array, same shape as `nn1`.
    '''
    assert nn1.ndim==2
    # Integer half-sizes: the previous code used '/', which yields floats
    # under Python 3 and breaks both np.roll (integer shift required) and
    # the final slicing.
    h0 = nn1.shape[0]//2
    h1 = nn1.shape[1]//2
    bla0 = np.vstack((nn1,nn1))
    bla1 = np.roll(bla0, h0, 0)
    bla2 = np.hstack((bla1,bla1))
    bla3 = np.roll(bla2, h1, 1)
    imap = np.fft.ifft2(bla3)
    return imap[h0:-h0, h1:-h1]
def telescope_response_on_image(array, z, depth_mhz, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None):
    """
    Apply the interferometric sampling of the telescope to an image.

    The image is Fourier transformed, every uv cell the telescope never
    samples (uv_map == 0) is zeroed out, and the result is transformed back
    to the image plane.

    Parameters
    ----------
    array: ndarray
        Square 2D image array.
    z: float
        Redshift.
    depth_mhz: float
        The bandwidth in MHz.
    obs_time: float
        The observation time in hours.
    total_int_time: float
        Total observation per day time in hours
    int_time: float
        Intergration time in seconds
    declination: float
        Declination angle in deg
    uv_map: ndarray
        numpy array containing gridded uv coverage. If nothing given, then
        the uv map will be simulated
    N_ant: int
        Number of antennae
    filename: str
        The path to the file containing the telescope configuration.

            - As a default, it takes the SKA-Low configuration from Sept 2016
            - It is not used if uv_map and N_ant is provided
    boxsize: float
        Boxsize in Mpc

    Returns
    -------
    ndarray
        Radio image after applying the effect of radio observation strategy.
    """
    # NOTE(review): depth_mhz and obs_time are accepted for API symmetry
    # with noise_map but are not used in this function.
    assert array.shape[0] == array.shape[1]
    grid_size = array.shape[0]
    if not filename:
        N_ant = SKA1_LowConfig_Sept2016().shape[0]
    if not uv_map.size:
        uv_map, N_ant = get_uv_map(grid_size, z, filename=filename,
                                   total_int_time=total_int_time,
                                   int_time=int_time, boxsize=boxsize,
                                   declination=declination)
    if not N_ant:
        N_ant = np.loadtxt(filename, dtype=str).shape[0]
    # Mask out the unsampled Fourier modes, then return to the image plane.
    visibilities = np.fft.fft2(array)
    visibilities[uv_map == 0] = 0
    return np.real(np.fft.ifft2(visibilities))
def get_uv_map(ncells, z, filename=None, total_int_time=6., int_time=10., boxsize=None, declination=-30., verbose=True):
	"""
	Simulate the gridded uv coverage of a daily observation at redshift z.

	Parameters
	----------
	ncells: int
		Number of cells.
	z: float
		Redshift.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016.
	boxsize: float
		Boxsize in Mpc.
	verbose: bool
		If True, verbose is shown.

	Returns
	-------
	uv_map: ndarray
		Array of gridded uv coverage.
	N_ant: int
		Number of antennae.
	"""
	if not filename:
		N_ant = SKA1_LowConfig_Sept2016().shape[0]
	uv_map, N_ant = get_uv_daily_observation(
		ncells, z, filename,
		total_int_time=total_int_time,
		int_time=int_time,
		boxsize=boxsize,
		declination=declination,
		verbose=verbose,
	)
	return uv_map, N_ant
def make_uv_map_lightcone(ncells, zs, filename=None, total_int_time=6., int_time=10., boxsize=None, declination=-30., verbose=True):
	"""
	Simulate gridded uv coverage at every redshift of a lightcone.

	Parameters
	----------
	ncells: int
		Number of cells.
	zs: ndarray
		Array of redshift values.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016.
	boxsize: float
		Boxsize in Mpc.
	verbose: bool
		If True, verbose is shown.

	Returns
	-------
	uv_lc: ndarray
		Array of gridded uv coverage at all the redshifts.
	N_ant: int
		Number of antennae.
	"""
	uv_lc = np.zeros((ncells, ncells, zs.shape[0]))
	percc = np.round(100./zs.shape[0], decimals=2)
	for i in range(zs.shape[0]):
		z = zs[i]
		uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination, verbose=verbose)
		uv_lc[:,:,i] = uv_map
		# '%%' escapes the percent sign; the original string ended with a bare
		# '%' and raised "ValueError: incomplete format" on the first pass.
		# Progress counts the slice just finished, hence (i+1).
		print("\nThe lightcone has been constructed upto %.1f %%" % ((i+1)*percc))
	return uv_lc, N_ant
def telescope_response_on_coeval(array, z, depth_mhz=None, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None):
	"""
	Apply the telescope's uv-coverage response to every slice of a coeval cube.

	A single uv map (computed at redshift z unless provided) is applied to
	each slice along the last axis via telescope_response_on_image.
	"""
	ncells = array.shape[-1]
	if not filename:
		N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not boxsize:
		boxsize = conv.LB
	if not depth_mhz:
		# Channel width from the comoving extent of the box around z.
		nu_near = cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z) - boxsize/2))
		nu_far = cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z) + boxsize/2))
		depth_mhz = (nu_near - nu_far)/ncells
	if not uv_map.size:
		uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
	if not N_ant:
		N_ant = np.loadtxt(filename, dtype=str).shape[0]
	data3d = np.zeros(array.shape)
	print("Creating the noise cube")
	for k in range(ncells):
		data3d[:,:,k] = telescope_response_on_image(array[:,:,k], z, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant)
	return data3d
def noise_cube_coeval(ncells, z, depth_mhz=None, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None, verbose=True, fft_wrap=False):
	"""
	@ Ghara et al. (2017), Giri et al. (2018b)

	It creates a noise coeval cube by simulating the radio observation strategy.

	Parameters
	----------
	ncells: int
		The grid size.
	z: float
		Redshift.
	depth_mhz: float
		The bandwidth in MHz.
	obs_time: float
		The observation time in hours.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	uv_map: ndarray
		Gridded uv coverage. If empty, it is simulated.
	N_ant: int
		Number of antennae.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016; unused when uv_map and N_ant are given.
	boxsize: float
		Boxsize in Mpc.
	verbose: bool
		If True, verbose is shown.

	Returns
	-------
	noise_cube: ndarray
		A 3D cube of the interferometric noise (in mK).
		The frequency is assumed to be the same along the assumed frequency (last) axis.
	"""
	if not filename: N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not boxsize: boxsize = conv.LB
	if not depth_mhz: depth_mhz = (cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z)-boxsize/2))-cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z)+boxsize/2)))/ncells
	if not uv_map.size: uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
	if not N_ant: N_ant = np.loadtxt(filename, dtype=str).shape[0]
	noise3d = np.zeros((ncells,ncells,ncells))
	# Remember the caller's verbosity: the loop silences `verbose` after the
	# first slice, which previously made the closing message unreachable.
	verbose_init = verbose
	if verbose_init: print("Creating the noise cube...")
	sleep(1)
	for k in tqdm(range(ncells), disable=not verbose_init):
		noise2d = noise_map(ncells, z, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant, verbose=verbose, fft_wrap=fft_wrap)
		noise3d[:,:,k] = noise2d
		verbose = False  # only the first slice reports details from noise_map
	if verbose_init: print("...noise cube created.")
	return jansky_2_kelvin(noise3d, z, boxsize=boxsize)
def noise_cube_lightcone(ncells, z, obs_time=1000, filename=None, boxsize=None, save_uvmap=None, total_int_time=6., int_time=10., declination=-30., N_ant=None, fft_wrap=False, n_jobs=4, checkpoint=64):
	"""
	@ Ghara et al. (2017), Giri et al. (2018b)

	It creates a noise cube by simulating the radio observation strategy.
	We assume the third axis to be along the line-of-sight and therefore
	each slice will correspond to a different redshift.

	Parameters
	----------
	ncells: int
		The grid size.
	z: float
		Central redshift.
	obs_time: float
		The observation time in hours.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	N_ant: int
		Number of antennae.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016.
	boxsize: float
		Boxsize in Mpc.
	save_uvmap: str
		Give the filename of the pickle file of uv maps. If
		- the file is absent, then uv maps are created and saved with the given filename.
		- the file is present, then the uv map is read in.
		- the file is present and the uv maps are incomplete, then it is completed.
		- None is given, then the uv maps are not saved.
	n_jobs: int
		Number of CPUs to run in. The calculation is parallelised using joblib.
	checkpoint: int
		Number of iterations after which uv maps are saved if save_uvmap is not None.

	Returns
	-------
	noise_lightcone: A 3D cubical lightcone of the interferometric noise with frequency varying
			 along last axis (in mK).
	"""
	if not filename: N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not boxsize: boxsize = conv.LB
	# One redshift per slice along the line-of-sight (last) axis.
	zs = cm.cdist_to_z(np.linspace(cm.z_to_cdist(z)-boxsize/2, cm.z_to_cdist(z)+boxsize/2, ncells))
	if not N_ant: N_ant = np.loadtxt(filename, dtype=str).shape[0]
	noise3d = np.zeros((ncells,ncells,ncells))
	verbose = True
	if save_uvmap is not None:
		save_uvmap = save_uvmap.split('.')[0]+'.pkl'
		if len(glob(save_uvmap)):
			uvs = pickle.load(open(save_uvmap, 'rb'))
			print('All or some uv maps is read from the given file. Be sure that they were run with the same parameter values as provided now.')
		else:
			uvs = {}
	else:
		uvs = {}
	# Create uv maps
	print('Creating the uv maps.')
	if n_jobs<=1:
		tstart = time()
		for k,zi in enumerate(zs):
			if '{:.5f}'.format(zi) not in uvs.keys():
				uv_map, N_ant = get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
				uvs['{:.5f}'.format(zi)] = uv_map
				uvs['Nant'] = N_ant
				# Checkpoint to disk only when a save file was requested; the
				# original called pickle.dump unconditionally and crashed with
				# save_uvmap=None (open(None, 'wb')).
				if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
				verbose = False
				tend = time()
				print('\nz = {:.5f} | {:.2f} % completed | Elapsed time: {:.2f} mins'.format(zi,100*(k+1)/zs.size,(tend-tstart)/60))
	else:
		Nbase, N_ant = from_antenna_config(filename, zs[0])
		uvs['Nant'] = N_ant
		_uvmap = lambda zi: get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination, verbose=False)[0]
		if checkpoint<2*n_jobs:
			checkpoint = 4*n_jobs
			print('checkpoint value should be more than 4*n_jobs. checkpoint set to 4*n_jobs.')
		# Only compute the redshifts that are missing from the cache.
		z_run = np.array([])
		for k,zi in enumerate(zs):
			if '{:.5f}'.format(zi) not in uvs.keys():
				z_run = np.append(z_run, zi)
		n_iterations = int(z_run.size/checkpoint)
		if n_iterations>1:
			for ii in range(n_iterations):
				istart, iend = ii*checkpoint, (ii+1)*checkpoint
				# The last chunk absorbs the remainder.
				zrs = z_run[istart:iend] if ii+1<n_iterations else z_run[istart:]
				fla = Parallel(n_jobs=n_jobs,verbose=20)(delayed(_uvmap)(i) for i in zrs)
				for jj,zi in enumerate(zrs):
					uvs['{:.5f}'.format(zi)] = fla[jj]
				if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
				print('{:.2f} % completed'.format(100*(len(uvs.keys())-1)/zs.size))
		else:
			fla = Parallel(n_jobs=n_jobs,verbose=20)(delayed(_uvmap)(i) for i in z_run)
			for jj,zi in enumerate(z_run):
				uvs['{:.5f}'.format(zi)] = fla[jj]
			if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
	print('...done')
	# Calculate noise maps
	print('Creating noise.')
	for k,zi in enumerate(zs):
		# Channel width from the frequency spacing of neighbouring slices.
		if k+1<zs.size: depth_mhz = np.abs(cm.z_to_nu(zs[k+1])-cm.z_to_nu(zs[k]))
		else: depth_mhz = np.abs(cm.z_to_nu(zs[k])-cm.z_to_nu(zs[k-1]))
		uv_map, N_ant = uvs['{:.5f}'.format(zi)], uvs['Nant']
		noise2d = noise_map(ncells, zi, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant, verbose=verbose, fft_wrap=fft_wrap)
		noise3d[:,:,k] = jansky_2_kelvin(noise2d, zi, boxsize=boxsize)
		verbose = False
		print('z = {:.5f} | {:.2f} % completed'.format(zi,100*(k+1)/zs.size))
	# Each slice is already converted to mK at its own redshift above; the
	# original converted the whole cube a second time at the central z,
	# double-applying the Jansky->Kelvin conversion (cf. noise_lightcone).
	return noise3d
def noise_lightcone(ncells, zs, obs_time=1000, filename=None, boxsize=None, save_uvmap=None, total_int_time=6., int_time=10., declination=-30., N_ant=None, fft_wrap=False, n_jobs=4, checkpoint=64):
	"""
	@ Ghara et al. (2017), Giri et al. (2018b)

	It creates a noise lightcone by simulating the radio observation strategy.

	Parameters
	----------
	ncells: int
		The grid size.
	zs: ndarray
		List of redshifts.
	obs_time: float
		The observation time in hours.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	N_ant: int
		Number of antennae.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016.
	boxsize: float
		Boxsize in Mpc.
	save_uvmap: str
		Give the filename of the pickle file of uv maps. If
		- the file is absent, then uv maps are created and saved with the given filename.
		- the file is present, then the uv map is read in.
		- the file is present and the uv maps are incomplete, then it is completed.
		- None is given, then the uv maps are not saved.
	n_jobs: int
		Number of CPUs to run in. The calculation is parallelised using joblib.
	checkpoint: int
		Number of iterations after which uv maps are saved if save_uvmap is not None.

	Returns
	-------
	noise_lightcone: A 3D lightcone of the interferometric noise with frequency varying
			 along last axis (in mK).
	"""
	if not filename: N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not boxsize: boxsize = conv.LB
	if not N_ant: N_ant = np.loadtxt(filename, dtype=str).shape[0]
	noise3d = np.zeros((ncells,ncells,zs.size))
	verbose = True
	if save_uvmap is not None:
		save_uvmap = save_uvmap.split('.')[0]+'.pkl'
		if len(glob(save_uvmap)):
			uvs = pickle.load(open(save_uvmap, 'rb'))
			print('All or some uv maps is read from the given file. Be sure that they were run with the same parameter values as provided now.')
		else:
			uvs = {}
	else:
		uvs = {}
	# Create uv maps
	print('Creating the uv maps.')
	if n_jobs<=1:
		tstart = time()
		for k,zi in enumerate(zs):
			if '{:.5f}'.format(zi) not in uvs.keys():
				uv_map, N_ant = get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
				uvs['{:.5f}'.format(zi)] = uv_map
				uvs['Nant'] = N_ant
				# Checkpoint to disk only when a save file was requested; the
				# original called pickle.dump unconditionally and crashed with
				# save_uvmap=None (open(None, 'wb')).
				if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
				verbose = False
				tend = time()
				print('\nz = {:.5f} | {:.2f} % completed | Elapsed time: {:.2f} mins'.format(zi,100*(k+1)/zs.size,(tend-tstart)/60))
	else:
		Nbase, N_ant = from_antenna_config(filename, zs[0])
		uvs['Nant'] = N_ant
		_uvmap = lambda zi: get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination, verbose=False)[0]
		if checkpoint<2*n_jobs:
			checkpoint = 4*n_jobs
			print('checkpoint value should be more than 4*n_jobs. checkpoint set to 4*n_jobs.')
		# Only compute the redshifts that are missing from the cache.
		z_run = np.array([])
		for k,zi in enumerate(zs):
			if '{:.5f}'.format(zi) not in uvs.keys():
				z_run = np.append(z_run, zi)
		n_iterations = int(z_run.size/checkpoint)
		if n_iterations>1:
			for ii in range(n_iterations):
				istart, iend = ii*checkpoint, (ii+1)*checkpoint
				# The last chunk absorbs the remainder.
				zrs = z_run[istart:iend] if ii+1<n_iterations else z_run[istart:]
				fla = Parallel(n_jobs=n_jobs,verbose=20)(delayed(_uvmap)(i) for i in zrs)
				for jj,zi in enumerate(zrs):
					uvs['{:.5f}'.format(zi)] = fla[jj]
				if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
				print('{:.2f} % completed'.format(100*(len(uvs.keys())-1)/zs.size))
		else:
			fla = Parallel(n_jobs=n_jobs,verbose=20)(delayed(_uvmap)(i) for i in z_run)
			for jj,zi in enumerate(z_run):
				uvs['{:.5f}'.format(zi)] = fla[jj]
			if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
	print('...done')
	# Calculate noise maps
	print('Creating noise.')
	for k,zi in enumerate(zs):
		# Channel width from the frequency spacing of neighbouring slices.
		if k+1<zs.size: depth_mhz = np.abs(cm.z_to_nu(zs[k+1])-cm.z_to_nu(zs[k]))
		else: depth_mhz = np.abs(cm.z_to_nu(zs[k])-cm.z_to_nu(zs[k-1]))
		uv_map, N_ant = uvs['{:.5f}'.format(zi)], uvs['Nant']
		noise2d = noise_map(ncells, zi, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant, verbose=verbose, fft_wrap=fft_wrap)
		noise3d[:,:,k] = jansky_2_kelvin(noise2d, zi, boxsize=boxsize)
		verbose = False
		print('\nz = {:.5f} | {:.2f} % completed'.format(zi,100*(k+1)/zs.size))
	return noise3d
def gauss_kernel_3d(size, sigma=1.0, fwhm=None):
	'''
	Generate a normalized 3D Gaussian kernel, defined as
	exp(-(x^2 + y^2 + z^2)/(2 sigma^2)).

	Parameters:
		size (int): Width of output array in pixels along each axis.
		sigma = 1.0 (float): The sigma parameter for the Gaussian.
		fwhm = None (float or None): The full width at half maximum.
				If this parameter is given, it overrides sigma.

	Returns:
		numpy array with the Gaussian of shape size x size x size,
		normalized so that its sum is 1.
	'''
	if fwhm is not None:
		# FWHM = 2*sqrt(2*ln 2) * sigma for a Gaussian.
		sigma = fwhm/(2.*np.sqrt(2.*np.log(2)))
	half = int(size/2)
	if size % 2 == 0:
		x, y, z = np.mgrid[-half:half, -half:half, -half:half]
	else:
		x, y, z = np.mgrid[-half:half+1, -half:half+1, -half:half+1]
	g = np.exp(-(x**2 + y**2 + z**2)/(2.*sigma**2))
	return g/g.sum()
def smooth_gauss_3d(array, fwhm):
	'''
	Smooth a 3D array by convolving it with a normalized Gaussian kernel
	of the given full width at half maximum (in pixels).

	The kernel is built with the same pixel width as the array's first axis.
	NOTE(review): scipy.signal.fftconvolve is called with its default
	mode='full', so the output is larger than the input
	(shape[i] + kernel_shape[i] - 1 along each axis); confirm whether
	mode='same' was intended.
	'''
	gg = gauss_kernel_3d(array.shape[0],fwhm=fwhm)
	out = scipy.signal.fftconvolve(array, gg)
	return out
import sys
from .telescope_functions import *
from .usefuls import *
from . import conv
from . import cosmology as cm
from . import smoothing as sm
import scipy
from glob import glob
from time import time, sleep
import pickle
from joblib import Parallel, delayed
from tqdm import tqdm
def noise_map(ncells, z, depth_mhz, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None, verbose=True, fft_wrap=False):
	"""
	@ Ghara et al. (2017), Giri et al. (2018b)

	It creates a noise map by simulating the radio observation strategy.

	Parameters
	----------
	z: float
		Redshift.
	ncells: int
		The grid size.
	depth_mhz: float
		The bandwidth in MHz.
	obs_time: float
		The observation time in hours.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	uv_map: ndarray
		Gridded uv coverage. If empty, it is simulated.
	N_ant: int
		Number of antennae.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016; unused when uv_map and N_ant are given.
	boxsize: float
		Boxsize in Mpc.
	verbose: bool
		If True, verbose is shown.

	Returns
	-------
	noise_map: ndarray
		A 2D slice of the interferometric noise at that frequency (in muJy).
	"""
	if not filename:
		N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not uv_map.size:
		uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
	if not N_ant:
		N_ant = np.loadtxt(filename, dtype=str).shape[0]
	sigma, rms_noi = kanan_noise_image_ska(z, uv_map, depth_mhz, obs_time, int_time, N_ant_ska=N_ant, verbose=False)
	# Draw independent Gaussian real/imaginary parts in the uv plane.
	re_part = np.random.normal(loc=0.0, scale=rms_noi, size=(ncells, ncells))
	im_part = np.random.normal(loc=0.0, scale=rms_noi, size=(ncells, ncells))
	gridded = apply_uv_response_noise(re_part + 1.j*im_part, uv_map)
	scaling = np.sqrt(int_time/3600./obs_time)
	if fft_wrap:
		image = ifft2_wrap(gridded)*scaling
	else:
		image = np.fft.ifft2(gridded)*scaling
	return np.real(image)
def apply_uv_response_noise(noise, uv_map):
	'''
	Apply the effect of uv coverage on the noise array.

	Each Fourier cell is scaled by 1/sqrt(uv coverage); cells never sampled
	by a baseline (uv_map == 0) are set to zero.

	Parameters
	----------
	noise: ndarray
		Noise array in the uv (Fourier) plane.
	uv_map: ndarray
		Gridded uv coverage (baseline hits per cell), same shape as noise.

	Returns
	-------
	ndarray of the same shape with the uv response applied.
	'''
	out = np.zeros_like(noise, dtype=np.result_type(noise, np.float64))
	sampled = uv_map != 0
	# Divide only where there is coverage; the original divided everywhere
	# first and triggered divide-by-zero RuntimeWarnings before masking.
	out[sampled] = noise[sampled]/np.sqrt(uv_map[sampled])
	return out
def ifft2_wrap(nn1):
	'''
	Inverse 2D FFT with periodic wrapping to suppress edge effects.

	The input is tiled 2x2 and rolled by half its extent along each axis
	before the inverse FFT; the central region matching the original shape
	is returned.

	Parameters
	----------
	nn1: ndarray
		2D input array (typically the gridded uv-plane noise).

	Returns
	-------
	Complex 2D array of the same shape as the input.
	'''
	assert nn1.ndim == 2
	# Use integer division: in Python 3, shape/2 is a float, which np.roll
	# and slice bounds reject (TypeError).
	half0, half1 = nn1.shape[0]//2, nn1.shape[1]//2
	tiled = np.roll(np.vstack((nn1, nn1)), half0, 0)
	tiled = np.roll(np.hstack((tiled, tiled)), half1, 1)
	imap = np.fft.ifft2(tiled)
	return imap[half0:-half0, half1:-half1]
def telescope_response_on_image(array, z, depth_mhz, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None):
	"""
	Apply the instrument's uv-coverage response to a single sky image.

	The image is Fourier transformed, the modes without any uv coverage are
	zeroed, and the result is transformed back.

	Parameters
	----------
	array: ndarray
		Square image array.
	z: float
		Redshift.
	depth_mhz: float
		The bandwidth in MHz.
	obs_time: float
		The observation time in hours.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	uv_map: ndarray
		Gridded uv coverage. If empty, it is simulated.
	N_ant: int
		Number of antennae.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016; unused when uv_map and N_ant are given.
	boxsize: float
		Boxsize in Mpc.

	Returns
	-------
	Radio image after applying the effect of the radio observation strategy.
	"""
	assert array.shape[0] == array.shape[1]
	ncells = array.shape[0]
	if not filename:
		N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not uv_map.size:
		uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
	if not N_ant:
		N_ant = np.loadtxt(filename, dtype=str).shape[0]
	fourier_img = np.fft.fft2(array)
	fourier_img[uv_map == 0] = 0
	return np.real(np.fft.ifft2(fourier_img))
def get_uv_map(ncells, z, filename=None, total_int_time=6., int_time=10., boxsize=None, declination=-30., verbose=True):
	"""
	Simulate the gridded uv coverage of a daily observation at redshift z.

	Parameters
	----------
	ncells: int
		Number of cells.
	z: float
		Redshift.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016.
	boxsize: float
		Boxsize in Mpc.
	verbose: bool
		If True, verbose is shown.

	Returns
	-------
	uv_map: ndarray
		Array of gridded uv coverage.
	N_ant: int
		Number of antennae.
	"""
	if not filename:
		N_ant = SKA1_LowConfig_Sept2016().shape[0]
	uv_map, N_ant = get_uv_daily_observation(
		ncells, z, filename,
		total_int_time=total_int_time,
		int_time=int_time,
		boxsize=boxsize,
		declination=declination,
		verbose=verbose,
	)
	return uv_map, N_ant
def make_uv_map_lightcone(ncells, zs, filename=None, total_int_time=6., int_time=10., boxsize=None, declination=-30., verbose=True):
	"""
	Simulate gridded uv coverage at every redshift of a lightcone.

	Parameters
	----------
	ncells: int
		Number of cells.
	zs: ndarray
		Array of redshift values.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016.
	boxsize: float
		Boxsize in Mpc.
	verbose: bool
		If True, verbose is shown.

	Returns
	-------
	uv_lc: ndarray
		Array of gridded uv coverage at all the redshifts.
	N_ant: int
		Number of antennae.
	"""
	uv_lc = np.zeros((ncells, ncells, zs.shape[0]))
	percc = np.round(100./zs.shape[0], decimals=2)
	for i in range(zs.shape[0]):
		z = zs[i]
		uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination, verbose=verbose)
		uv_lc[:,:,i] = uv_map
		# '%%' escapes the percent sign; the original string ended with a bare
		# '%' and raised "ValueError: incomplete format" on the first pass.
		# Progress counts the slice just finished, hence (i+1).
		print("\nThe lightcone has been constructed upto %.1f %%" % ((i+1)*percc))
	return uv_lc, N_ant
def telescope_response_on_coeval(array, z, depth_mhz=None, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None):
	"""
	Apply the telescope's uv-coverage response to every slice of a coeval cube.

	A single uv map (computed at redshift z unless provided) is applied to
	each slice along the last axis via telescope_response_on_image.
	"""
	ncells = array.shape[-1]
	if not filename:
		N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not boxsize:
		boxsize = conv.LB
	if not depth_mhz:
		# Channel width from the comoving extent of the box around z.
		nu_near = cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z) - boxsize/2))
		nu_far = cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z) + boxsize/2))
		depth_mhz = (nu_near - nu_far)/ncells
	if not uv_map.size:
		uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
	if not N_ant:
		N_ant = np.loadtxt(filename, dtype=str).shape[0]
	data3d = np.zeros(array.shape)
	print("Creating the noise cube")
	for k in range(ncells):
		data3d[:,:,k] = telescope_response_on_image(array[:,:,k], z, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant)
	return data3d
def noise_cube_coeval(ncells, z, depth_mhz=None, obs_time=1000, filename=None, boxsize=None, total_int_time=6., int_time=10., declination=-30., uv_map=np.array([]), N_ant=None, verbose=True, fft_wrap=False):
	"""
	@ Ghara et al. (2017), Giri et al. (2018b)

	It creates a noise coeval cube by simulating the radio observation strategy.

	Parameters
	----------
	ncells: int
		The grid size.
	z: float
		Redshift.
	depth_mhz: float
		The bandwidth in MHz.
	obs_time: float
		The observation time in hours.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	uv_map: ndarray
		Gridded uv coverage. If empty, it is simulated.
	N_ant: int
		Number of antennae.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016; unused when uv_map and N_ant are given.
	boxsize: float
		Boxsize in Mpc.
	verbose: bool
		If True, verbose is shown.

	Returns
	-------
	noise_cube: ndarray
		A 3D cube of the interferometric noise (in mK).
		The frequency is assumed to be the same along the assumed frequency (last) axis.
	"""
	if not filename: N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not boxsize: boxsize = conv.LB
	if not depth_mhz: depth_mhz = (cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z)-boxsize/2))-cm.z_to_nu(cm.cdist_to_z(cm.z_to_cdist(z)+boxsize/2)))/ncells
	if not uv_map.size: uv_map, N_ant = get_uv_map(ncells, z, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
	if not N_ant: N_ant = np.loadtxt(filename, dtype=str).shape[0]
	noise3d = np.zeros((ncells,ncells,ncells))
	# Remember the caller's verbosity: the loop silences `verbose` after the
	# first slice, which previously made the closing message unreachable.
	verbose_init = verbose
	if verbose_init: print("Creating the noise cube...")
	sleep(1)
	for k in tqdm(range(ncells), disable=not verbose_init):
		noise2d = noise_map(ncells, z, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant, verbose=verbose, fft_wrap=fft_wrap)
		noise3d[:,:,k] = noise2d
		verbose = False  # let only the first slice print details from noise_map
	if verbose_init: print("...noise cube created.")
	return jansky_2_kelvin(noise3d, z, boxsize=boxsize)
def noise_cube_lightcone(ncells, z, obs_time=1000, filename=None, boxsize=None, save_uvmap=None, total_int_time=6., int_time=10., declination=-30., N_ant=None, fft_wrap=False, n_jobs=4, checkpoint=64):
	"""
	@ Ghara et al. (2017), Giri et al. (2018b)

	It creates a noise cube by simulating the radio observation strategy.
	We assume the third axis to be along the line-of-sight and therefore
	each slice will correspond to a different redshift.

	Parameters
	----------
	ncells: int
		The grid size.
	z: float
		Central redshift.
	obs_time: float
		The observation time in hours.
	total_int_time: float
		Total observation per day time in hours.
	int_time: float
		Intergration time in seconds.
	declination: float
		Declination angle in deg.
	N_ant: int
		Number of antennae.
	filename: str
		Path to the telescope-configuration file. Defaults to the SKA-Low
		configuration from Sept 2016.
	boxsize: float
		Boxsize in Mpc.
	save_uvmap: str
		Give the filename of the pickle file of uv maps. If
		- the file is absent, then uv maps are created and saved with the given filename.
		- the file is present, then the uv map is read in.
		- the file is present and the uv maps are incomplete, then it is completed.
		- None is given, then the uv maps are not saved.
	n_jobs: int
		Number of CPUs to run in. The calculation is parallelised using joblib.
	checkpoint: int
		Number of iterations after which uv maps are saved if save_uvmap is not None.

	Returns
	-------
	noise_lightcone: A 3D cubical lightcone of the interferometric noise with frequency varying
			 along last axis (in mK).
	"""
	if not filename: N_ant = SKA1_LowConfig_Sept2016().shape[0]
	if not boxsize: boxsize = conv.LB
	# One redshift per slice along the line-of-sight (last) axis.
	zs = cm.cdist_to_z(np.linspace(cm.z_to_cdist(z)-boxsize/2, cm.z_to_cdist(z)+boxsize/2, ncells))
	if not N_ant: N_ant = np.loadtxt(filename, dtype=str).shape[0]
	noise3d = np.zeros((ncells,ncells,ncells))
	verbose = True
	if save_uvmap is not None:
		save_uvmap = save_uvmap.split('.')[0]+'.pkl'
		if len(glob(save_uvmap)):
			uvs = pickle.load(open(save_uvmap, 'rb'))
			print('All or some uv maps is read from the given file. Be sure that they were run with the same parameter values as provided now.')
		else:
			uvs = {}
	else:
		uvs = {}
	# Create uv maps
	print('Creating the uv maps.')
	if n_jobs<=1:
		tstart = time()
		for k,zi in enumerate(zs):
			if '{:.5f}'.format(zi) not in uvs.keys():
				uv_map, N_ant = get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
				uvs['{:.5f}'.format(zi)] = uv_map
				uvs['Nant'] = N_ant
				# Checkpoint to disk only when a save file was requested; the
				# original called pickle.dump unconditionally and crashed with
				# save_uvmap=None (open(None, 'wb')).
				if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
				verbose = False
				tend = time()
				print('\nz = {:.5f} | {:.2f} % completed | Elapsed time: {:.2f} mins'.format(zi,100*(k+1)/zs.size,(tend-tstart)/60))
	else:
		Nbase, N_ant = from_antenna_config(filename, zs[0])
		uvs['Nant'] = N_ant
		_uvmap = lambda zi: get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination, verbose=False)[0]
		if checkpoint<2*n_jobs:
			checkpoint = 4*n_jobs
			print('checkpoint value should be more than 4*n_jobs. checkpoint set to 4*n_jobs.')
		# Only compute the redshifts that are missing from the cache.
		z_run = np.array([])
		for k,zi in enumerate(zs):
			if '{:.5f}'.format(zi) not in uvs.keys():
				z_run = np.append(z_run, zi)
		n_iterations = int(z_run.size/checkpoint)
		if n_iterations>1:
			for ii in range(n_iterations):
				istart, iend = ii*checkpoint, (ii+1)*checkpoint
				# The last chunk absorbs the remainder.
				zrs = z_run[istart:iend] if ii+1<n_iterations else z_run[istart:]
				fla = Parallel(n_jobs=n_jobs,verbose=20)(delayed(_uvmap)(i) for i in zrs)
				for jj,zi in enumerate(zrs):
					uvs['{:.5f}'.format(zi)] = fla[jj]
				if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
				print('{:.2f} % completed'.format(100*(len(uvs.keys())-1)/zs.size))
		else:
			fla = Parallel(n_jobs=n_jobs,verbose=20)(delayed(_uvmap)(i) for i in z_run)
			for jj,zi in enumerate(z_run):
				uvs['{:.5f}'.format(zi)] = fla[jj]
			if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
	print('...done')
	# Calculate noise maps
	print('Creating noise.')
	for k,zi in enumerate(zs):
		# Channel width from the frequency spacing of neighbouring slices.
		if k+1<zs.size: depth_mhz = np.abs(cm.z_to_nu(zs[k+1])-cm.z_to_nu(zs[k]))
		else: depth_mhz = np.abs(cm.z_to_nu(zs[k])-cm.z_to_nu(zs[k-1]))
		uv_map, N_ant = uvs['{:.5f}'.format(zi)], uvs['Nant']
		noise2d = noise_map(ncells, zi, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant, verbose=verbose, fft_wrap=fft_wrap)
		noise3d[:,:,k] = jansky_2_kelvin(noise2d, zi, boxsize=boxsize)
		verbose = False
		print('z = {:.5f} | {:.2f} % completed'.format(zi,100*(k+1)/zs.size))
	# Each slice is already converted to mK at its own redshift above; the
	# original converted the whole cube a second time at the central z,
	# double-applying the Jansky->Kelvin conversion (cf. noise_lightcone).
	return noise3d
def noise_lightcone(ncells, zs, obs_time=1000, filename=None, boxsize=None, save_uvmap=None, total_int_time=6., int_time=10., declination=-30., N_ant=None, fft_wrap=False, n_jobs=4, checkpoint=64):
    """
    @ Ghara et al. (2017), Giri et al. (2018b)

    It creates a noise lightcone by simulating the radio observation strategy.

    Parameters
    ----------
    ncells: int
        The grid size.
    zs: ndarray
        List of redshifts.
    obs_time: float
        The observation time in hours.
    total_int_time: float
        Total observation per day time in hours
    int_time: float
        Intergration time in seconds
    declination: float
        Declination angle in deg
    N_ant: int
        Number of antennae
    filename: str
        The path to the file containing the telescope configuration.

        - As a default, it takes the SKA-Low configuration from Sept 2016
        - It is not used if uv_map and N_ant is provided
    boxsize: float
        Boxsize in Mpc
    save_uvmap: str
        Give the filename of the pickle file of uv maps. If

        - the file is absent, then uv maps are created and saved with the given filename.
        - the file is present, then the uv map is read in.
        - the file is present and the uv maps are incomplete, then it is completed.
        - None is given, then the uv maps are not saved.
    n_jobs: int
        Number of CPUs to run in. The calculation is parallelised using joblib.
    checkpoint: int
        Number of iterations after which uv maps are saved if save_uvmap is not None.

    Returns
    -------
    noise_lightcone: A 3D lightcone of the interferometric noise with frequency varying
                     along last axis(in mK).
    """
    if not filename: N_ant = SKA1_LowConfig_Sept2016().shape[0]
    if not boxsize: boxsize = conv.LB
    if not N_ant: N_ant = np.loadtxt(filename, dtype=str).shape[0]
    noise3d = np.zeros((ncells, ncells, zs.size))
    verbose = True
    # Load previously computed uv maps if a cache file is given and present.
    if save_uvmap is not None:
        save_uvmap = save_uvmap.split('.')[0]+'.pkl'
        if len(glob(save_uvmap)):
            uvs = pickle.load(open(save_uvmap, 'rb'))
            print('All or some uv maps is read from the given file. Be sure that they were run with the same parameter values as provided now.')
        else:
            uvs = {}
    else:
        uvs = {}
    # Create uv maps
    print('Creating the uv maps.')
    if n_jobs <= 1:
        # Serial path: compute each missing uv map one redshift at a time.
        tstart = time()
        for k, zi in enumerate(zs):
            if '{:.5f}'.format(zi) not in uvs.keys():
                uv_map, N_ant = get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination)
                uvs['{:.5f}'.format(zi)] = uv_map
                uvs['Nant'] = N_ant
                # BUG FIX: the dump was unconditional, so save_uvmap=None
                # crashed with open(None, 'wb'); now guarded like the
                # parallel branch below.
                if save_uvmap is not None:
                    pickle.dump(uvs, open(save_uvmap, 'wb'))
            verbose = False
            tend = time()
            print('\nz = {:.5f} | {:.2f} % completed | Elapsed time: {:.2f} mins'.format(zi, 100*(k+1)/zs.size, (tend-tstart)/60))
    else:
        # Parallel path: batch the missing redshifts through joblib,
        # checkpointing the cache every `checkpoint` maps.
        Nbase, N_ant = from_antenna_config(filename, zs[0])
        uvs['Nant'] = N_ant
        _uvmap = lambda zi: get_uv_map(ncells, zi, filename=filename, total_int_time=total_int_time, int_time=int_time, boxsize=boxsize, declination=declination, verbose=False)[0]
        if checkpoint < 2*n_jobs:
            checkpoint = 4*n_jobs
            print('checkpoint value should be more than 4*n_jobs. checkpoint set to 4*n_jobs.')
        z_run = np.array([])
        for k, zi in enumerate(zs):
            if '{:.5f}'.format(zi) not in uvs.keys():
                z_run = np.append(z_run, zi)
        n_iterations = int(z_run.size/checkpoint)
        if n_iterations > 1:
            for ii in range(n_iterations):
                istart, iend = ii*checkpoint, (ii+1)*checkpoint
                # The last batch absorbs the remainder of z_run.
                zrs = z_run[istart:iend] if ii+1 < n_iterations else z_run[istart:]
                fla = Parallel(n_jobs=n_jobs, verbose=20)(delayed(_uvmap)(i) for i in zrs)
                for jj, zi in enumerate(zrs):
                    uvs['{:.5f}'.format(zi)] = fla[jj]
                if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
                # -1 excludes the 'Nant' bookkeeping key from the count.
                print('{:.2f} % completed'.format(100*(len(uvs.keys())-1)/zs.size))
        else:
            fla = Parallel(n_jobs=n_jobs, verbose=20)(delayed(_uvmap)(i) for i in z_run)
            for jj, zi in enumerate(z_run):
                uvs['{:.5f}'.format(zi)] = fla[jj]
            if save_uvmap is not None: pickle.dump(uvs, open(save_uvmap, 'wb'))
    print('...done')
    # Calculate noise maps
    print('Creating noise.')
    for k, zi in enumerate(zs):
        # Channel depth in MHz from the spacing of neighbouring redshifts.
        if k+1 < zs.size: depth_mhz = np.abs(cm.z_to_nu(zs[k+1])-cm.z_to_nu(zs[k]))
        else: depth_mhz = np.abs(cm.z_to_nu(zs[k])-cm.z_to_nu(zs[k-1]))
        uv_map, N_ant = uvs['{:.5f}'.format(zi)], uvs['Nant']
        noise2d = noise_map(ncells, zi, depth_mhz, obs_time=obs_time, filename=filename, boxsize=boxsize, total_int_time=total_int_time, int_time=int_time, declination=declination, uv_map=uv_map, N_ant=N_ant, verbose=verbose, fft_wrap=fft_wrap)
        noise3d[:, :, k] = jansky_2_kelvin(noise2d, zi, boxsize=boxsize)
        verbose = False
        print('\nz = {:.5f} | {:.2f} % completed'.format(zi, 100*(k+1)/zs.size))
    return noise3d
def gauss_kernel_3d(size, sigma=1.0, fwhm=None):
    '''
    Generate a normalized 3D gaussian kernel, defined as
    exp(-(x^2 + y^2 + z^2)/(2sigma^2)).

    Parameters:
        size (int): Width of the output array in pixels (per axis).
        sigma = 1.0 (float): The sigma parameter for the Gaussian.
        fwhm = None (float or None): The full width at half maximum.
                If this parameter is given, it overrides sigma.

    Returns:
        numpy array of shape (size, size, size) containing the Gaussian,
        normalized so that its sum is 1.
    '''
    # FWHM takes precedence over sigma when supplied.
    if fwhm is not None:
        sigma = fwhm/(2.*np.sqrt(2.*np.log(2)))
    half = int(size/2)
    if size % 2 == 0:
        # Even size: grid spans [-half, half), giving exactly `size` samples.
        x, y, z = np.mgrid[-half:half, -half:half, -half:half]
    else:
        # Odd size: grid spans [-half, half], centered on zero.
        x, y, z = np.mgrid[-half:half+1, -half:half+1, -half:half+1]
    g = np.exp(-(x**2 + y**2 + z**2)/(2.*sigma**2))
    return g/g.sum()
def smooth_gauss_3d(array, fwhm):
    """Convolve a 3D array with a normalized Gaussian kernel of the given FWHM.

    FIX: removed non-Python extraction residue that had been fused onto the
    return statement.

    NOTE(review): scipy.signal.fftconvolve is called with the default
    mode='full', so the output is LARGER than the input (each axis grows by
    kernel_size - 1). Presumably callers crop the result; confirm that
    mode='same' was not intended before changing it.
    """
    kernel = gauss_kernel_3d(array.shape[0], fwhm=fwhm)
    return scipy.signal.fftconvolve(array, kernel)
import os
import sys
import re
# Matches any copyright mention; used only to detect that a header exists.
COPYRIGHT = re.compile(r'Copyright')
# Matches the accepted Intel copyright line for the years 2018-2020,
# with an optional "(c)" and an optional "2018-"/"2019-" range prefix.
INTEL_COPYRIGHT = re.compile(r'Copyright (\(c\) )?(201(8|9)-)?20(20|19|18) Intel Corporation')
# C/C++ functions banned by the project's secure-coding rules, matched as "name(".
FORBIDDEN_FUNCTIONS = re.compile(r'setjmp\(|longjmp\(|getwd\(|strlen\(|wcslen\(|gets\(|strcpy\(|wcscpy\(|strcat\(|wcscat\(|sprintf\(|vsprintf\(|asctime\(')
def check_header(fd):
    """Scan an open file for a copyright header.

    Returns (detected, result): `detected` is True if any "Copyright" line was
    seen; `result` is True if an accepted Intel copyright line was found
    (scanning stops at the first Intel match).
    """
    result = False
    detected = False
    try:
        for line in fd:
            if COPYRIGHT.findall(line):
                detected = True
            if INTEL_COPYRIGHT.findall(line):
                result = True
                break
    # FIX: was a bare `except:`; binary / non-UTF-8 files raise decode or
    # I/O errors while iterating — catch only those, not e.g. KeyboardInterrupt.
    except (UnicodeDecodeError, OSError):
        print("ERROR: Cannot parse file:" + str(fd))
    return detected, result
def check_function(fd):
    """Return True if `fd` contains a forbidden function call with no recorded fix.

    Scans each line against FORBIDDEN_FUNCTIONS; a match is ignored when the
    stripped line appears in the per-file `fix_applied` whitelist of manually
    verified uses.
    """
    # Space separated exceptions for a given file; matches here are skipped.
    fix_applied = {
        "./src/test/ensemble_flow_custom_node_tests.cpp":
            "size_t strLen = std::strlen(str);size_t prefixLen = std::strlen(prefix);",
    }
    detected = False
    try:
        for line in fd:
            found = FORBIDDEN_FUNCTIONS.findall(line)
            if found:
                # BUG FIX: was `line.trim()` (str has no trim method) and a
                # raising `fix_applied[fd.name]` lookup for files not in the
                # dict. Both errors were swallowed by the old bare except, so
                # every real hit was reported as "Cannot parse" and the scan
                # returned False.
                if line.strip() in fix_applied.get(fd.name, ''):
                    # Fix and check was applied and verified for this line.
                    continue
                detected = True
                print("ERROR: Forbidden function detected in:" + str(fd.name))
                print("Line start:" + str(line) + "End")
                print("Function:" + str(found))
                break
    except (UnicodeDecodeError, OSError):
        print("ERROR: Cannot parse file:" + str(fd))
    return detected
def check_dir(start_dir):
    """Walk `start_dir` and classify files by their copyright header.

    Returns a pair: (files whose header is not the accepted Intel copyright,
    files with no copyright header at all). Excluded paths and directories
    are skipped entirely.
    """
    intel_files = []
    foreign_files = []
    headerless_files = []
    exclude_files = ['__pycache__', '.venv', '.pytest_cache', '.vscode', 'ovms-c/dist', '.git', '.tar.gz', 'docx',
                     '.npy', '.png', '.svg', '.bin', '.jpeg', '.jpg', 'license.txt', 'md', '.groovy', '.json', '.proto', 'bazel-',
                     'Doxyfile', 'clang-format', 'net_http.patch', 'tftext.patch', 'tf.patch', 'client_requirements.txt',
                     'openvino.LICENSE.txt', 'c-ares.LICENSE.txt', 'zlib.LICENSE.txt', 'boost.LICENSE.txt',
                     'libuuid.LICENSE.txt', 'input_images.txt', 'REST_age_gender.ipynb', 'dummy.xml', 'listen.patch', 'add.xml',
                     'requirements.txt', 'missing_headers.txt', 'libevent/BUILD', 'azure_sdk.patch', 'rest_sdk_v2.10.16.patch', '.wav',
                     'forbidden_functions.txt', 'missing_headers.txt', 'increment_1x3x4x5.xml', 'horizontal-text-detection.gif', 'model.xml',
                     'summator.xml', 'resnet_images.txt', 'vehicle_images.txt']
    exclude_directories = ['/dist/', 'extras/ovms-operator', 'extras/openvino-operator-openshift']
    for d_path, _dirs, file_set in os.walk(start_dir):
        # The exclusion verdict is a property of the directory, so decide once.
        blocked = next((d for d in exclude_directories if d in d_path), None)
        for f_name in file_set:
            if blocked is not None:
                # Warn per file, as before.
                print('Warning - Skipping directory - ' + d_path + ' for file - ' + f_name)
                continue
            fpath = os.path.join(d_path, f_name)
            if any(pattern in fpath for pattern in exclude_files):
                continue
            with open(fpath, 'r') as fd:
                header_detected, result = check_header(fd)
            if not header_detected:
                headerless_files.append(fpath)
            elif result:
                intel_files.append(fpath)
            else:
                foreign_files.append(fpath)
    return foreign_files, headerless_files
def check_func(start_dir):
    """Walk `start_dir` and return the files that call a forbidden function.

    Excluded paths and directories are skipped; every remaining file is
    scanned with check_function().
    """
    clean_files = []
    flagged_files = []
    exclude_files = ['__pycache__', '.venv', '.pytest_cache', '.vscode', 'ovms-c/dist', '.git', '.tar.gz', 'docx',
                     '.npy', '.png', '.svg', '.bin', '.jpeg', '.jpg', 'license.txt', 'md', '.groovy', '.json', 'bazel-',
                     'Doxyfile', 'clang-format', 'net_http.patch', 'tftext.patch', 'tf.patch', 'client_requirements.txt',
                     'openvino.LICENSE.txt', 'c-ares.LICENSE.txt', 'zlib.LICENSE.txt', 'boost.LICENSE.txt',
                     'libuuid.LICENSE.txt', 'input_images.txt', 'REST_age_gender.ipynb', 'dummy.xml', 'listen.patch', 'add.xml',
                     'requirements.txt', 'missing_headers.txt', 'libevent/BUILD', 'azure_sdk.patch', 'rest_sdk_v2.10.16.patch', 'forbidden_functions.txt', 'missing_headers.txt',
                     'summator.xml']
    exclude_directories = ['/dist/', 'extras/ovms-operator']
    for d_path, _dirs, file_set in os.walk(start_dir):
        # Decide the directory exclusion once per directory.
        blocked = next((d for d in exclude_directories if d in d_path), None)
        for f_name in file_set:
            if blocked is not None:
                print('Warning - Skipping directory - ' + d_path + ' for file - ' + f_name)
                continue
            fpath = os.path.join(d_path, f_name)
            if any(pattern in fpath for pattern in exclude_files):
                continue
            with open(fpath, 'r') as fd:
                if check_function(fd):
                    flagged_files.append(fpath)
                else:
                    clean_files.append(fpath)
    return flagged_files
def main():
    """CLI entry point.

    argv[1]: start directory (required).
    argv[2]: optional mode — 'functions' runs the forbidden-function scan;
             anything else (or absent) runs the copyright-header scan.
    """
    # Guard clause instead of wrapping the whole body in an else.
    if len(sys.argv) < 2:
        print('Provide start dir!')
        return
    start_dir = sys.argv[1]
    print('Provided start dir:' + start_dir)
    if len(sys.argv) > 2 and sys.argv[2] == 'functions':
        print("Check for forbidden functions")
        forbidden_func = check_func(start_dir)
        if len(forbidden_func) == 0:
            print('Success: All files checked for forbidden functions')
        else:
            print('#########################')
            print('## Forbidden functions detected:')
            for forbid_func in forbidden_func:
                print(f'{forbid_func}')
    else:
        print("Check for missing headers")
        external_component_set, no_header_set = check_dir(start_dir)
        if len(no_header_set) == 0:
            print('Success: All files have headers')
        else:
            print('#########################')
            print('## No header files detected:')
            for no_header in no_header_set:
                print(f'{no_header}')


if __name__ == '__main__':
    # FIX: extraction residue fused after main() has been removed.
    main()
import os
import sys
import re
# Matches any copyright mention; used only to detect that a header exists.
COPYRIGHT = re.compile(r'Copyright')
# Matches the accepted Intel copyright line for the years 2018-2020,
# with an optional "(c)" and an optional "2018-"/"2019-" range prefix.
INTEL_COPYRIGHT = re.compile(r'Copyright (\(c\) )?(201(8|9)-)?20(20|19|18) Intel Corporation')
# C/C++ functions banned by the project's secure-coding rules, matched as "name(".
FORBIDDEN_FUNCTIONS = re.compile(r'setjmp\(|longjmp\(|getwd\(|strlen\(|wcslen\(|gets\(|strcpy\(|wcscpy\(|strcat\(|wcscat\(|sprintf\(|vsprintf\(|asctime\(')
def check_header(fd):
    """Scan an open file for a copyright header.

    Returns (detected, result): `detected` is True if any "Copyright" line was
    seen; `result` is True if an accepted Intel copyright line was found
    (scanning stops at the first Intel match).
    """
    result = False
    detected = False
    try:
        for line in fd:
            if COPYRIGHT.findall(line):
                detected = True
            if INTEL_COPYRIGHT.findall(line):
                result = True
                break
    # FIX: was a bare `except:`; binary / non-UTF-8 files raise decode or
    # I/O errors while iterating — catch only those, not e.g. KeyboardInterrupt.
    except (UnicodeDecodeError, OSError):
        print("ERROR: Cannot parse file:" + str(fd))
    return detected, result
def check_function(fd):
    """Return True if `fd` contains a forbidden function call with no recorded fix.

    Scans each line against FORBIDDEN_FUNCTIONS; a match is ignored when the
    stripped line appears in the per-file `fix_applied` whitelist of manually
    verified uses.
    """
    # Space separated exceptions for a given file; matches here are skipped.
    fix_applied = {
        "./src/test/ensemble_flow_custom_node_tests.cpp":
            "size_t strLen = std::strlen(str);size_t prefixLen = std::strlen(prefix);",
    }
    detected = False
    try:
        for line in fd:
            found = FORBIDDEN_FUNCTIONS.findall(line)
            if found:
                # BUG FIX: was `line.trim()` (str has no trim method) and a
                # raising `fix_applied[fd.name]` lookup for files not in the
                # dict. Both errors were swallowed by the old bare except, so
                # every real hit was reported as "Cannot parse" and the scan
                # returned False.
                if line.strip() in fix_applied.get(fd.name, ''):
                    # Fix and check was applied and verified for this line.
                    continue
                detected = True
                print("ERROR: Forbidden function detected in:" + str(fd.name))
                print("Line start:" + str(line) + "End")
                print("Function:" + str(found))
                break
    except (UnicodeDecodeError, OSError):
        print("ERROR: Cannot parse file:" + str(fd))
    return detected
def check_dir(start_dir):
    """Walk `start_dir` and classify files by their copyright header.

    Returns a pair: (files whose header is not the accepted Intel copyright,
    files with no copyright header at all). Excluded paths and directories
    are skipped entirely.
    """
    intel_files = []
    foreign_files = []
    headerless_files = []
    exclude_files = ['__pycache__', '.venv', '.pytest_cache', '.vscode', 'ovms-c/dist', '.git', '.tar.gz', 'docx',
                     '.npy', '.png', '.svg', '.bin', '.jpeg', '.jpg', 'license.txt', 'md', '.groovy', '.json', '.proto', 'bazel-',
                     'Doxyfile', 'clang-format', 'net_http.patch', 'tftext.patch', 'tf.patch', 'client_requirements.txt',
                     'openvino.LICENSE.txt', 'c-ares.LICENSE.txt', 'zlib.LICENSE.txt', 'boost.LICENSE.txt',
                     'libuuid.LICENSE.txt', 'input_images.txt', 'REST_age_gender.ipynb', 'dummy.xml', 'listen.patch', 'add.xml',
                     'requirements.txt', 'missing_headers.txt', 'libevent/BUILD', 'azure_sdk.patch', 'rest_sdk_v2.10.16.patch', '.wav',
                     'forbidden_functions.txt', 'missing_headers.txt', 'increment_1x3x4x5.xml', 'horizontal-text-detection.gif', 'model.xml',
                     'summator.xml', 'resnet_images.txt', 'vehicle_images.txt']
    exclude_directories = ['/dist/', 'extras/ovms-operator', 'extras/openvino-operator-openshift']
    for d_path, _dirs, file_set in os.walk(start_dir):
        # The exclusion verdict is a property of the directory, so decide once.
        blocked = next((d for d in exclude_directories if d in d_path), None)
        for f_name in file_set:
            if blocked is not None:
                # Warn per file, as before.
                print('Warning - Skipping directory - ' + d_path + ' for file - ' + f_name)
                continue
            fpath = os.path.join(d_path, f_name)
            if any(pattern in fpath for pattern in exclude_files):
                continue
            with open(fpath, 'r') as fd:
                header_detected, result = check_header(fd)
            if not header_detected:
                headerless_files.append(fpath)
            elif result:
                intel_files.append(fpath)
            else:
                foreign_files.append(fpath)
    return foreign_files, headerless_files
def check_func(start_dir):
    """Walk `start_dir` and return the files that call a forbidden function.

    Excluded paths and directories are skipped; every remaining file is
    scanned with check_function().
    """
    clean_files = []
    flagged_files = []
    exclude_files = ['__pycache__', '.venv', '.pytest_cache', '.vscode', 'ovms-c/dist', '.git', '.tar.gz', 'docx',
                     '.npy', '.png', '.svg', '.bin', '.jpeg', '.jpg', 'license.txt', 'md', '.groovy', '.json', 'bazel-',
                     'Doxyfile', 'clang-format', 'net_http.patch', 'tftext.patch', 'tf.patch', 'client_requirements.txt',
                     'openvino.LICENSE.txt', 'c-ares.LICENSE.txt', 'zlib.LICENSE.txt', 'boost.LICENSE.txt',
                     'libuuid.LICENSE.txt', 'input_images.txt', 'REST_age_gender.ipynb', 'dummy.xml', 'listen.patch', 'add.xml',
                     'requirements.txt', 'missing_headers.txt', 'libevent/BUILD', 'azure_sdk.patch', 'rest_sdk_v2.10.16.patch', 'forbidden_functions.txt', 'missing_headers.txt',
                     'summator.xml']
    exclude_directories = ['/dist/', 'extras/ovms-operator']
    for d_path, _dirs, file_set in os.walk(start_dir):
        # Decide the directory exclusion once per directory.
        blocked = next((d for d in exclude_directories if d in d_path), None)
        for f_name in file_set:
            if blocked is not None:
                print('Warning - Skipping directory - ' + d_path + ' for file - ' + f_name)
                continue
            fpath = os.path.join(d_path, f_name)
            if any(pattern in fpath for pattern in exclude_files):
                continue
            with open(fpath, 'r') as fd:
                if check_function(fd):
                    flagged_files.append(fpath)
                else:
                    clean_files.append(fpath)
    return flagged_files
def main():
    """CLI entry point.

    argv[1]: start directory (required).
    argv[2]: optional mode — 'functions' runs the forbidden-function scan;
             anything else (or absent) runs the copyright-header scan.
    """
    # Guard clause instead of wrapping the whole body in an else.
    if len(sys.argv) < 2:
        print('Provide start dir!')
        return
    start_dir = sys.argv[1]
    print('Provided start dir:' + start_dir)
    if len(sys.argv) > 2 and sys.argv[2] == 'functions':
        print("Check for forbidden functions")
        forbidden_func = check_func(start_dir)
        if len(forbidden_func) == 0:
            print('Success: All files checked for forbidden functions')
        else:
            print('#########################')
            print('## Forbidden functions detected:')
            for forbid_func in forbidden_func:
                print(f'{forbid_func}')
    else:
        print("Check for missing headers")
        external_component_set, no_header_set = check_dir(start_dir)
        if len(no_header_set) == 0:
            print('Success: All files have headers')
        else:
            print('#########################')
            print('## No header files detected:')
            for no_header in no_header_set:
                print(f'{no_header}')


if __name__ == '__main__':
    # FIX: extraction residue fused after main() has been removed.
    main()
import unittest
from silasdk.processingTypes import ProcessingTypes
from silasdk.transactions import Transaction
from silasdk.users import User
from tests.poll_until_status import poll
from tests.test_config import (sardine_handle, eth_private_key_6,
app, business_uuid, eth_private_key, user_handle)
class Test011RedeemSilaTest(unittest.TestCase):
    """Integration tests for Transaction.redeemSila.

    NOTE(review): these tests call a live Sila sandbox; outcomes depend on
    the account/wallet state behind the handles in tests.test_config —
    confirm fixtures before relying on results.
    """

    def test_redeem_sila_200(self):
        # Happy path: standard ACH redemption from the linked plaid account.
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "account_name": "default_plaid",
            "descriptor": "test descriptor",
            "business_uuid": business_uuid,
            "processing_type": ProcessingTypes.STANDARD_ACH
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        # poll(self, response["transaction_id"], "success",
        #      app, user_handle, eth_private_key)
        self.assertEqual(response["status"], "SUCCESS")
        self.assertEqual(response["descriptor"], "test descriptor")
        self.assertIsNotNone(response["transaction_id"])

    def test_redeem_sila_400(self):
        # Missing required fields (no amount/account) must be rejected.
        payload = {
            "user_handle": user_handle
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        self.assertEqual(response["status"], "FAILURE")

    def test_redeem_sila_401(self):
        # Empty handle and negative amount must be rejected.
        payload = {
            "user_handle": "",
            "amount": "-1"
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        self.assertEqual(response["status"], "FAILURE")

    def test_redeem_sila_200_with_card_name(self):
        # Redemption addressed by card name (no explicit processing type).
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "card_name": "visa"
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        # poll(self, response["transaction_id"], "success",
        #      app, user_handle, eth_private_key)
        self.assertTrue(response["success"])

    def test_redeem_sila_card_200(self):
        # Redemption to a card with explicit CARD processing type.
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "card_name": "visa",
            "processing_type": ProcessingTypes.CARD
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        # poll(self, response["transaction_id"], "success",
        #      app, user_handle, eth_private_key)
        self.assertEqual(response["status"], "SUCCESS")

    def test_redeem_sila_400_both_card_account(self):
        # Supplying both account_name and card_name is ambiguous and must fail.
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "account_name": "test_account",
            "card_name": "visa"
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        self.assertFalse(response["success"])

    def test_redeem_sila_vaccount_200(self):
        # End-to-end: open a virtual account, fund it via issue_sila, then
        # redeem from the virtual account to a linked card.
        payload = {
            "virtual_account_name": "test_v_acc",
            "user_handle": user_handle
        }
        response = User.openVirtualAccount(app, payload, eth_private_key)
        self.assertTrue(response["success"])
        v_id = response.get("virtual_account").get("virtual_account_id")
        payload = {
            "user_handle": user_handle
        }
        response = User.getPaymentMethods(app, payload, eth_private_key)
        self.assertTrue(response["success"])
        # NOTE(review): card_id is only bound if a card payment method exists;
        # otherwise the redeem payload below raises NameError — confirm fixture.
        for item in response.get("payment_methods"):
            if item["payment_method_type"] == "card":
                card_id = item.get("card_id")
        descriptor = "test descriptor"
        payload = {
            "message": "issue_msg",
            "user_handle": user_handle,
            "amount": 200,
            "account_name": "default_plaid",
            "descriptor": descriptor,
            "business_uuid": business_uuid,
            "processing_type": ProcessingTypes.STANDARD_ACH,
            "destination_id": v_id,
        }
        response = Transaction.issue_sila(app, payload, eth_private_key)
        # Wait for the ACH issue to settle before attempting the redeem.
        poll(self, response["transaction_id"], "success",
             app, user_handle, eth_private_key)
        descriptor = "test descriptor"
        payload = {
            "message": "redeem_msg",
            "user_handle": user_handle,
            "amount": 100,
            "destination_id": card_id,
            "descriptor": descriptor,
            "business_uuid": business_uuid,
            "processing_type": ProcessingTypes.STANDARD_ACH,
            "source_id": v_id,
        }
        response = Transaction.redeemSila(app, payload, eth_private_key)
        self.assertEqual(response["status"], "SUCCESS")

    def test_redeem_sila_instant_settelment_200(self):
        # NOTE(review): "default_<PASSWORD>" looks like a redaction artifact
        # (likely "default_plaid") — confirm the intended account name.
        payload = {
            "user_handle": sardine_handle,
            "amount": 50,
            "account_name": "default_<PASSWORD>",
            "descriptor": "test descriptor",
            "business_uuid": business_uuid,
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key_6)
        self.assertEqual(response["status"], "SUCCESS")
if __name__ == '__main__':
    # Run the suite when executed directly.
    # FIX: extraction residue fused after unittest.main() has been removed.
    unittest.main()
from silasdk.processingTypes import ProcessingTypes
from silasdk.transactions import Transaction
from silasdk.users import User
from tests.poll_until_status import poll
from tests.test_config import (sardine_handle, eth_private_key_6,
app, business_uuid, eth_private_key, user_handle)
class Test011RedeemSilaTest(unittest.TestCase):
    """Integration tests for Transaction.redeemSila.

    NOTE(review): these tests call a live Sila sandbox; outcomes depend on
    the account/wallet state behind the handles in tests.test_config —
    confirm fixtures before relying on results.
    """

    def test_redeem_sila_200(self):
        # Happy path: standard ACH redemption from the linked plaid account.
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "account_name": "default_plaid",
            "descriptor": "test descriptor",
            "business_uuid": business_uuid,
            "processing_type": ProcessingTypes.STANDARD_ACH
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        # poll(self, response["transaction_id"], "success",
        #      app, user_handle, eth_private_key)
        self.assertEqual(response["status"], "SUCCESS")
        self.assertEqual(response["descriptor"], "test descriptor")
        self.assertIsNotNone(response["transaction_id"])

    def test_redeem_sila_400(self):
        # Missing required fields (no amount/account) must be rejected.
        payload = {
            "user_handle": user_handle
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        self.assertEqual(response["status"], "FAILURE")

    def test_redeem_sila_401(self):
        # Empty handle and negative amount must be rejected.
        payload = {
            "user_handle": "",
            "amount": "-1"
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        self.assertEqual(response["status"], "FAILURE")

    def test_redeem_sila_200_with_card_name(self):
        # Redemption addressed by card name (no explicit processing type).
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "card_name": "visa"
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        # poll(self, response["transaction_id"], "success",
        #      app, user_handle, eth_private_key)
        self.assertTrue(response["success"])

    def test_redeem_sila_card_200(self):
        # Redemption to a card with explicit CARD processing type.
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "card_name": "visa",
            "processing_type": ProcessingTypes.CARD
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        # poll(self, response["transaction_id"], "success",
        #      app, user_handle, eth_private_key)
        self.assertEqual(response["status"], "SUCCESS")

    def test_redeem_sila_400_both_card_account(self):
        # Supplying both account_name and card_name is ambiguous and must fail.
        payload = {
            "user_handle": user_handle,
            "amount": 50,
            "account_name": "test_account",
            "card_name": "visa"
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key)
        self.assertFalse(response["success"])

    def test_redeem_sila_vaccount_200(self):
        # End-to-end: open a virtual account, fund it via issue_sila, then
        # redeem from the virtual account to a linked card.
        payload = {
            "virtual_account_name": "test_v_acc",
            "user_handle": user_handle
        }
        response = User.openVirtualAccount(app, payload, eth_private_key)
        self.assertTrue(response["success"])
        v_id = response.get("virtual_account").get("virtual_account_id")
        payload = {
            "user_handle": user_handle
        }
        response = User.getPaymentMethods(app, payload, eth_private_key)
        self.assertTrue(response["success"])
        # NOTE(review): card_id is only bound if a card payment method exists;
        # otherwise the redeem payload below raises NameError — confirm fixture.
        for item in response.get("payment_methods"):
            if item["payment_method_type"] == "card":
                card_id = item.get("card_id")
        descriptor = "test descriptor"
        payload = {
            "message": "issue_msg",
            "user_handle": user_handle,
            "amount": 200,
            "account_name": "default_plaid",
            "descriptor": descriptor,
            "business_uuid": business_uuid,
            "processing_type": ProcessingTypes.STANDARD_ACH,
            "destination_id": v_id,
        }
        response = Transaction.issue_sila(app, payload, eth_private_key)
        # Wait for the ACH issue to settle before attempting the redeem.
        poll(self, response["transaction_id"], "success",
             app, user_handle, eth_private_key)
        descriptor = "test descriptor"
        payload = {
            "message": "redeem_msg",
            "user_handle": user_handle,
            "amount": 100,
            "destination_id": card_id,
            "descriptor": descriptor,
            "business_uuid": business_uuid,
            "processing_type": ProcessingTypes.STANDARD_ACH,
            "source_id": v_id,
        }
        response = Transaction.redeemSila(app, payload, eth_private_key)
        self.assertEqual(response["status"], "SUCCESS")

    def test_redeem_sila_instant_settelment_200(self):
        # NOTE(review): "default_<PASSWORD>" looks like a redaction artifact
        # (likely "default_plaid") — confirm the intended account name.
        payload = {
            "user_handle": sardine_handle,
            "amount": 50,
            "account_name": "default_<PASSWORD>",
            "descriptor": "test descriptor",
            "business_uuid": business_uuid,
        }
        response = Transaction.redeemSila(
            app, payload, eth_private_key_6)
        self.assertEqual(response["status"], "SUCCESS")
if __name__ == '__main__':
    # Run the suite when executed directly.
    # FIX: extraction residue fused after unittest.main() has been removed.
    unittest.main()
import string
import math
from codestat_token import Token
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
PrefixedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
RealTokenBuilder,
RealExponentTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
SingleCharacterTokenBuilder,
LeadToEndOfLineTokenBuilder
)
from cx_token_builders import (
SlashSlashCommentTokenBuilder,
SlashStarCommentTokenBuilder
)
from examiner import Examiner
class CsharpExaminer(Examiner):
    """Tokenize C# source and compute confidence statistics that the text
    really is C# (codestat examiner)."""

    @staticmethod
    def __escape_z__():
        # Presumably a keep-alive hook that touches every token-builder class
        # so unused-import/escape analysis does not drop them — TODO confirm.
        InvalidTokenBuilder.__escape_z__()
        WhitespaceTokenBuilder.__escape_z__()
        NewlineTokenBuilder.__escape_z__()
        EscapedStringTokenBuilder.__escape_z__()
        PrefixedStringTokenBuilder.__escape_z__()
        IntegerTokenBuilder.__escape_z__()
        IntegerExponentTokenBuilder.__escape_z__()
        RealTokenBuilder.__escape_z__()
        RealExponentTokenBuilder.__escape_z__()
        IdentifierTokenBuilder.__escape_z__()
        CaseInsensitiveListTokenBuilder.__escape_z__()
        CaseSensitiveListTokenBuilder.__escape_z__()
        SingleCharacterTokenBuilder.__escape_z__()
        LeadToEndOfLineTokenBuilder.__escape_z__()
        SlashSlashCommentTokenBuilder.__escape_z__()
        SlashStarCommentTokenBuilder.__escape_z__()
        return 'Escape ?Z'

    def __init__(self, code):
        """Tokenize `code` as C# and compute the confidence measures."""
        super().__init__()
        operand_types = []

        # --- token builders for literals and identifiers ---
        whitespace_tb = WhitespaceTokenBuilder()
        newline_tb = NewlineTokenBuilder()
        integer_tb = IntegerTokenBuilder(None)
        integer_exponent_tb = IntegerExponentTokenBuilder(None)
        real_tb = RealTokenBuilder(False, False, None)
        real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
        operand_types.append('number')
        leads = '_'
        extras = '_'
        identifier_tb = IdentifierTokenBuilder(leads, extras)
        operand_types.append('identifier')
        # The typographic quote is accepted alongside the ASCII ones.
        quotes = ['"', "'", "’"]
        string_tb = EscapedStringTokenBuilder(quotes, 10)
        # C# verbatim strings: @"..."
        prefixed_string_tb = PrefixedStringTokenBuilder('@', False, ['"'])
        operand_types.append('string')

        # --- comments and preprocessor directives ---
        slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
        slash_star_comment_tb = SlashStarCommentTokenBuilder()
        directives = [
            '#if', '#else', '#elif', '#endif',
            '#define', '#undef',
            '#line', '#pragma'
        ]
        preprocessor_tb = CaseSensitiveListTokenBuilder(directives, 'preprocessor', False)
        c_warning_tb = LeadToEndOfLineTokenBuilder('#warning', True, 'preprocessor')
        c_error_tb = LeadToEndOfLineTokenBuilder('#error', True, 'preprocessor')
        c_region_tb = LeadToEndOfLineTokenBuilder('#region', True, 'preprocessor')
        c_endregion_tb = LeadToEndOfLineTokenBuilder('#endregion', True, 'preprocessor')
        terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)

        # --- operators and grouping tokens ---
        known_operators = [
            '+', '-', '*', '/', '%',
            '=', '==', '!=', '>', '>=', '<', '<=',
            '+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=',
            '!', '&', '|', '~', '<<', '>>',
            '^',
            '.',
            '++', '--', '->', '&&', '||',
            '?', '??', '?.', '?[',
            '=>',
            'as', 'is', 'await', 'sizeof',
            'typeof', 'new'
        ]
        self.unary_operators = [
            '+', '-',
            '!', '~',
            '++', '--',
            'new', 'sizeof', 'typeof'
        ]
        self.postfix_operators = [
            '++', '--'
        ]
        known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
        groupers = ['(', ')', ',', '[', ']', '{', '}', ':']
        group_starts = ['(', '[', ',', '{']
        group_ends = [')', ']', '}']
        group_mids = [',', ':']
        groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)

        # --- C# keywords, builtin types, and literal values ---
        keywords = [
            'abstract', 'break',
            'case', 'catch', 'checked', 'class', 'const',
            'continue', 'default', 'delegate', 'do',
            'else', 'enum', 'event', 'explicit', 'extern',
            'finally', 'fixed', 'for', 'foreach', 'goto',
            'if', 'implicit', 'in', 'interface', 'internal',
            'lock', 'namespace', 'operator',
            'out', 'override', 'params', 'partial', 'private', 'protected', 'public',
            'readonly', 'ref', 'return', 'sealed',
            'stackalloc', 'static', 'struct', 'switch',
            'throw', 'try',
            'unchecked', 'unsafe', 'using', 'using static',
            'virtual', 'volatile', 'while'
        ]
        keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)
        types = [
            'bool', 'byte', 'char', 'decimal', 'double', 'float', 'int', 'long', 'object',
            'sbyte', 'short', 'string', 'uint', 'ulong', 'ushort', 'void'
        ]
        types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
        operand_types.append('type')
        values = [
            'base', 'false', 'null', 'this', 'true'
        ]
        values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
        operand_types.append('value')
        invalid_token_builder = InvalidTokenBuilder()

        # Order matters: earlier builders get first chance at each position.
        tokenbuilders = [
            newline_tb,
            whitespace_tb,
            terminators_tb,
            integer_tb,
            integer_exponent_tb,
            real_tb,
            real_exponent_tb,
            keyword_tb,
            types_tb,
            values_tb,
            known_operator_tb,
            groupers_tb,
            identifier_tb,
            string_tb,
            prefixed_string_tb,
            slash_slash_comment_tb,
            slash_star_comment_tb,
            preprocessor_tb,
            c_error_tb,
            c_warning_tb,
            c_region_tb,
            c_endregion_tb,
            self.unknown_operator_tb,
            invalid_token_builder
        ]

        # --- tokenize and post-process the token stream ---
        tokenizer = Tokenizer(tokenbuilders)
        tokens = tokenizer.tokenize(code)
        tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
        tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
        tokens = Examiner.combine_identifier_colon(tokens, ['statement terminator', 'newline'], ['{'], ['whitespace', 'comment'])
        self.tokens = tokens
        self.convert_identifiers_to_labels()
        # Numeric literal suffixes (1.5f, 2d, 3m, ...) get merged into the number.
        number_suffixes = ['f', 'F', 'd', 'D', 'm', 'M']
        self.tokens = self.combine_tokens_and_adjacent_types(tokens, 'number', 'identifier', number_suffixes)
        self.calc_statistics()

        # --- confidence calculations ---
        tokens = self.source_tokens()
        tokens = Examiner.join_all_lines(tokens)
        self.calc_token_confidence()
        self.calc_token_2_confidence()
        num_operators = self.count_my_tokens(['operator', 'invalid operator'])
        if num_operators > 0:
            self.calc_operator_confidence(num_operators)
            allow_pairs = []
            self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
            self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
            self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)
        self.calc_group_confidence(tokens, group_mids)
        operand_types_2 = ['number', 'string', 'symbol']
        self.calc_operand_n_confidence(tokens, operand_types_2, 2)
        self.calc_operand_n_confidence(tokens, operand_types, 4)
        self.calc_keyword_confidence()
        self.calc_preprocessor_confidence()
        self.calc_paired_blockers_confidence(['{'], ['}'])
        # FIX: extraction residue fused onto the end of this line was removed.
        self.calc_line_length_confidence(code, self.max_expected_line)
import math
from codestat_token import Token
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
PrefixedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
RealTokenBuilder,
RealExponentTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
SingleCharacterTokenBuilder,
LeadToEndOfLineTokenBuilder
)
from cx_token_builders import (
SlashSlashCommentTokenBuilder,
SlashStarCommentTokenBuilder
)
from examiner import Examiner
class CsharpExaminer(Examiner):
    """Tokenizes C# source text and computes language-confidence statistics."""

    @staticmethod
    def __escape_z__():
        # Propagate the escape to every token-builder class this examiner uses.
        InvalidTokenBuilder.__escape_z__()
        WhitespaceTokenBuilder.__escape_z__()
        NewlineTokenBuilder.__escape_z__()
        EscapedStringTokenBuilder.__escape_z__()
        PrefixedStringTokenBuilder.__escape_z__()
        IntegerTokenBuilder.__escape_z__()
        IntegerExponentTokenBuilder.__escape_z__()
        RealTokenBuilder.__escape_z__()
        RealExponentTokenBuilder.__escape_z__()
        IdentifierTokenBuilder.__escape_z__()
        CaseInsensitiveListTokenBuilder.__escape_z__()
        CaseSensitiveListTokenBuilder.__escape_z__()
        SingleCharacterTokenBuilder.__escape_z__()
        LeadToEndOfLineTokenBuilder.__escape_z__()
        SlashSlashCommentTokenBuilder.__escape_z__()
        SlashStarCommentTokenBuilder.__escape_z__()
        return 'Escape ?Z'

    def __init__(self, code):
        """Tokenize *code* (C# source text) and compute confidence values."""
        super().__init__()

        # Token categories that count as operands for the confidence checks.
        operand_types = []

        whitespace_tb = WhitespaceTokenBuilder()
        newline_tb = NewlineTokenBuilder()

        # Numeric literals (no special suffix handling at tokenize time;
        # suffixes like 'f'/'m' are merged with numbers further below).
        integer_tb = IntegerTokenBuilder(None)
        integer_exponent_tb = IntegerExponentTokenBuilder(None)
        real_tb = RealTokenBuilder(False, False, None)
        real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
        operand_types.append('number')

        # Identifiers may start with and contain underscores.
        leads = '_'
        extras = '_'
        identifier_tb = IdentifierTokenBuilder(leads, extras)
        operand_types.append('identifier')

        # Double, single, and typographic quotes; '@'-prefixed verbatim strings.
        quotes = ['"', "'", "’"]
        string_tb = EscapedStringTokenBuilder(quotes, 10)
        prefixed_string_tb = PrefixedStringTokenBuilder('@', False, ['"'])
        operand_types.append('string')

        slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
        slash_star_comment_tb = SlashStarCommentTokenBuilder()

        # Preprocessor directives; #warning/#error/#region/#endregion consume
        # the rest of the line.
        directives = [
            '#if', '#else', '#elif', '#endif',
            '#define', '#undef',
            '#line', '#pragma'
        ]
        preprocessor_tb = CaseSensitiveListTokenBuilder(directives, 'preprocessor', False)
        c_warning_tb = LeadToEndOfLineTokenBuilder('#warning', True, 'preprocessor')
        c_error_tb = LeadToEndOfLineTokenBuilder('#error', True, 'preprocessor')
        c_region_tb = LeadToEndOfLineTokenBuilder('#region', True, 'preprocessor')
        c_endregion_tb = LeadToEndOfLineTokenBuilder('#endregion', True, 'preprocessor')

        terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)

        # C# operators, including word operators such as 'as'/'is'/'new'.
        known_operators = [
            '+', '-', '*', '/', '%',
            '=', '==', '!=', '>', '>=', '<', '<=',
            '+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=',
            '!', '&', '|', '~', '<<', '>>',
            '^',
            '.',
            '++', '--', '->', '&&', '||',
            '?', '??', '?.', '?[',
            '=>',
            'as', 'is', 'await', 'sizeof',
            'typeof', 'new'
        ]

        self.unary_operators = [
            '+', '-',
            '!', '~',
            '++', '--',
            'new', 'sizeof', 'typeof'
        ]

        self.postfix_operators = [
            '++', '--'
        ]

        known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)

        # Grouping punctuation, split by position within a group.
        groupers = ['(', ')', ',', '[', ']', '{', '}', ':']
        group_starts = ['(', '[', ',', '{']
        group_ends = [')', ']', '}']
        group_mids = [',', ':']
        groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)

        keywords = [
            'abstract', 'break',
            'case', 'catch', 'checked', 'class', 'const',
            'continue', 'default', 'delegate', 'do',
            'else', 'enum', 'event', 'explicit', 'extern',
            'finally', 'fixed', 'for', 'foreach', 'goto',
            'if', 'implicit', 'in', 'interface', 'internal',
            'lock', 'namespace', 'operator',
            'out', 'override', 'params', 'partial', 'private', 'protected', 'public',
            'readonly', 'ref', 'return', 'sealed',
            'stackalloc', 'static', 'struct', 'switch',
            'throw', 'try',
            'unchecked', 'unsafe', 'using', 'using static',
            'virtual', 'volatile', 'while'
        ]

        keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

        types = [
            'bool', 'byte', 'char', 'decimal', 'double', 'float', 'int', 'long', 'object',
            'sbyte', 'short', 'string', 'uint', 'ulong', 'ushort', 'void'
        ]

        types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
        operand_types.append('type')

        values = [
            'base', 'false', 'null', 'this', 'true'
        ]

        values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
        operand_types.append('value')

        invalid_token_builder = InvalidTokenBuilder()

        # Order matters: earlier builders get first chance to match.
        tokenbuilders = [
            newline_tb,
            whitespace_tb,
            terminators_tb,
            integer_tb,
            integer_exponent_tb,
            real_tb,
            real_exponent_tb,
            keyword_tb,
            types_tb,
            values_tb,
            known_operator_tb,
            groupers_tb,
            identifier_tb,
            string_tb,
            prefixed_string_tb,
            slash_slash_comment_tb,
            slash_star_comment_tb,
            preprocessor_tb,
            c_error_tb,
            c_warning_tb,
            c_region_tb,
            c_endregion_tb,
            self.unknown_operator_tb,
            invalid_token_builder
        ]

        tokenizer = Tokenizer(tokenbuilders)
        tokens = tokenizer.tokenize(code)
        # Collapse runs of invalid tokens, then fold 'identifier:' into labels.
        tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
        tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
        tokens = Examiner.combine_identifier_colon(tokens, ['statement terminator', 'newline'], ['{'], ['whitespace', 'comment'])
        self.tokens = tokens
        self.convert_identifiers_to_labels()

        # Merge numeric-literal suffixes (e.g. 1.5f, 2m) back into the number.
        number_suffixes = ['f', 'F', 'd', 'D', 'm', 'M']
        self.tokens = self.combine_tokens_and_adjacent_types(tokens, 'number', 'identifier', number_suffixes)
        self.calc_statistics()

        tokens = self.source_tokens()
        tokens = Examiner.join_all_lines(tokens)

        # Confidence measures; operator checks only make sense when operators exist.
        self.calc_token_confidence()
        self.calc_token_2_confidence()

        num_operators = self.count_my_tokens(['operator', 'invalid operator'])
        if num_operators > 0:
            self.calc_operator_confidence(num_operators)
            allow_pairs = []
            self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
            self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
            self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

        self.calc_group_confidence(tokens, group_mids)

        operand_types_2 = ['number', 'string', 'symbol']
        self.calc_operand_n_confidence(tokens, operand_types_2, 2)
        self.calc_operand_n_confidence(tokens, operand_types, 4)

        self.calc_keyword_confidence()
        self.calc_preprocessor_confidence()
        self.calc_paired_blockers_confidence(['{'], ['}'])
        self.calc_line_length_confidence(code, self.max_expected_line)
from collections import namedtuple
from enum import Enum
from math import sqrt
from typing import NamedTuple, Optional
from pathos.helpers import mp
from tqdm import tqdm
from skdecide import (
DiscreteDistribution,
EnvironmentOutcome,
GoalMDPDomain,
Space,
TransitionOutcome,
Value,
)
from skdecide.builders.domain import Actions
from skdecide.hub.solver.mcts.mcts import MCTS
from skdecide.hub.space.gym import EnumSpace, ListSpace, MultiDiscreteSpace
from skdecide.utils import load_registered_solver, rollout
class State(NamedTuple):
    """Grid position: column ``x`` and row ``y``."""

    x: int
    y: int
class Action(Enum):
    """The four grid moves; values are stable ids used for shared-memory encoding."""

    up = 0
    down = 1
    left = 2
    right = 3
class D(GoalMDPDomain, Actions):
    """Domain type declarations: a goal MDP with state-dependent action sets."""

    T_state = State  # Type of states
    T_observation = T_state  # Type of observations
    T_event = Action  # Type of events
    T_value = float  # Type of transition values (rewards or costs)
    T_predicate = bool  # Type of logical checks
    T_info = (
        None  # Type of additional information given as part of an environment outcome
    )
class MyDomain(D):
    """A ``num_cols`` x ``num_rows`` stochastic grid world.

    The agent starts at (0, 0) and must reach the bottom-right cell.
    A move reaches its intended cell with probability 0.4, stays in
    place with 0.2, and drifts to one of the two cells diagonally
    adjacent to the intended one with 0.2 each; coordinates are
    clamped to the board.
    """

    def __init__(self, num_cols=10, num_rows=10):
        self.num_cols = num_cols
        self.num_rows = num_rows

    def _get_applicable_actions_from(
        self, memory: D.T_memory[D.T_state]
    ) -> D.T_agent[Space[D.T_event]]:
        """Return the moves that do not leave the board from *memory*."""
        candidates = (
            (memory.y > 0, Action.up),
            (memory.y < self.num_rows - 1, Action.down),
            (memory.x > 0, Action.left),
            (memory.x < self.num_cols - 1, Action.right),
        )
        return ListSpace([move for allowed, move in candidates if allowed])

    def _get_next_state_distribution(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
    ) -> DiscreteDistribution[D.T_state]:
        """Noisy transition: intended cell (0.4), stay (0.2), diagonal drifts (0.2 each)."""
        last_col = self.num_cols - 1
        last_row = self.num_rows - 1

        def shifted(dx, dy):
            # Cell reached by moving (dx, dy), clamped to the board.
            return State(
                min(max(memory.x + dx, 0), last_col),
                min(max(memory.y + dy, 0), last_row),
            )

        # (intended move, diagonal drift 1, diagonal drift 2) per action,
        # in the same order as the original per-action branches.
        offsets = {
            Action.left: ((-1, 0), (-1, -1), (-1, 1)),
            Action.right: ((1, 0), (1, 1), (1, -1)),
            Action.up: ((0, -1), (-1, -1), (1, -1)),
            Action.down: ((0, 1), (1, 1), (-1, 1)),
        }[action]
        intended, drift_a, drift_b = (shifted(dx, dy) for dx, dy in offsets)
        return DiscreteDistribution(
            [
                (memory, 0.2),
                (intended, 0.4),
                (drift_a, 0.2),
                (drift_b, 0.2),
            ]
        )

    def _get_transition_value(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
        next_state: Optional[D.T_state] = None,
    ) -> D.T_agent[Value[D.T_value]]:
        """Step cost: the Manhattan distance actually travelled."""
        travelled = abs(next_state.x - memory.x) + abs(next_state.y - memory.y)
        return Value(cost=travelled)

    def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
        """Episodes terminate exactly at goal states."""
        return self._is_goal(state)

    def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
        """All four moves; per-state applicability is restricted separately."""
        return EnumSpace(Action)

    def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
        """Single goal: the bottom-right cell."""
        return ListSpace([State(x=self.num_cols - 1, y=self.num_rows - 1)])

    def _get_initial_state_(self) -> D.T_state:
        """Start in the top-left cell."""
        return State(x=0, y=0)

    def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
        """Fully observable grid coordinates."""
        return MultiDiscreteSpace([self.num_cols, self.num_rows])
class GridShmProxy:
    """Shared-memory (de)serializers for the types exchanged with parallel solvers.

    Each nested proxy class knows how to allocate a multiprocessing buffer
    (``initialize``), write a value into it (``encode``) and rebuild the
    value from it (``decode``).
    """

    # (type, number of shared buffers) pairs registered with the solver.
    # NOTE(review): the counts presumably bound how many values of each type
    # are in flight at once — confirm against the skdecide solver API.
    _register_ = [
        (State, 2),
        (Action, 1),
        (EnumSpace, 1),
        (ListSpace, 1),
        (DiscreteDistribution, 1),
        (Value, 1),
        (EnvironmentOutcome, 1),
        (TransitionOutcome, 1),
        (bool, 1),
        (float, 1),
        (int, 2),
    ]

    def __init__(self):
        # Dispatch table: type -> proxy class handling that type.
        self._proxies_ = {
            State: GridShmProxy.StateProxy,
            Action: GridShmProxy.ActionProxy,
            EnumSpace: GridShmProxy.EnumSpaceProxy,
            ListSpace: GridShmProxy.ListSpaceProxy,
            DiscreteDistribution: GridShmProxy.DiscreteDistributionProxy,
            Value: GridShmProxy.ValueProxy,
            EnvironmentOutcome: GridShmProxy.EnvironmentOutcomeProxy,
            TransitionOutcome: GridShmProxy.TransitionOutcomeProxy,
            bool: GridShmProxy.BoolProxy,
            float: GridShmProxy.FloatProxy,
            int: GridShmProxy.IntProxy,
        }

    def copy(self):
        # New proxy object with an independent dispatch dict (same proxy classes).
        p = GridShmProxy()
        p._proxies_ = dict(self._proxies_)
        return p

    def register(self):
        return GridShmProxy._register_

    def initialize(self, t):
        # Allocate a fresh shared buffer able to hold a value of type t.
        return self._proxies_[t].initialize()

    def encode(self, value, shm_value):
        # Write value into the shared buffer, dispatching on its runtime type.
        self._proxies_[type(value)].encode(value, shm_value)

    def decode(self, t, shm_value):
        # Rebuild a value of type t from the shared buffer.
        return self._proxies_[t].decode(shm_value)

    class StateProxy:
        # A State travels as two doubles (x, y); decode truncates back to int.
        @staticmethod
        def initialize():
            return mp.Array("d", [0, 0], lock=True)

        @staticmethod
        def encode(state, shm_state):
            shm_state[0] = state.x
            shm_state[1] = state.y

        @staticmethod
        def decode(shm_state):
            return State(int(shm_state[0]), int(shm_state[1]))

    class ActionProxy:
        # An Action travels as its unsigned-int enum value.
        @staticmethod
        def initialize():
            return mp.Value("I", 0, lock=True)

        @staticmethod
        def encode(action, shm_action):
            shm_action.value = action.value

        @staticmethod
        def decode(shm_action):
            return Action(shm_action.value)

    class EnumSpaceProxy:  # Always used with Action as enum class
        @staticmethod
        def initialize():
            # Placeholder buffer: the space itself carries no data.
            return mp.Array("c", b"")

        @staticmethod
        def encode(val, shm_val):
            pass

        @staticmethod
        def decode(val):
            return EnumSpace(Action)

    class ListSpaceProxy:  # Always used with Action as enum class
        # Encoded as four presence flags, one per Action member.
        @staticmethod
        def initialize():
            return mp.Array("b", [False, False, False, False], lock=True)

        @staticmethod
        def encode(val, shm_val):
            # Clear all flags, then set one per action present in the space.
            for i in range(4):
                shm_val[i] = False
            for a in val.get_elements():
                if a is Action.up:
                    shm_val[0] = True
                elif a is Action.down:
                    shm_val[1] = True
                elif a is Action.left:
                    shm_val[2] = True
                elif a is Action.right:
                    shm_val[3] = True

        @staticmethod
        def decode(val):
            aa = []
            if val[0]:
                aa.append(Action.up)
            if val[1]:
                aa.append(Action.down)
            if val[2]:
                aa.append(Action.left)
            if val[3]:
                aa.append(Action.right)
            return ListSpace(aa)

    class DiscreteDistributionProxy:
        # Up to 4 (state, probability) pairs; unused slots get probability -1.
        @staticmethod
        def initialize():
            # Don't use "[] * 4" operator since it does not deep copy the pairs but rather
            # copy 4 times the pointers to the same object!
            return [
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
            ]

        @staticmethod
        def encode(dd, shm_dd):
            for i, o in enumerate(dd.get_values()):
                GridShmProxy.StateProxy.encode(o[0], shm_dd[i][0])
                shm_dd[i][1].value = o[1]
            # Mark trailing unused slots as empty.
            for i in range(len(dd.get_values()), 4):
                shm_dd[i][1].value = -1

        @staticmethod
        def decode(dd):
            # Keep only slots holding a real (non-negative) probability.
            return DiscreteDistribution(
                [
                    (GridShmProxy.StateProxy.decode(o[0]), o[1].value)
                    for o in dd
                    if o[1].value > -0.5
                ]
            )

    class ValueProxy:
        # A magnitude plus a flag: True -> reward, False -> cost.
        @staticmethod
        def initialize():
            return [mp.Value("d", 0), mp.Value("b", False)]

        @staticmethod
        def encode(value, shm_value):
            if value.reward is not None:
                shm_value[0].value = value.reward
                shm_value[1].value = True
            elif value.cost is not None:
                shm_value[0].value = value.cost
                shm_value[1].value = False
            else:
                # Neither side set: encoded as a zero reward.
                shm_value[0].value = 0
                shm_value[1].value = True

        @staticmethod
        def decode(value):
            if value[1].value:
                return Value(reward=value[0].value)
            else:
                return Value(cost=value[0].value)

    class EnvironmentOutcomeProxy:
        # Buffer layout: [observation, value magnitude, value flag, termination].
        @staticmethod
        def initialize():
            return (
                [GridShmProxy.StateProxy.initialize()]
                + GridShmProxy.ValueProxy.initialize()
                + [GridShmProxy.BoolProxy.initialize()]
            )

        @staticmethod
        def encode(outcome, shm_outcome):
            GridShmProxy.StateProxy.encode(outcome.observation, shm_outcome[0])
            GridShmProxy.ValueProxy.encode(outcome.value, shm_outcome[1:3])
            GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])

        @staticmethod
        def decode(outcome):
            return EnvironmentOutcome(
                observation=GridShmProxy.StateProxy.decode(outcome[0]),
                value=GridShmProxy.ValueProxy.decode(outcome[1:3]),
                termination=GridShmProxy.BoolProxy.decode(outcome[3]),
            )

    class TransitionOutcomeProxy:
        # Same layout as EnvironmentOutcomeProxy but carries a state field.
        @staticmethod
        def initialize():
            return (
                [GridShmProxy.StateProxy.initialize()]
                + GridShmProxy.ValueProxy.initialize()
                + [GridShmProxy.BoolProxy.initialize()]
            )

        @staticmethod
        def encode(outcome, shm_outcome):
            GridShmProxy.StateProxy.encode(outcome.state, shm_outcome[0])
            GridShmProxy.ValueProxy.encode(outcome.value, shm_outcome[1:3])
            GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])

        @staticmethod
        def decode(outcome):
            return TransitionOutcome(
                state=GridShmProxy.StateProxy.decode(outcome[0]),
                value=GridShmProxy.ValueProxy.decode(outcome[1:3]),
                termination=GridShmProxy.BoolProxy.decode(outcome[3]),
            )

    class BoolProxy:
        @staticmethod
        def initialize():
            return mp.Value("b", False)

        @staticmethod
        def encode(val, shm_val):
            shm_val.value = val

        @staticmethod
        def decode(val):
            return bool(val.value)

    class FloatProxy:
        @staticmethod
        def initialize():
            # NOTE(review): initialized with False (coerced to 0.0 in a "d" slot).
            return mp.Value("d", False)

        @staticmethod
        def encode(val, shm_val):
            shm_val.value = val

        @staticmethod
        def decode(val):
            return float(val.value)

    class IntProxy:
        @staticmethod
        def initialize():
            # NOTE(review): initialized with False (coerced to 0 in an "i" slot).
            return mp.Value("i", False)

        @staticmethod
        def encode(val, shm_val):
            shm_val.value = val

        @staticmethod
        def decode(val):
            return int(val.value)
if __name__ == "__main__":
try_solvers = [
# LRTDP
{
"name": "LRTDP",
"entry": "LRTDP",
"config": {
"domain_factory": lambda: MyDomain(),
"heuristic": lambda d, s: Value(
cost=sqrt((d.num_cols - 1 - s.x) ** 2 + (d.num_rows - 1 - s.y) ** 2)
),
"use_labels": True,
"time_budget": 60000,
"rollout_budget": 10000,
"max_depth": 50,
"discount": 1.0,
"epsilon": 0.001,
"online_node_garbage": True,
"continuous_planning": False,
"parallel": True,
"debug_logs": False,
},
},
# ILAO*
{
"name": "Improved-LAO*",
"entry": "ILAOstar",
"config": {
"domain_factory": lambda: MyDomain(),
"heuristic": lambda d, s: Value(
cost=sqrt((d.num_cols - 1 - s.x) ** 2 + (d.num_rows - 1 - s.y) ** 2)
),
"discount": 1.0,
"epsilon": 0.001,
"parallel": True,
"debug_logs": False,
},
},
# UCT-Distribution (reinforcement learning / search)
{
"name": "UCT (reinforcement learning / search)",
"entry": "UCT",
"config": {
"domain_factory": lambda: MyDomain(),
"time_budget": 1000,
"rollout_budget": 100,
"max_depth": 50,
"ucb_constant": 1.0 / sqrt(2.0),
"transition_mode": MCTS.Options.TransitionMode.Distribution,
"online_node_garbage": True,
"continuous_planning": False,
"heuristic": lambda d, s: (
Value(
cost=sqrt(
(d.num_cols - 1 - s.x) ** 2 + (d.num_rows - 1 - s.y) ** 2
)
),
10000,
),
"parallel": True,
"debug_logs": False,
},
},
]
# Load solvers (filtering out badly installed ones)
solvers = map(
lambda s: dict(s, entry=load_registered_solver(s["entry"])), try_solvers
)
solvers = list(filter(lambda s: s["entry"] is not None, solvers))
# Run loop to ask user input
domain = MyDomain() # MyDomain(5,5)
with tqdm(total=len(solvers) * 100) as pbar:
for s in solvers:
solver_type = s["entry"]
for i in range(50):
s["config"]["shared_memory_proxy"] = None
with solver_type(**s["config"]) as solver:
MyDomain.solve_with(solver) # ,lambda:MyDomain(5,5))
rollout(
domain,
solver,
max_steps=50,
outcome_formatter=lambda o: f"{o.observation} - cost: {o.value.cost:.2f}",
)
pbar.update(1)
for i in range(50):
s["config"]["shared_memory_proxy"] = GridShmProxy()
with solver_type(**s["config"]) as solver:
MyDomain.solve_with(solver) # ,lambda:MyDomain(5,5))
rollout(
domain,
solver,
max_steps=50,
outcome_formatter=lambda o: f"{o.observation} - cost: {o.value.cost:.2f}",
)
pbar.update(1) | tests/solvers/cpp/parallelism/test_parallel_probabilistic_algorithms.py |
from collections import namedtuple
from enum import Enum
from math import sqrt
from typing import NamedTuple, Optional
from pathos.helpers import mp
from tqdm import tqdm
from skdecide import (
DiscreteDistribution,
EnvironmentOutcome,
GoalMDPDomain,
Space,
TransitionOutcome,
Value,
)
from skdecide.builders.domain import Actions
from skdecide.hub.solver.mcts.mcts import MCTS
from skdecide.hub.space.gym import EnumSpace, ListSpace, MultiDiscreteSpace
from skdecide.utils import load_registered_solver, rollout
class State(NamedTuple):
    """Grid position: column ``x`` and row ``y``."""

    x: int
    y: int
class Action(Enum):
    """The four grid moves; values are stable ids used for shared-memory encoding."""

    up = 0
    down = 1
    left = 2
    right = 3
class D(GoalMDPDomain, Actions):
    """Domain type declarations: a goal MDP with state-dependent action sets."""

    T_state = State  # Type of states
    T_observation = T_state  # Type of observations
    T_event = Action  # Type of events
    T_value = float  # Type of transition values (rewards or costs)
    T_predicate = bool  # Type of logical checks
    T_info = (
        None  # Type of additional information given as part of an environment outcome
    )
class MyDomain(D):
    """A ``num_cols`` x ``num_rows`` stochastic grid world.

    The agent starts at (0, 0) and must reach the bottom-right cell.
    A move reaches its intended cell with probability 0.4, stays in
    place with 0.2, and drifts to one of the two cells diagonally
    adjacent to the intended one with 0.2 each; coordinates are
    clamped to the board.
    """

    def __init__(self, num_cols=10, num_rows=10):
        self.num_cols = num_cols
        self.num_rows = num_rows

    def _get_applicable_actions_from(
        self, memory: D.T_memory[D.T_state]
    ) -> D.T_agent[Space[D.T_event]]:
        """Return the moves that do not leave the board from *memory*."""
        candidates = (
            (memory.y > 0, Action.up),
            (memory.y < self.num_rows - 1, Action.down),
            (memory.x > 0, Action.left),
            (memory.x < self.num_cols - 1, Action.right),
        )
        return ListSpace([move for allowed, move in candidates if allowed])

    def _get_next_state_distribution(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
    ) -> DiscreteDistribution[D.T_state]:
        """Noisy transition: intended cell (0.4), stay (0.2), diagonal drifts (0.2 each)."""
        last_col = self.num_cols - 1
        last_row = self.num_rows - 1

        def shifted(dx, dy):
            # Cell reached by moving (dx, dy), clamped to the board.
            return State(
                min(max(memory.x + dx, 0), last_col),
                min(max(memory.y + dy, 0), last_row),
            )

        # (intended move, diagonal drift 1, diagonal drift 2) per action,
        # in the same order as the original per-action branches.
        offsets = {
            Action.left: ((-1, 0), (-1, -1), (-1, 1)),
            Action.right: ((1, 0), (1, 1), (1, -1)),
            Action.up: ((0, -1), (-1, -1), (1, -1)),
            Action.down: ((0, 1), (1, 1), (-1, 1)),
        }[action]
        intended, drift_a, drift_b = (shifted(dx, dy) for dx, dy in offsets)
        return DiscreteDistribution(
            [
                (memory, 0.2),
                (intended, 0.4),
                (drift_a, 0.2),
                (drift_b, 0.2),
            ]
        )

    def _get_transition_value(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
        next_state: Optional[D.T_state] = None,
    ) -> D.T_agent[Value[D.T_value]]:
        """Step cost: the Manhattan distance actually travelled."""
        travelled = abs(next_state.x - memory.x) + abs(next_state.y - memory.y)
        return Value(cost=travelled)

    def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
        """Episodes terminate exactly at goal states."""
        return self._is_goal(state)

    def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
        """All four moves; per-state applicability is restricted separately."""
        return EnumSpace(Action)

    def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
        """Single goal: the bottom-right cell."""
        return ListSpace([State(x=self.num_cols - 1, y=self.num_rows - 1)])

    def _get_initial_state_(self) -> D.T_state:
        """Start in the top-left cell."""
        return State(x=0, y=0)

    def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
        """Fully observable grid coordinates."""
        return MultiDiscreteSpace([self.num_cols, self.num_rows])
class GridShmProxy:
    """Shared-memory (de)serializers for the types exchanged with parallel solvers.

    Each nested proxy class knows how to allocate a multiprocessing buffer
    (``initialize``), write a value into it (``encode``) and rebuild the
    value from it (``decode``).
    """

    # (type, number of shared buffers) pairs registered with the solver.
    # NOTE(review): the counts presumably bound how many values of each type
    # are in flight at once — confirm against the skdecide solver API.
    _register_ = [
        (State, 2),
        (Action, 1),
        (EnumSpace, 1),
        (ListSpace, 1),
        (DiscreteDistribution, 1),
        (Value, 1),
        (EnvironmentOutcome, 1),
        (TransitionOutcome, 1),
        (bool, 1),
        (float, 1),
        (int, 2),
    ]

    def __init__(self):
        # Dispatch table: type -> proxy class handling that type.
        self._proxies_ = {
            State: GridShmProxy.StateProxy,
            Action: GridShmProxy.ActionProxy,
            EnumSpace: GridShmProxy.EnumSpaceProxy,
            ListSpace: GridShmProxy.ListSpaceProxy,
            DiscreteDistribution: GridShmProxy.DiscreteDistributionProxy,
            Value: GridShmProxy.ValueProxy,
            EnvironmentOutcome: GridShmProxy.EnvironmentOutcomeProxy,
            TransitionOutcome: GridShmProxy.TransitionOutcomeProxy,
            bool: GridShmProxy.BoolProxy,
            float: GridShmProxy.FloatProxy,
            int: GridShmProxy.IntProxy,
        }

    def copy(self):
        # New proxy object with an independent dispatch dict (same proxy classes).
        p = GridShmProxy()
        p._proxies_ = dict(self._proxies_)
        return p

    def register(self):
        return GridShmProxy._register_

    def initialize(self, t):
        # Allocate a fresh shared buffer able to hold a value of type t.
        return self._proxies_[t].initialize()

    def encode(self, value, shm_value):
        # Write value into the shared buffer, dispatching on its runtime type.
        self._proxies_[type(value)].encode(value, shm_value)

    def decode(self, t, shm_value):
        # Rebuild a value of type t from the shared buffer.
        return self._proxies_[t].decode(shm_value)

    class StateProxy:
        # A State travels as two doubles (x, y); decode truncates back to int.
        @staticmethod
        def initialize():
            return mp.Array("d", [0, 0], lock=True)

        @staticmethod
        def encode(state, shm_state):
            shm_state[0] = state.x
            shm_state[1] = state.y

        @staticmethod
        def decode(shm_state):
            return State(int(shm_state[0]), int(shm_state[1]))

    class ActionProxy:
        # An Action travels as its unsigned-int enum value.
        @staticmethod
        def initialize():
            return mp.Value("I", 0, lock=True)

        @staticmethod
        def encode(action, shm_action):
            shm_action.value = action.value

        @staticmethod
        def decode(shm_action):
            return Action(shm_action.value)

    class EnumSpaceProxy:  # Always used with Action as enum class
        @staticmethod
        def initialize():
            # Placeholder buffer: the space itself carries no data.
            return mp.Array("c", b"")

        @staticmethod
        def encode(val, shm_val):
            pass

        @staticmethod
        def decode(val):
            return EnumSpace(Action)

    class ListSpaceProxy:  # Always used with Action as enum class
        # Encoded as four presence flags, one per Action member.
        @staticmethod
        def initialize():
            return mp.Array("b", [False, False, False, False], lock=True)

        @staticmethod
        def encode(val, shm_val):
            # Clear all flags, then set one per action present in the space.
            for i in range(4):
                shm_val[i] = False
            for a in val.get_elements():
                if a is Action.up:
                    shm_val[0] = True
                elif a is Action.down:
                    shm_val[1] = True
                elif a is Action.left:
                    shm_val[2] = True
                elif a is Action.right:
                    shm_val[3] = True

        @staticmethod
        def decode(val):
            aa = []
            if val[0]:
                aa.append(Action.up)
            if val[1]:
                aa.append(Action.down)
            if val[2]:
                aa.append(Action.left)
            if val[3]:
                aa.append(Action.right)
            return ListSpace(aa)

    class DiscreteDistributionProxy:
        # Up to 4 (state, probability) pairs; unused slots get probability -1.
        @staticmethod
        def initialize():
            # Don't use "[] * 4" operator since it does not deep copy the pairs but rather
            # copy 4 times the pointers to the same object!
            return [
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
                [GridShmProxy.StateProxy.initialize(), mp.Value("d", 0, lock=True)],
            ]

        @staticmethod
        def encode(dd, shm_dd):
            for i, o in enumerate(dd.get_values()):
                GridShmProxy.StateProxy.encode(o[0], shm_dd[i][0])
                shm_dd[i][1].value = o[1]
            # Mark trailing unused slots as empty.
            for i in range(len(dd.get_values()), 4):
                shm_dd[i][1].value = -1

        @staticmethod
        def decode(dd):
            # Keep only slots holding a real (non-negative) probability.
            return DiscreteDistribution(
                [
                    (GridShmProxy.StateProxy.decode(o[0]), o[1].value)
                    for o in dd
                    if o[1].value > -0.5
                ]
            )

    class ValueProxy:
        # A magnitude plus a flag: True -> reward, False -> cost.
        @staticmethod
        def initialize():
            return [mp.Value("d", 0), mp.Value("b", False)]

        @staticmethod
        def encode(value, shm_value):
            if value.reward is not None:
                shm_value[0].value = value.reward
                shm_value[1].value = True
            elif value.cost is not None:
                shm_value[0].value = value.cost
                shm_value[1].value = False
            else:
                # Neither side set: encoded as a zero reward.
                shm_value[0].value = 0
                shm_value[1].value = True

        @staticmethod
        def decode(value):
            if value[1].value:
                return Value(reward=value[0].value)
            else:
                return Value(cost=value[0].value)

    class EnvironmentOutcomeProxy:
        # Buffer layout: [observation, value magnitude, value flag, termination].
        @staticmethod
        def initialize():
            return (
                [GridShmProxy.StateProxy.initialize()]
                + GridShmProxy.ValueProxy.initialize()
                + [GridShmProxy.BoolProxy.initialize()]
            )

        @staticmethod
        def encode(outcome, shm_outcome):
            GridShmProxy.StateProxy.encode(outcome.observation, shm_outcome[0])
            GridShmProxy.ValueProxy.encode(outcome.value, shm_outcome[1:3])
            GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])

        @staticmethod
        def decode(outcome):
            return EnvironmentOutcome(
                observation=GridShmProxy.StateProxy.decode(outcome[0]),
                value=GridShmProxy.ValueProxy.decode(outcome[1:3]),
                termination=GridShmProxy.BoolProxy.decode(outcome[3]),
            )

    class TransitionOutcomeProxy:
        # Same layout as EnvironmentOutcomeProxy but carries a state field.
        @staticmethod
        def initialize():
            return (
                [GridShmProxy.StateProxy.initialize()]
                + GridShmProxy.ValueProxy.initialize()
                + [GridShmProxy.BoolProxy.initialize()]
            )

        @staticmethod
        def encode(outcome, shm_outcome):
            GridShmProxy.StateProxy.encode(outcome.state, shm_outcome[0])
            GridShmProxy.ValueProxy.encode(outcome.value, shm_outcome[1:3])
            GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])

        @staticmethod
        def decode(outcome):
            return TransitionOutcome(
                state=GridShmProxy.StateProxy.decode(outcome[0]),
                value=GridShmProxy.ValueProxy.decode(outcome[1:3]),
                termination=GridShmProxy.BoolProxy.decode(outcome[3]),
            )

    class BoolProxy:
        @staticmethod
        def initialize():
            return mp.Value("b", False)

        @staticmethod
        def encode(val, shm_val):
            shm_val.value = val

        @staticmethod
        def decode(val):
            return bool(val.value)

    class FloatProxy:
        @staticmethod
        def initialize():
            # NOTE(review): initialized with False (coerced to 0.0 in a "d" slot).
            return mp.Value("d", False)

        @staticmethod
        def encode(val, shm_val):
            shm_val.value = val

        @staticmethod
        def decode(val):
            return float(val.value)

    class IntProxy:
        @staticmethod
        def initialize():
            # NOTE(review): initialized with False (coerced to 0 in an "i" slot).
            return mp.Value("i", False)

        @staticmethod
        def encode(val, shm_val):
            shm_val.value = val

        @staticmethod
        def decode(val):
            return int(val.value)
if __name__ == "__main__":
try_solvers = [
# LRTDP
{
"name": "LRTDP",
"entry": "LRTDP",
"config": {
"domain_factory": lambda: MyDomain(),
"heuristic": lambda d, s: Value(
cost=sqrt((d.num_cols - 1 - s.x) ** 2 + (d.num_rows - 1 - s.y) ** 2)
),
"use_labels": True,
"time_budget": 60000,
"rollout_budget": 10000,
"max_depth": 50,
"discount": 1.0,
"epsilon": 0.001,
"online_node_garbage": True,
"continuous_planning": False,
"parallel": True,
"debug_logs": False,
},
},
# ILAO*
{
"name": "Improved-LAO*",
"entry": "ILAOstar",
"config": {
"domain_factory": lambda: MyDomain(),
"heuristic": lambda d, s: Value(
cost=sqrt((d.num_cols - 1 - s.x) ** 2 + (d.num_rows - 1 - s.y) ** 2)
),
"discount": 1.0,
"epsilon": 0.001,
"parallel": True,
"debug_logs": False,
},
},
# UCT-Distribution (reinforcement learning / search)
{
"name": "UCT (reinforcement learning / search)",
"entry": "UCT",
"config": {
"domain_factory": lambda: MyDomain(),
"time_budget": 1000,
"rollout_budget": 100,
"max_depth": 50,
"ucb_constant": 1.0 / sqrt(2.0),
"transition_mode": MCTS.Options.TransitionMode.Distribution,
"online_node_garbage": True,
"continuous_planning": False,
"heuristic": lambda d, s: (
Value(
cost=sqrt(
(d.num_cols - 1 - s.x) ** 2 + (d.num_rows - 1 - s.y) ** 2
)
),
10000,
),
"parallel": True,
"debug_logs": False,
},
},
]
# Load solvers (filtering out badly installed ones)
solvers = map(
lambda s: dict(s, entry=load_registered_solver(s["entry"])), try_solvers
)
solvers = list(filter(lambda s: s["entry"] is not None, solvers))
# Run loop to ask user input
domain = MyDomain() # MyDomain(5,5)
with tqdm(total=len(solvers) * 100) as pbar:
for s in solvers:
solver_type = s["entry"]
for i in range(50):
s["config"]["shared_memory_proxy"] = None
with solver_type(**s["config"]) as solver:
MyDomain.solve_with(solver) # ,lambda:MyDomain(5,5))
rollout(
domain,
solver,
max_steps=50,
outcome_formatter=lambda o: f"{o.observation} - cost: {o.value.cost:.2f}",
)
pbar.update(1)
for i in range(50):
s["config"]["shared_memory_proxy"] = GridShmProxy()
with solver_type(**s["config"]) as solver:
MyDomain.solve_with(solver) # ,lambda:MyDomain(5,5))
rollout(
domain,
solver,
max_steps=50,
outcome_formatter=lambda o: f"{o.observation} - cost: {o.value.cost:.2f}",
)
pbar.update(1) | 0.914853 | 0.363703 |
import discord
from discord.ext import commands
from mysqldb import the_database
from typing import List, Union, Optional
from extra import utils
class UserBabiesTable(commands.Cog):
""" Class for the UserBabies table and its commands and methods. """
    def __init__(self, client: commands.Bot) -> None:
        """ Class init method. """

        self.client = client  # bot instance the cog is attached to
    @commands.command(hidden=True)
    @commands.has_permissions(administrator=True)
    async def create_table_user_babies(self, ctx) -> None:
        """ Creates the UserBabies table in the database. """

        member: discord.Member = ctx.author

        # Refuse to recreate an existing table.
        if await self.check_user_babies_table_exists():
            return await ctx.send(f"**The UserBabies table already exists, {member.mention}!**")

        mycursor, db = await the_database()
        # Composite primary key: one baby row per parent pair.
        await mycursor.execute("""CREATE TABLE UserBabies (
            parent_one BIGINT NOT NULL,
            parent_two BIGINT NOT NULL,
            baby_name VARCHAR(25) DEFAULT 'Embryo',
            baby_class VARCHAR(25) DEFAULT 'Embryo',
            life_points TINYINT(3) DEFAULT 100,
            food TINYINT(3) DEFAULT 100,
            life_points_ts BIGINT NOT NULL,
            food_ts BIGINT NOT NULL,
            birth_ts BIGINT DEFAULT NULL,
            PRIMARY KEY (parent_one, parent_two)
            ) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci
        """)
        await db.commit()
        await mycursor.close()

        await ctx.send(f"**`UserBabies` table created, {member.mention}!**")
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def drop_table_user_babies(self, ctx) -> None:
""" Drops the UserBabies table from the database. """
member: discord.Member = ctx.author
if not await self.check_user_babies_table_exists():
return await ctx.send(f"**The UserBabies table doesn't exist, {member.mention}!**")
mycursor, db = await the_database()
await mycursor.execute("DROP TABLE UserBabies")
await db.commit()
await mycursor.close()
await ctx.send(f"**`UserBabies` table dropped, {member.mention}!**")
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def reset_table_user_babies(self, ctx) -> None:
""" Resets the UserBabies table in the database. """
member: discord.Member = ctx.author
if not await self.check_user_babies_table_exists():
return await ctx.send(f"**The UserBabies table doesn't exist yet, {member.mention}!**")
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM UserBabies")
await db.commit()
await mycursor.close()
await ctx.send(f"**`UserBabies` table reset, {member.mention}!**")
async def check_user_babies_table_exists(self) -> bool:
""" Checks whether the UserBabies table exists in the database. """
mycursor, _ = await the_database()
await mycursor.execute("SHOW TABLE STATUS LIKE 'UserBabies'")
exists = await mycursor.fetchone()
await mycursor.close()
if exists:
return True
else:
return False
async def insert_user_baby(self,
parent_one: int, parent_two: int,
baby_name: Optional[str] = None, baby_class: Optional[str] = None) -> None:
""" Inserts a User Baby.
:param parent_one: The parent one of the baby.
:param parent_two: The parent two of the baby.
:param baby_name: The name of the baby. [Optional]
:param baby_class: The class of the baby. [Optional] """
current_ts = await utils.get_timestamp()
current_ts = current_ts + 3600
mycursor, db = await the_database()
if baby_name and baby_class:
await mycursor.execute("""
INSERT INTO UserBabies (
parent_one, parent_two, baby_name, baby_class, life_points_ts, food_ts, birth_ts
) VALUES (%s, %s, %s, %s, %s, %s, %s)""", (parent_one, parent_two, baby_name, baby_class, current_ts, current_ts, current_ts))
else:
await mycursor.execute("""
INSERT INTO UserBabies (
parent_one, parent_two, life_points_ts, food_ts, birth_ts
) VALUES (%s, %s, %s, %s, %s)""", (parent_one, parent_two, current_ts, current_ts, current_ts))
await db.commit()
await mycursor.close()
async def get_user_baby(self, parent_id: int) -> List[Union[str, int]]:
""" Get the user's baby.
:param parent_id: The ID of one of the baby's parents. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM UserBabies WHERE parent_one = %s OR parent_two = %s", (parent_id, parent_id))
user_baby = await mycursor.fetchone()
await mycursor.close()
return user_baby
async def get_babies(self) -> List[List[Union[str, int]]]:
""" Get all user babies. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM UserBabies")
user_babies = await mycursor.fetchall()
await mycursor.close()
return user_babies
async def get_hungry_babies(self, current_ts: int) -> List[List[Union[str, int]]]:
""" Get all user hungry babies.
:param current_ts: The current timestamp. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM UserBabies WHERE %s - food_ts >= 3600", (current_ts,))
user_babies = await mycursor.fetchall()
await mycursor.close()
return user_babies
async def update_user_baby_name(self, parent_id: int, baby_name: str) -> None:
""" Updates the User Baby's name.
:param parent_id: The ID of one of the baby's parents.
:param baby_name: The new baby name to update to. """
mycursor, db = await the_database()
await mycursor.execute("UPDATE UserBabies SET baby_name = %s WHERE parent_one = %s OR parent_two = %s", (baby_name, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def update_user_baby_class(self, parent_id: int, baby_class: str) -> None:
""" Updates the User Baby's class.
:param parent_id: The ID of one of the the baby's parents.
:param baby_class: The new baby class to update to. """
mycursor, db = await the_database()
await mycursor.execute("UPDATE UserBabies SET baby_class = %s WHERE parent_one = %s OR parent_two = %s", (baby_class, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def update_user_baby_lp(self, parent_id: int, increment: int = 5, current_ts: Optional[int] = None) -> None:
""" Updates the User Baby's life points.
:param parent_id: The ID of one of the baby's parents.
:param increment: The incremention value to apply. [Default = 5] (Can be negative)
:param current_ts: The current timestamp. [Optional] """
mycursor, db = await the_database()
if current_ts:
await mycursor.execute("""UPDATE UserBabies SET life_points = life_points + %s, life_points_ts = %s WHERE parent_one = %s OR parent_two = %s
""", (increment, current_ts, parent_id, parent_id))
else:
await mycursor.execute("UPDATE UserBabies SET life_points = life_points + %s WHERE parent_one = %s OR parent_two = %s", (increment, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def update_user_baby_food(self, parent_id: int, increment: int = 5, current_ts: Optional[int] = None) -> None:
""" Updates the User Baby's food status.
:param parent_id: The ID of one of the baby's parents.
:param increment: The incremention value to apply. [Default = 5] (Can be negative)
:param current_ts: The current timestamp. [Optional] """
mycursor, db = await the_database()
if current_ts:
await mycursor.execute("""UPDATE UserBabies SET food = food + %s, food_ts = %s WHERE parent_one = %s OR parent_two = %s
""", (increment, current_ts, parent_id, parent_id))
else:
await mycursor.execute("UPDATE UserBabies SET food = food + %s WHERE parent_one = %s OR parent_two = %s", (increment, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def delete_user_baby(self, parent_id: int) -> None:
""" Deletes the user's baby.
:param parent_id: The ID of one of the baby's parents. """
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM UserBabies WHERE parent_one = %s or parent_two = %s", (parent_id, parent_id))
await db.commit()
await mycursor.close() | extra/slothclasses/userbabies.py | import discord
from discord.ext import commands
from mysqldb import the_database
from typing import List, Union, Optional
from extra import utils
class UserBabiesTable(commands.Cog):
""" Class for the UserBabies table and its commands and methods. """
def __init__(self, client: commands.Bot) -> None:
""" Class init method. """
self.client = client
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def create_table_user_babies(self, ctx) -> None:
""" Creates the UserBabies table in the database. """
member: discord.Member = ctx.author
if await self.check_user_babies_table_exists():
return await ctx.send(f"**The UserBabies table already exists, {member.mention}!**")
mycursor, db = await the_database()
await mycursor.execute("""CREATE TABLE UserBabies (
parent_one BIGINT NOT NULL,
parent_two BIGINT NOT NULL,
baby_name VARCHAR(25) DEFAULT 'Embryo',
baby_class VARCHAR(25) DEFAULT 'Embryo',
life_points TINYINT(3) DEFAULT 100,
food TINYINT(3) DEFAULT 100,
life_points_ts BIGINT NOT NULL,
food_ts BIGINT NOT NULL,
birth_ts BIGINT DEFAULT NULL,
PRIMARY KEY (parent_one, parent_two)
) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci
""")
await db.commit()
await mycursor.close()
await ctx.send(f"**`UserBabies` table created, {member.mention}!**")
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def drop_table_user_babies(self, ctx) -> None:
""" Drops the UserBabies table from the database. """
member: discord.Member = ctx.author
if not await self.check_user_babies_table_exists():
return await ctx.send(f"**The UserBabies table doesn't exist, {member.mention}!**")
mycursor, db = await the_database()
await mycursor.execute("DROP TABLE UserBabies")
await db.commit()
await mycursor.close()
await ctx.send(f"**`UserBabies` table dropped, {member.mention}!**")
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def reset_table_user_babies(self, ctx) -> None:
""" Resets the UserBabies table in the database. """
member: discord.Member = ctx.author
if not await self.check_user_babies_table_exists():
return await ctx.send(f"**The UserBabies table doesn't exist yet, {member.mention}!**")
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM UserBabies")
await db.commit()
await mycursor.close()
await ctx.send(f"**`UserBabies` table reset, {member.mention}!**")
async def check_user_babies_table_exists(self) -> bool:
""" Checks whether the UserBabies table exists in the database. """
mycursor, _ = await the_database()
await mycursor.execute("SHOW TABLE STATUS LIKE 'UserBabies'")
exists = await mycursor.fetchone()
await mycursor.close()
if exists:
return True
else:
return False
async def insert_user_baby(self,
parent_one: int, parent_two: int,
baby_name: Optional[str] = None, baby_class: Optional[str] = None) -> None:
""" Inserts a User Baby.
:param parent_one: The parent one of the baby.
:param parent_two: The parent two of the baby.
:param baby_name: The name of the baby. [Optional]
:param baby_class: The class of the baby. [Optional] """
current_ts = await utils.get_timestamp()
current_ts = current_ts + 3600
mycursor, db = await the_database()
if baby_name and baby_class:
await mycursor.execute("""
INSERT INTO UserBabies (
parent_one, parent_two, baby_name, baby_class, life_points_ts, food_ts, birth_ts
) VALUES (%s, %s, %s, %s, %s, %s, %s)""", (parent_one, parent_two, baby_name, baby_class, current_ts, current_ts, current_ts))
else:
await mycursor.execute("""
INSERT INTO UserBabies (
parent_one, parent_two, life_points_ts, food_ts, birth_ts
) VALUES (%s, %s, %s, %s, %s)""", (parent_one, parent_two, current_ts, current_ts, current_ts))
await db.commit()
await mycursor.close()
async def get_user_baby(self, parent_id: int) -> List[Union[str, int]]:
""" Get the user's baby.
:param parent_id: The ID of one of the baby's parents. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM UserBabies WHERE parent_one = %s OR parent_two = %s", (parent_id, parent_id))
user_baby = await mycursor.fetchone()
await mycursor.close()
return user_baby
async def get_babies(self) -> List[List[Union[str, int]]]:
""" Get all user babies. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM UserBabies")
user_babies = await mycursor.fetchall()
await mycursor.close()
return user_babies
async def get_hungry_babies(self, current_ts: int) -> List[List[Union[str, int]]]:
""" Get all user hungry babies.
:param current_ts: The current timestamp. """
mycursor, _ = await the_database()
await mycursor.execute("SELECT * FROM UserBabies WHERE %s - food_ts >= 3600", (current_ts,))
user_babies = await mycursor.fetchall()
await mycursor.close()
return user_babies
async def update_user_baby_name(self, parent_id: int, baby_name: str) -> None:
""" Updates the User Baby's name.
:param parent_id: The ID of one of the baby's parents.
:param baby_name: The new baby name to update to. """
mycursor, db = await the_database()
await mycursor.execute("UPDATE UserBabies SET baby_name = %s WHERE parent_one = %s OR parent_two = %s", (baby_name, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def update_user_baby_class(self, parent_id: int, baby_class: str) -> None:
""" Updates the User Baby's class.
:param parent_id: The ID of one of the the baby's parents.
:param baby_class: The new baby class to update to. """
mycursor, db = await the_database()
await mycursor.execute("UPDATE UserBabies SET baby_class = %s WHERE parent_one = %s OR parent_two = %s", (baby_class, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def update_user_baby_lp(self, parent_id: int, increment: int = 5, current_ts: Optional[int] = None) -> None:
""" Updates the User Baby's life points.
:param parent_id: The ID of one of the baby's parents.
:param increment: The incremention value to apply. [Default = 5] (Can be negative)
:param current_ts: The current timestamp. [Optional] """
mycursor, db = await the_database()
if current_ts:
await mycursor.execute("""UPDATE UserBabies SET life_points = life_points + %s, life_points_ts = %s WHERE parent_one = %s OR parent_two = %s
""", (increment, current_ts, parent_id, parent_id))
else:
await mycursor.execute("UPDATE UserBabies SET life_points = life_points + %s WHERE parent_one = %s OR parent_two = %s", (increment, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def update_user_baby_food(self, parent_id: int, increment: int = 5, current_ts: Optional[int] = None) -> None:
""" Updates the User Baby's food status.
:param parent_id: The ID of one of the baby's parents.
:param increment: The incremention value to apply. [Default = 5] (Can be negative)
:param current_ts: The current timestamp. [Optional] """
mycursor, db = await the_database()
if current_ts:
await mycursor.execute("""UPDATE UserBabies SET food = food + %s, food_ts = %s WHERE parent_one = %s OR parent_two = %s
""", (increment, current_ts, parent_id, parent_id))
else:
await mycursor.execute("UPDATE UserBabies SET food = food + %s WHERE parent_one = %s OR parent_two = %s", (increment, parent_id, parent_id))
await db.commit()
await mycursor.close()
async def delete_user_baby(self, parent_id: int) -> None:
""" Deletes the user's baby.
:param parent_id: The ID of one of the baby's parents. """
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM UserBabies WHERE parent_one = %s or parent_two = %s", (parent_id, parent_id))
await db.commit()
await mycursor.close() | 0.703346 | 0.150434 |
import asyncio
import json
import asyncpg
import enum
import time
from bs4 import BeautifulSoup
from bs4.element import Tag, ResultSet
import aiofiles
from aiohttp import ClientSession, TCPConnector, ClientTimeout, ServerDisconnectedError, ClientConnectorError
from asyncpg import Pool
# 全局数据库信息 = > config.json
CONFIG: dict = None
# 全局数据库连接池
POOL: Pool = None
# 全局http回话
SESSION: ClientSession = None
# 区划代码首页地址
URL_BASE: str = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/index.html'
# 区划代码发布日期字典
DATE_DICT: dict[int, str] = None
# 休眠
TIME_SLEEP: int = 0.5
# 数据缓存
DATA_TEMP: list[tuple[int, str, str, str, int, int, int, list[int], str]] = []
# 城市缓存
DATA_CITY: list[tuple[str, int, list[int], str]] = []
# 当前年
CONTEXT_YEAR: int = None
class AreaType(enum.Enum):
Province = 1 << 48
City = 1 << 36
Country = 1 << 24
Town = 1 << 12
Village = 1
async def main() -> None:
start_time = time.time()
await init()
await start()
await SESSION.close()
out('main', f'程序共耗时{time_use(start_time)}s')
async def init() -> None:
# 1 初始化配置信息
await init_config()
# 2 初始化数据库
await init_sql()
# 3 初始化table
await init_table()
# 4 初始化session
await init_session()
# 5 初始化日期字典
await init_date()
async def init_config() -> None:
global CONFIG
info = await read_file('config.json')
out('init_config', info)
CONFIG = json.loads(info)
out('init_config', '初始化配置信息成功')
async def init_sql() -> None:
global POOL
conn: str = CONFIG['ODBC']
out('init_sql', f'正在连接 > {conn}')
POOL = await asyncpg.create_pool(conn)
out('init_sql', '数据库连接成功')
async def init_table() -> None:
sql = await read_file('table.sql')
out('init_table', f'初始化表 > {sql}')
out('init_table', f'插入语句 > {CONFIG["InsertSQL"]}')
async with POOL.acquire() as conn:
await conn.execute(sql)
out('init_table', '数据表初始化完成')
async def init_session() -> None:
out('init_session', '初始化Session')
time_out: int = 1
conn_limit: int = 50
header_dic: dict[str, str] = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36 Edg/96.0.1054.62',
'referer': URL_BASE}
global SESSION
SESSION = ClientSession(
timeout=ClientTimeout(total=time_out),
headers=header_dic,
connector=TCPConnector(limit=conn_limit, force_close=True))
out('init_session', f'Session初始化成功 > 超时:{time_out} 限制连接数:{conn_limit}')
async def init_date() -> None:
body = await get_data(URL_BASE)
html = BeautifulSoup(body, 'html.parser', from_encoding='gb18030')
date_dict: dict[int, str] = {}
for i in html.select('ul.center_list_contlist span.cont_tit'):
date = i.select('font')
date_dict[int(date[0].text.replace('年', ''))] = date[1].text
global DATE_DICT
DATE_DICT = date_dict
out('init_date', '区域数据发布日期初始化完成')
out('init_date', DATE_DICT)
async def start() -> None:
for year in CONFIG['Year']:
if(year in DATE_DICT):
global CONTEXT_YEAR
CONTEXT_YEAR = year
out('start', f'开始下载{year}年数据')
await make_data()
else:
out('start', f'未找到{year}年数据')
async def make_data() -> None:
"""组装数据 核心函数大变样 以市为分界线进行分区读取 增加模块的专一性
拆分功能职责 将通用部分声明称公共变量"""
# 加载市以上的行政单位 包括城市
url = f'{trim_right(URL_BASE)}{CONTEXT_YEAR}/index.html'
body = await get_data(url)
html = BeautifulSoup(body, 'html.parser', from_encoding='gb18030')
page_rows: tuple[AreaType, ResultSet[Tag]] = read_data(html)
provinces = build_data(data=page_rows[1],
type=page_rows[0],
page_url=url)
out('make_data', '加载城市数据')
global DATA_CITY
for p in provinces:
ps = p[2].copy()
ps.append(p[1])
p_body = await get_data(p[0])
p_html = BeautifulSoup(p_body, 'html.parser', from_encoding='gb18030')
p_page_rows: tuple[AreaType, ResultSet[Tag]] = read_data(p_html)
DATA_CITY += build_data(data=p_page_rows[1],
type=p_page_rows[0],
page_url=p[0],
parent_id=p[1],
parents_id=ps,
parent_full_name=p[3])
for c in DATA_CITY:
out('make_data', f'执行下载数据 >> {c[3]}')
start_time = time.time()
await next_down(c)
out('make_data', f'{c[3]} 数据下载成功 用时{time_use(start_time)}s')
await save_data()
# 完成一年的数据加载后清除城市缓存信息
DATA_CITY.clear()
async def next_down(info: tuple[str, int, list[int], str], is_rec: bool = True) -> None:
"""加载市以下的行政单位 不包括城市"""
ps = info[2].copy()
ps.append(info[1])
body = await get_data(info[0])
html = BeautifulSoup(body, 'html.parser', from_encoding='gb18030')
next_page_rows: tuple[AreaType, ResultSet[Tag]] = read_data(html)
if(next_page_rows == None):
out('next_down', f'奇奇怪怪日志 url >> {info[0]}')
return
next_infos = build_data(data=next_page_rows[1],
type=next_page_rows[0],
page_url=info[0],
parent_id=info[1],
parents_id=ps,
parent_full_name=info[3])
if(next_infos and is_rec):
for ni in next_infos:
await next_down(ni)
async def save_data() -> None:
if(DATA_TEMP):
start_time = time.time()
async with POOL.acquire() as conn:
async with conn.transaction():
await conn.executemany(CONFIG['InsertSQL'], DATA_TEMP)
out('save_data', f'已插入数据{len(DATA_TEMP)}条 耗时{time_use(start_time)}s')
DATA_TEMP.clear()
def read_data(html: BeautifulSoup) -> tuple[AreaType, ResultSet[Tag]]:
"""读取数据 全新改版 核心思想不变 增加异常数据报错 为空时是大胡同街道场景"""
rows = html.select('tr.villagetr')
if(rows):
return (AreaType.Village, rows)
rows = html.select('tr.towntr')
if(rows):
return (AreaType.Town, rows)
rows = html.select('tr.countytr')
if(rows):
return (AreaType.Country, rows)
rows = html.select('tr.citytr')
if(rows):
return (AreaType.City, rows)
rows = html.select('tr.provincetr a')
if(rows):
return (AreaType.Province, rows)
if(html.select('a.STYLE3')):
out('read_data', f'注意奇奇怪怪发生啦 {html.prettify()}')
return None
else:
out('read_data', html.prettify())
raise Exception('捕获到异常数据')
def build_data(data: ResultSet[Tag],
type: AreaType,
page_url: str,
parent_id: int = None,
parents_id: list[int] = [],
parent_full_name: str = '') -> list[tuple[str, int, list[int], str]]:
"""构建数据对象 这个地方应该放回下级对象的所需的本方法所有参数 以满足递归调用"""
next_base_url = trim_right(page_url)
loop = len(data)
if(type == AreaType.Village):
for i in range(loop):
e: ResultSet[Tag] = data[i].find_all('td')
name: str = e[2].text
model = (type.value * (i+1) + parent_id, e[0].text, name,
f'{parent_full_name}/{name}', int(e[1].text),
level(type), CONTEXT_YEAR, parents_id, DATE_DICT[CONTEXT_YEAR])
DATA_TEMP.append(model)
return []
elif(type == AreaType.Province):
next_infos: list = []
for i in range(loop):
id = type.value * (i+1)
e: Tag = data[i]
href: str = e.attrs['href']
name: str = e.text
model = (id, href[0: 2].ljust(12, '0'), name, name,
None, level(type), CONTEXT_YEAR, [], DATE_DICT[CONTEXT_YEAR])
DATA_TEMP.append(model)
next_info = (f'{next_base_url}{href}', id, parents_id, name)
next_infos.append(next_info)
return next_infos
else:
next_infos: list = []
for i in range(loop):
id = type.value * (i+1) + parent_id
e: ResultSet[Tag] = data[i].find_all('td')
name: str = e[1].text
full_name = f'{parent_full_name}/{name}'
model = (id, e[0].text, name, full_name, None, level(type),
CONTEXT_YEAR, parents_id, DATE_DICT[CONTEXT_YEAR])
DATA_TEMP.append(model)
a: Tag = e[0].find('a')
if(a):
url = f"{next_base_url}{a.attrs['href']}"
next_info = (url, id, parents_id, full_name)
next_infos.append(next_info)
return next_infos
def level(type: AreaType) -> int:
if(type == AreaType.Village):
return 5
elif(type == AreaType.Town):
return 4
elif(type == AreaType.Country):
return 3
elif(type == AreaType.City):
return 2
else:
return 1
async def get_data(url: str) -> bytes:
"""当404网站出现时 该方法会返回None 使用该方法需要判断是否为空"""
try:
async with SESSION.get(url) as resp:
if(resp.status == 200):
return await resp.content.read()
elif(resp.status == 404):
body = await resp.text()
out('get_data', f'404出现了 {url} {body}')
return None
elif(resp.status == 502):
body = await resp.text()
out('get_data', f'502出现了 {TIME_SLEEP}秒后重试 {url} {body}')
time.sleep(TIME_SLEEP)
return await get_data(url)
else:
out('get_data', '警告 捕获到未知异常 下面是当前页面请求体')
out('get_data', await resp.text())
raise Exception("get_data")
# TimeoutError类名与urllib类库的超时异常类重名 这里需要指定
except (asyncio.exceptions.TimeoutError, ServerDisconnectedError, ClientConnectorError):
out('get_data', f'读取超时 {TIME_SLEEP}秒后重试 {url}')
time.sleep(TIME_SLEEP)
return await get_data(url)
async def read_file(file_name: str) -> str:
async with aiofiles.open(file_name) as file:
return await file.read()
def time_use(start_time: float) -> float:
return round(time.time()-start_time, 2)
def out(mode: str, desc: any) -> None:
print(f"{time.strftime('[%H:%M:%S]', time.localtime())} [{mode}] {desc}")
def trim_right(str: str) -> str:
"""移除该字符串从右往左数第一个'/'右边的字符"""
return str[: str.rfind('/')+1]
if __name__ == '__main__':
try:
asyncio.run(main())
except KeyboardInterrupt:
out('程序 ctrl + c 中止') | AreaInfo.py | import asyncio
import json
import asyncpg
import enum
import time
from bs4 import BeautifulSoup
from bs4.element import Tag, ResultSet
import aiofiles
from aiohttp import ClientSession, TCPConnector, ClientTimeout, ServerDisconnectedError, ClientConnectorError
from asyncpg import Pool
# 全局数据库信息 = > config.json
CONFIG: dict = None
# 全局数据库连接池
POOL: Pool = None
# 全局http回话
SESSION: ClientSession = None
# 区划代码首页地址
URL_BASE: str = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/index.html'
# 区划代码发布日期字典
DATE_DICT: dict[int, str] = None
# 休眠
TIME_SLEEP: int = 0.5
# 数据缓存
DATA_TEMP: list[tuple[int, str, str, str, int, int, int, list[int], str]] = []
# 城市缓存
DATA_CITY: list[tuple[str, int, list[int], str]] = []
# 当前年
CONTEXT_YEAR: int = None
class AreaType(enum.Enum):
Province = 1 << 48
City = 1 << 36
Country = 1 << 24
Town = 1 << 12
Village = 1
async def main() -> None:
start_time = time.time()
await init()
await start()
await SESSION.close()
out('main', f'程序共耗时{time_use(start_time)}s')
async def init() -> None:
# 1 初始化配置信息
await init_config()
# 2 初始化数据库
await init_sql()
# 3 初始化table
await init_table()
# 4 初始化session
await init_session()
# 5 初始化日期字典
await init_date()
async def init_config() -> None:
global CONFIG
info = await read_file('config.json')
out('init_config', info)
CONFIG = json.loads(info)
out('init_config', '初始化配置信息成功')
async def init_sql() -> None:
global POOL
conn: str = CONFIG['ODBC']
out('init_sql', f'正在连接 > {conn}')
POOL = await asyncpg.create_pool(conn)
out('init_sql', '数据库连接成功')
async def init_table() -> None:
sql = await read_file('table.sql')
out('init_table', f'初始化表 > {sql}')
out('init_table', f'插入语句 > {CONFIG["InsertSQL"]}')
async with POOL.acquire() as conn:
await conn.execute(sql)
out('init_table', '数据表初始化完成')
async def init_session() -> None:
out('init_session', '初始化Session')
time_out: int = 1
conn_limit: int = 50
header_dic: dict[str, str] = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36 Edg/96.0.1054.62',
'referer': URL_BASE}
global SESSION
SESSION = ClientSession(
timeout=ClientTimeout(total=time_out),
headers=header_dic,
connector=TCPConnector(limit=conn_limit, force_close=True))
out('init_session', f'Session初始化成功 > 超时:{time_out} 限制连接数:{conn_limit}')
async def init_date() -> None:
body = await get_data(URL_BASE)
html = BeautifulSoup(body, 'html.parser', from_encoding='gb18030')
date_dict: dict[int, str] = {}
for i in html.select('ul.center_list_contlist span.cont_tit'):
date = i.select('font')
date_dict[int(date[0].text.replace('年', ''))] = date[1].text
global DATE_DICT
DATE_DICT = date_dict
out('init_date', '区域数据发布日期初始化完成')
out('init_date', DATE_DICT)
async def start() -> None:
for year in CONFIG['Year']:
if(year in DATE_DICT):
global CONTEXT_YEAR
CONTEXT_YEAR = year
out('start', f'开始下载{year}年数据')
await make_data()
else:
out('start', f'未找到{year}年数据')
async def make_data() -> None:
"""组装数据 核心函数大变样 以市为分界线进行分区读取 增加模块的专一性
拆分功能职责 将通用部分声明称公共变量"""
# 加载市以上的行政单位 包括城市
url = f'{trim_right(URL_BASE)}{CONTEXT_YEAR}/index.html'
body = await get_data(url)
html = BeautifulSoup(body, 'html.parser', from_encoding='gb18030')
page_rows: tuple[AreaType, ResultSet[Tag]] = read_data(html)
provinces = build_data(data=page_rows[1],
type=page_rows[0],
page_url=url)
out('make_data', '加载城市数据')
global DATA_CITY
for p in provinces:
ps = p[2].copy()
ps.append(p[1])
p_body = await get_data(p[0])
p_html = BeautifulSoup(p_body, 'html.parser', from_encoding='gb18030')
p_page_rows: tuple[AreaType, ResultSet[Tag]] = read_data(p_html)
DATA_CITY += build_data(data=p_page_rows[1],
type=p_page_rows[0],
page_url=p[0],
parent_id=p[1],
parents_id=ps,
parent_full_name=p[3])
for c in DATA_CITY:
out('make_data', f'执行下载数据 >> {c[3]}')
start_time = time.time()
await next_down(c)
out('make_data', f'{c[3]} 数据下载成功 用时{time_use(start_time)}s')
await save_data()
# 完成一年的数据加载后清除城市缓存信息
DATA_CITY.clear()
async def next_down(info: tuple[str, int, list[int], str], is_rec: bool = True) -> None:
"""加载市以下的行政单位 不包括城市"""
ps = info[2].copy()
ps.append(info[1])
body = await get_data(info[0])
html = BeautifulSoup(body, 'html.parser', from_encoding='gb18030')
next_page_rows: tuple[AreaType, ResultSet[Tag]] = read_data(html)
if(next_page_rows == None):
out('next_down', f'奇奇怪怪日志 url >> {info[0]}')
return
next_infos = build_data(data=next_page_rows[1],
type=next_page_rows[0],
page_url=info[0],
parent_id=info[1],
parents_id=ps,
parent_full_name=info[3])
if(next_infos and is_rec):
for ni in next_infos:
await next_down(ni)
async def save_data() -> None:
if(DATA_TEMP):
start_time = time.time()
async with POOL.acquire() as conn:
async with conn.transaction():
await conn.executemany(CONFIG['InsertSQL'], DATA_TEMP)
out('save_data', f'已插入数据{len(DATA_TEMP)}条 耗时{time_use(start_time)}s')
DATA_TEMP.clear()
def read_data(html: BeautifulSoup) -> tuple[AreaType, ResultSet[Tag]]:
"""读取数据 全新改版 核心思想不变 增加异常数据报错 为空时是大胡同街道场景"""
rows = html.select('tr.villagetr')
if(rows):
return (AreaType.Village, rows)
rows = html.select('tr.towntr')
if(rows):
return (AreaType.Town, rows)
rows = html.select('tr.countytr')
if(rows):
return (AreaType.Country, rows)
rows = html.select('tr.citytr')
if(rows):
return (AreaType.City, rows)
rows = html.select('tr.provincetr a')
if(rows):
return (AreaType.Province, rows)
if(html.select('a.STYLE3')):
out('read_data', f'注意奇奇怪怪发生啦 {html.prettify()}')
return None
else:
out('read_data', html.prettify())
raise Exception('捕获到异常数据')
def build_data(data: ResultSet[Tag],
type: AreaType,
page_url: str,
parent_id: int = None,
parents_id: list[int] = [],
parent_full_name: str = '') -> list[tuple[str, int, list[int], str]]:
"""构建数据对象 这个地方应该放回下级对象的所需的本方法所有参数 以满足递归调用"""
next_base_url = trim_right(page_url)
loop = len(data)
if(type == AreaType.Village):
for i in range(loop):
e: ResultSet[Tag] = data[i].find_all('td')
name: str = e[2].text
model = (type.value * (i+1) + parent_id, e[0].text, name,
f'{parent_full_name}/{name}', int(e[1].text),
level(type), CONTEXT_YEAR, parents_id, DATE_DICT[CONTEXT_YEAR])
DATA_TEMP.append(model)
return []
elif(type == AreaType.Province):
next_infos: list = []
for i in range(loop):
id = type.value * (i+1)
e: Tag = data[i]
href: str = e.attrs['href']
name: str = e.text
model = (id, href[0: 2].ljust(12, '0'), name, name,
None, level(type), CONTEXT_YEAR, [], DATE_DICT[CONTEXT_YEAR])
DATA_TEMP.append(model)
next_info = (f'{next_base_url}{href}', id, parents_id, name)
next_infos.append(next_info)
return next_infos
else:
next_infos: list = []
for i in range(loop):
id = type.value * (i+1) + parent_id
e: ResultSet[Tag] = data[i].find_all('td')
name: str = e[1].text
full_name = f'{parent_full_name}/{name}'
model = (id, e[0].text, name, full_name, None, level(type),
CONTEXT_YEAR, parents_id, DATE_DICT[CONTEXT_YEAR])
DATA_TEMP.append(model)
a: Tag = e[0].find('a')
if(a):
url = f"{next_base_url}{a.attrs['href']}"
next_info = (url, id, parents_id, full_name)
next_infos.append(next_info)
return next_infos
def level(type: AreaType) -> int:
if(type == AreaType.Village):
return 5
elif(type == AreaType.Town):
return 4
elif(type == AreaType.Country):
return 3
elif(type == AreaType.City):
return 2
else:
return 1
async def get_data(url: str) -> bytes:
"""当404网站出现时 该方法会返回None 使用该方法需要判断是否为空"""
try:
async with SESSION.get(url) as resp:
if(resp.status == 200):
return await resp.content.read()
elif(resp.status == 404):
body = await resp.text()
out('get_data', f'404出现了 {url} {body}')
return None
elif(resp.status == 502):
body = await resp.text()
out('get_data', f'502出现了 {TIME_SLEEP}秒后重试 {url} {body}')
time.sleep(TIME_SLEEP)
return await get_data(url)
else:
out('get_data', '警告 捕获到未知异常 下面是当前页面请求体')
out('get_data', await resp.text())
raise Exception("get_data")
# TimeoutError类名与urllib类库的超时异常类重名 这里需要指定
except (asyncio.exceptions.TimeoutError, ServerDisconnectedError, ClientConnectorError):
out('get_data', f'读取超时 {TIME_SLEEP}秒后重试 {url}')
time.sleep(TIME_SLEEP)
return await get_data(url)
async def read_file(file_name: str) -> str:
async with aiofiles.open(file_name) as file:
return await file.read()
def time_use(start_time: float) -> float:
return round(time.time()-start_time, 2)
def out(mode: str, desc: any) -> None:
print(f"{time.strftime('[%H:%M:%S]', time.localtime())} [{mode}] {desc}")
def trim_right(str: str) -> str:
"""移除该字符串从右往左数第一个'/'右边的字符"""
return str[: str.rfind('/')+1]
if __name__ == '__main__':
try:
asyncio.run(main())
except KeyboardInterrupt:
out('程序 ctrl + c 中止') | 0.128895 | 0.108095 |
import time
import struct
import socket
import threading
import numpy as np
from .BCIDecoder import generate_simulation_data
from . import logger
simulationMode = True
maxLength = 3600 # Seconds
class SimulationDataGenerator(object):
''' Generate simulation data '''
def __init__(self):
''' Initialize the simulation data '''
self.all_data, _ = generate_simulation_data()
self.raw = self.all_data.copy()
self.ptr = 0
logger.debug(f'Simulation data ({self.all_data.shape}) is generated.')
def reset(self):
''' Reset the simulation data generator,
the generator will work as it was initialized.
'''
self.all_data = self.raw
self.ptr = 0
logger.debug(
f'Simulation restarted from begining, the data has been restored.')
def pop(self, length=40):
''' Pop the simulation data from the top [length]
Args:
- @length: The length to be popped from the top.
'''
if length < self.all_data.shape[1]:
d = self.all_data[:, :length]
self.all_data = self.all_data[:, length:]
# logger.debug(f'Normally fetch data for {length}.')
return d
if length == self.all_data.shape[1]:
d = self.all_data.copy()
# logger.debug(f'Normally fetch data for {length}.')
self.all_data = self.raw
logger.debug(f'Current data is empty, restart from begining.')
return d
if length < self.all_data.shape[1]:
d0 = self.all_data.copy()
logger.debug(f'Partly fetch data 1 for {d0.shape[1]}.')
self.all_data = self.raw
logger.debug(f'Current data is empty, restart from begining.')
length -= d0.shape[1]
d1 = self.all_data[:, :length]
logger.debug(f'Partly fetch data 2 for {length}.')
self.all_data = self.all_data[:, length:]
return np.concatenate([d0, d1], axis=1)
logger.error(f'Can not fetch data as request, length is "{length}"')
return None
class NeuroScanDeviceClient(object):
'''NeuroScan Device Client.
The communication is in TCP socket,
the process is:
1.1. Connect to the device, @connect;
1.2. Send start scan request to device;
2. Start acquisition, @start_acq;
3. The device will send data every 0.04 seconds;
4. Stop acquisition, @stop_acq;
5.1. Send stop scan request to device;
5.2. Disconnect from the device, @disconnect.
'''
def __init__(self, ip_address, port, sample_rate, n_channels, time_per_packet=0.04, simulationMode=simulationMode, maxLength=maxLength, autoDetectLabelFlag=False, predict=None):
'''Initialize with Basic Parameters,
and connect to the device.
Args:
- @ip_address: The IP address of the device;
- @port: The port of the device;
- @sample_rate: The sample rate;
- @n_channels: The number of channels;
- @time_per_packet: The time gap between two packet from the device, the default value is 0.04 seconds;
- @simulationMode: If use simulation mode, in simulation mode, the EEG Device is ignored, the data will be automatically generated;
- @maxLength: The max length of the data, the unit is in seconds;
- @autoDetectLabelFlag: The flag of automatically detect 33 label, if it detected, the predict function will be called;
- @predict: Predict function, used for autoDetectLabelFlag, it will be called on independent thread when 33 label is detected.
'''
self.simulationMode = simulationMode
self.maxLength = maxLength
self.ip_address = ip_address
self.port = port
self.sample_rate = sample_rate
self.n_channels = n_channels
self.time_per_packet = time_per_packet
self.compute_bytes_per_package()
self._clear()
logger.info(f'EEG Device client initialized.')
if not simulationMode:
self.connect()
else:
self.sdg = SimulationDataGenerator()
logger.info(f'Simulation mode is used')
self.autoDetectLabelFlag = autoDetectLabelFlag
self.predict = predict
if self.autoDetectLabelFlag:
logger.debug(
f'Using auto detect label mode, when 33 received, the predict func will be called')
def _clear(self):
''' Clear data '''
self.data = np.zeros(
(self.n_channels, self.maxLength * self.sample_rate))
self.data_length = 0
logger.info(
f'Created new data pool as zero matrix of {self.data.shape}')
def _add(self, d):
    '''Accumulate a new data chunk [d] into the data pool.

    Args:
        - @d: New chunk of shape (n_channels x time_points); the last row
          carries the label channel.

    The chunk is dropped (with an error log) when it would overflow the
    pre-allocated pool. When label value 33 appears in the chunk and auto
    detection is enabled, the predict callback is scheduled on a thread.
    '''
    n = d.shape[1]
    # Reject the chunk only when it truly does not fit: the original
    # `not self.data_length + n < limit` also rejected an exact fill.
    if self.data_length + n > self.data.shape[1]:
        logger.error(
            f'The time limit of the data is reached. New data is ignored.')
        return
    self.data[:, self.data_length:self.data_length + n] = d
    self.data_length += n
    # Label 33 is the trigger marker; fire predict only when auto detection
    # was requested and a callback exists (matches the documented contract).
    if 33 in d[-1, :] and self.autoDetectLabelFlag and self.predict is not None:
        self._predict()
def _predict(self):
    '''Run the predict callback on a daemon thread so collection is not blocked.'''
    # Thread(daemon=True) replaces setDaemon(), deprecated since Python 3.10.
    t = threading.Thread(target=self.predict, daemon=True)
    t.start()
def compute_bytes_per_package(self):
    '''Derive per-packet sizes from the sample rate and packet interval.

    Generates:
        - @packet_time_point: number of time points carried by one packet;
        - @bytes_per_packet: payload size of one packet in bytes
          (n_channels plus one label row, 4 bytes per int32 sample).
    '''
    points = int(np.round(self.sample_rate * self.time_per_packet))
    self.packet_time_point = points
    self.bytes_per_packet = (self.n_channels + 1) * points * 4
def _unpack_data_fmt(self):
    '''Build the struct format for one packet body.

    Outs:
        - Format string for little-endian int32 samples, e.g. '<400i'.
    '''
    count = (self.n_channels + 1) * self.packet_time_point
    return f'<{count}i'
def _unpack_header(self, header_packet):
    '''Decode a 12-byte big-endian packet header.

    Args:
        - @header_packet: The 12 header bytes.
    Outs:
        - Tuple (chan_name, w_code, w_request, packet_size).
    '''
    # One combined unpack is equivalent to the four sliced unpacks:
    # 4s + H + H + I consumes exactly the 12 header bytes.
    chan_name, w_code, w_request, packet_size = struct.unpack(
        '>4sHHI', header_packet)
    return (chan_name, w_code, w_request, packet_size)
def _unpack_data(self, data_packet):
    '''Decode one packet body into a sample matrix.

    Args:
        - @data_packet: Raw packet body bytes.
    Outs:
        - Matrix of shape ((n_channels + 1) x time_points); samples arrive
          time-major, hence the reshape followed by a transpose.
    '''
    flat = struct.unpack(self._unpack_data_fmt(), data_packet)
    matrix = np.asarray(flat).reshape((-1, self.n_channels + 1))
    return matrix.T
def connect(self):
    '''Open the TCP connection to the device and request acquisition start.'''
    self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Buffer sizing in whole packets: one outgoing, up to nine incoming.
    SEND_BUF_SIZE = self.bytes_per_packet
    RECV_BUF_SIZE = self.bytes_per_packet * 9
    self.client.connect((self.ip_address, self.port))
    # The 0.04 s packets are latency-sensitive, so disable Nagle.
    self.client.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
    # NOTE(review): buffer sizes are set after connect(); some platforms
    # ignore post-connection changes -- confirm if the sizing matters here.
    self.client.setsockopt(
        socket.SOL_SOCKET, socket.SO_SNDBUF, SEND_BUF_SIZE)
    self.client.setsockopt(
        socket.SOL_SOCKET, socket.SO_RCVBUF, RECV_BUF_SIZE)
    logger.info('Established the connection to EEG Device.')
    # Send start acquisition request ('CTRL' = 67,84,82,76; code 2, request 1)
    self.send(struct.pack('12B', 67, 84, 82, 76, 0, 2, 0, 1, 0, 0, 0, 0))
    # Receive reply (24-byte acknowledgement)
    header_packet = self.receive_data(24)
    logger.debug(f'Received reply for ACQ request: {header_packet}')
def send(self, msg):
    '''Send a message to the device.

    Args:
        - @msg: The message to be sent, it should be of bytes.
    '''
    # sendall() retries until every byte is written; plain send() may
    # transmit only part of the buffer and silently drop the rest.
    self.client.sendall(msg)
    logger.debug(f'Sent {msg}')
def start_send(self):
    '''Ask the device to start streaming and spawn the collector thread.

    Vars:
        - @data: Where the data will be stored in;
        - @data_length: The accumulated length of the data;
        - @collecting: The flag of collecting.
    '''
    self._clear()
    self.collecting = True
    if self.simulationMode:
        self.sdg.reset()
        logger.debug(
            f'Not sending start sending message in simulation mode')
    else:
        # 'CTRL' packet, code 3 / request 3: start sending data.
        self.send(struct.pack('12B', 67, 84, 82,
                              76, 0, 3, 0, 3, 0, 0, 0, 0))
    # Daemon thread so a forgotten stop_send cannot keep the process alive.
    # (daemon=True replaces setDaemon(), deprecated since Python 3.10.)
    t = threading.Thread(target=self.collect, daemon=True)
    t.start()
def collect(self):
    '''Collector loop started by start_send.

    - Runs until [collecting] is set to False;
    - Logs the accumulated length when it lands on a whole second of data
      (data_length % sample_rate == 0 -- only hit when packet boundaries
      happen to align);
    - ConnectionAbortedError is expected when the socket is closed
      mid-read and simply ends the loop.
    '''
    logger.info('Collection Start.')
    while self.collecting:
        try:
            d = self.get_data()
            self._add(d)
            if self.data_length % self.sample_rate == 0:
                logger.debug(
                    f'Accumulated data length: {self.data_length}')
        except ConnectionAbortedError:
            logger.warning(
                'Connection to the device is closed. This can be normal if collecting is done.')
            break
    logger.info('Collection Done.')
def get_data(self):
    '''Fetch and decode the next data packet.

    The packet is in two parts:
        - header: 12 bytes whose last field is the body length;
        - data: the body, whose length should equal [bytes_per_packet].
    Outs:
        - new_data_temp: float matrix of shape ((n_channels + 1) x
          packet_time_point); the last row is the (zeroed) label channel.
    '''
    if self.simulationMode:
        # Emulate the device's pacing, then take one packet of samples.
        time.sleep(self.time_per_packet)
        new_data_temp = self.sdg.pop(self.packet_time_point)
    else:
        tmp_header = self.receive_data(12)
        details_header = self._unpack_header(tmp_header)
        if details_header[-1] != self.bytes_per_packet:
            # Was a bare print(); use the module logger like the rest of
            # the class.
            logger.warning(
                f'Warning, received data has {details_header[-1]} bytes, and required data should have {self.bytes_per_packet} bytes. The EEG channels setting may be incorrect')
        bytes_data = self.receive_data(self.bytes_per_packet)
        new_data_trans = self._unpack_data(bytes_data)
        # np.float was removed in NumPy 1.24; use an explicit float dtype.
        new_data_temp = np.empty(new_data_trans.shape, dtype=np.float64)
        new_data_temp[:-1, :] = new_data_trans[:-1, :] * 0.0298  # unit: uV
        new_data_temp[-1, :] = np.zeros(new_data_trans.shape[1])
    return new_data_temp
def get_all(self):
    '''Return every sample accumulated so far.

    Outs:
        - Matrix of shape (n_channels x data_length).
    '''
    filled = self.data_length
    return self.data[:, :filled]
def receive_data(self, n_bytes):
    '''Read exactly [n_bytes] bytes from the socket.

    Args:
        - @n_bytes: The length of the bytes to be fetched.
    Outs:
        - The received bytes; shorter than [n_bytes] only if the peer
          closed the connection early (matching the original behavior).
    '''
    # Accumulate into a bytearray: O(total) instead of quadratic bytes
    # concatenation, and without the original's extra zero-length recv()
    # issued after the count was already complete.
    buf = bytearray()
    while len(buf) < n_bytes:
        chunk = self.client.recv(n_bytes - len(buf))
        if not chunk:
            # Peer closed the connection; return what we have.
            break
        buf.extend(chunk)
    return bytes(buf)
def stop_send(self):
    '''Ask the device to stop streaming; the collector thread then stops.'''
    self.collecting = False
    # Give the collector loop a moment to observe the cleared flag.
    time.sleep(0.1)
    if self.simulationMode:
        logger.debug(f'Not send stop sending message in simulation mode')
    else:
        # 'CTRL' packet, code 3 / request 4: stop sending data.
        self.send(struct.pack('12B', 67, 84, 82,
                              76, 0, 3, 0, 4, 0, 0, 0, 0))
        # Drain one packet that may already be buffered on the socket.
        self.get_data()
def disconnect(self):
    '''Send stop-acquisition and close-connection requests, then close the socket.
    '''
    if self.simulationMode:
        logger.debug(
            f'Not send closing connection message in simulation mode')
    else:
        # Send stop acquisition request ('CTRL', code 2, request 2)
        self.send(struct.pack('12B', 67, 84, 82,
                              76, 0, 2, 0, 2, 0, 0, 0, 0))
        # Send close connection request ('CTRL', code 1, request 2)
        self.send(struct.pack('12B', 67, 84, 82,
                              76, 0, 1, 0, 2, 0, 0, 0, 0))
        self.client.close()
logger.info(f'Closed Connection to Device.') | BCIClient/neuroScanToolbox.py | import time
import struct
import socket
import threading
import numpy as np
from .BCIDecoder import generate_simulation_data
from . import logger
simulationMode = True
maxLength = 3600 # Seconds
class SimulationDataGenerator(object):
    '''Generate simulation data.

    Serves pre-generated EEG-like data as a circular buffer: pop() takes
    columns from the front, refilling from the pristine copy in [raw]
    when the current buffer is exhausted.
    '''

    def __init__(self):
        '''Generate the simulation data and keep a pristine copy for refills.'''
        self.all_data, _ = generate_simulation_data()
        self.raw = self.all_data.copy()
        self.ptr = 0
        logger.debug(f'Simulation data ({self.all_data.shape}) is generated.')

    def reset(self):
        '''Reset the generator so it behaves as freshly initialized.'''
        # pop() never writes into all_data, so sharing raw here is safe.
        self.all_data = self.raw
        self.ptr = 0
        logger.debug(
            f'Simulation restarted from begining, the data has been restored.')

    def pop(self, length=40):
        '''Pop the simulation data from the top [length].

        Args:
            - @length: The length to be popped from the top.
        Outs:
            - Matrix of shape (n_channels x length); wraps around to the
              start of [raw] when the current buffer runs out.
        '''
        available = self.all_data.shape[1]
        if length < available:
            d = self.all_data[:, :length]
            self.all_data = self.all_data[:, length:]
            return d
        if length == available:
            d = self.all_data.copy()
            self.all_data = self.raw
            logger.debug(f'Current data is empty, restart from begining.')
            return d
        if length > available:
            # BUG FIX: this branch was guarded by `length < shape[1]`, a
            # condition the first branch had already consumed, so the
            # wrap-around path was unreachable and oversized requests fell
            # through to the error return below.
            d0 = self.all_data.copy()
            logger.debug(f'Partly fetch data 1 for {d0.shape[1]}.')
            self.all_data = self.raw
            logger.debug(f'Current data is empty, restart from begining.')
            length -= d0.shape[1]
            d1 = self.all_data[:, :length]
            logger.debug(f'Partly fetch data 2 for {length}.')
            self.all_data = self.all_data[:, length:]
            return np.concatenate([d0, d1], axis=1)
        logger.error(f'Can not fetch data as request, length is "{length}"')
        return None
class NeuroScanDeviceClient(object):
'''NeuroScan Device Client.
The communication is in TCP socket,
the process is:
1.1. Connect to the device, @connect;
1.2. Send start scan request to device;
2. Start acquisition, @start_acq;
3. The device will send data every 0.04 seconds;
4. Stop acquisition, @stop_acq;
5.1. Send stop scan request to device;
5.2. Disconnect from the device, @disconnect.
'''
def __init__(self, ip_address, port, sample_rate, n_channels, time_per_packet=0.04, simulationMode=simulationMode, maxLength=maxLength, autoDetectLabelFlag=False, predict=None):
'''Initialize with Basic Parameters,
and connect to the device.
Args:
- @ip_address: The IP address of the device;
- @port: The port of the device;
- @sample_rate: The sample rate;
- @n_channels: The number of channels;
- @time_per_packet: The time gap between two packet from the device, the default value is 0.04 seconds;
- @simulationMode: If use simulation mode, in simulation mode, the EEG Device is ignored, the data will be automatically generated;
- @maxLength: The max length of the data, the unit is in seconds;
- @autoDetectLabelFlag: The flag of automatically detect 33 label, if it detected, the predict function will be called;
- @predict: Predict function, used for autoDetectLabelFlag, it will be called on independent thread when 33 label is detected.
'''
self.simulationMode = simulationMode
self.maxLength = maxLength
self.ip_address = ip_address
self.port = port
self.sample_rate = sample_rate
self.n_channels = n_channels
self.time_per_packet = time_per_packet
self.compute_bytes_per_package()
self._clear()
logger.info(f'EEG Device client initialized.')
if not simulationMode:
self.connect()
else:
self.sdg = SimulationDataGenerator()
logger.info(f'Simulation mode is used')
self.autoDetectLabelFlag = autoDetectLabelFlag
self.predict = predict
if self.autoDetectLabelFlag:
logger.debug(
f'Using auto detect label mode, when 33 received, the predict func will be called')
def _clear(self):
''' Clear data '''
self.data = np.zeros(
(self.n_channels, self.maxLength * self.sample_rate))
self.data_length = 0
logger.info(
f'Created new data pool as zero matrix of {self.data.shape}')
def _add(self, d):
''' Accumulate new data chunk [d] into data '''
n = d.shape[1]
if not self.data_length + n < self.data.shape[1]:
logger.error(
f'The time limit of the data is reached. New data is ignored.')
return
self.data[:, self.data_length:self.data_length+n] = d
self.data_length += n
if 33 in d[-1, :]:
self._predict()
def _predict(self):
t = threading.Thread(target=self.predict)
t.setDaemon(True)
t.start()
def compute_bytes_per_package(self):
'''Compute the length of bytes in every data packet
Generates:
- @packet_time_point: The time points in each packets;
- @bytes_per_packet: The bytes length in each packet.
'''
packet_time_point = int(
np.round(self.sample_rate * self.time_per_packet))
bytes_per_packet = (self.n_channels + 1) * packet_time_point * 4
self.packet_time_point = packet_time_point
self.bytes_per_packet = bytes_per_packet
def _unpack_data_fmt(self):
'''Generate built-in format for unpacking the data
Outs:
- The format.
'''
return '<' + str((self.n_channels + 1) * self.packet_time_point) + 'i'
def _unpack_header(self, header_packet):
'''The method of unpacking header.
Args:
- @header_packet: The header packet to be unpacked.
Outs:
- The contents in the header.
'''
chan_name = struct.unpack('>4s', header_packet[:4])
w_code = struct.unpack('>H', header_packet[4:6])
w_request = struct.unpack('>H', header_packet[6:8])
packet_size = struct.unpack('>I', header_packet[8:])
return (chan_name[0], w_code[0], w_request[0], packet_size[0])
def _unpack_data(self, data_packet):
'''The method of unpacking data.
Args:
- @data_packet: The data packet to be unpacked.
Outs:
- The data in matrix, the shape is (n_channels x time_points).
'''
data_trans = np.asarray(struct.unpack(self._unpack_data_fmt(),
data_packet)).reshape((-1, self.n_channels + 1)).T
return data_trans
def connect(self):
'''Connect to the device,
and start acquisition.
'''
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SEND_BUF_SIZE = self.bytes_per_packet
RECV_BUF_SIZE = self.bytes_per_packet * 9
self.client.connect((self.ip_address, self.port))
self.client.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.client.setsockopt(
socket.SOL_SOCKET, socket.SO_SNDBUF, SEND_BUF_SIZE)
self.client.setsockopt(
socket.SOL_SOCKET, socket.SO_RCVBUF, RECV_BUF_SIZE)
logger.info('Established the connection to EEG Device.')
# Send start acquisition request
self.send(struct.pack('12B', 67, 84, 82, 76, 0, 2, 0, 1, 0, 0, 0, 0))
# Receive reply
header_packet = self.receive_data(24)
logger.debug(f'Received reply for ACQ request: {header_packet}')
def send(self, msg):
'''Send message to the device.
Args:
- @msg: The message to be sent, it should be of bytes.
'''
self.client.send(msg)
logger.debug(f'Sent {msg}')
def start_send(self):
'''Send start sending message to the device.
A thread will be started to collecting data from the device.
Vars:
- @data: Where the data will be stored in;
- @data_length: The accumulated length of the data;
- @collecting: The flag of collecting.
'''
self._clear()
self.collecting = True
if self.simulationMode:
self.sdg.reset()
logger.debug(
f'Not sending start sending message in simulation mode')
else:
self.send(struct.pack('12B', 67, 84, 82,
76, 0, 3, 0, 3, 0, 0, 0, 0))
t = threading.Thread(target=self.collect)
t.setDaemon(True)
t.start()
def collect(self):
'''The collecting method used by start_acq.
- It will collect data until [collecting] is set to False;
- It will report data length every 1000 units;
- It may complain about connection aborted on close, it is fine.
'''
logger.info('Collection Start.')
while self.collecting:
try:
d = self.get_data()
self._add(d)
if self.data_length % self.sample_rate == 0:
logger.debug(
f'Accumulated data length: {self.data_length}')
except ConnectionAbortedError:
logger.warning(
'Connection to the device is closed. This can be normal if collecting is done.')
break
logger.info('Collection Done.')
def get_data(self):
'''Get the data form the latest packet.
The packet is in two parts:
- header: The latest separation shows the length of the data body;
- data: The data body;
- The length of the data body should be equal with the [bytes_per_packet] as prior computed.
Outs:
- new_data_temp: The latest data, the shape is (n_channels x time_points(0.04 seconds)).
'''
if self.simulationMode:
time.sleep(self.time_per_packet)
new_data_temp = self.sdg.pop(self.packet_time_point)
else:
tmp_header = self.receive_data(12)
details_header = self._unpack_header(tmp_header)
if details_header[-1] == self.bytes_per_packet:
pass
else:
print(
f'Warning, received data has {details_header[-1]} bytes, and required data should have {self.bytes_per_packet} bytes. The EEG channels setting may be incorrect')
bytes_data = self.receive_data(self.bytes_per_packet)
new_data_trans = self._unpack_data(bytes_data)
new_data_temp = np.empty(new_data_trans.shape, dtype=np.float)
new_data_temp[:-1, :] = new_data_trans[:-1, :] * 0.0298 # 单位 uV
new_data_temp[-1, :] = np.zeros(new_data_trans.shape[1])
return new_data_temp
def get_all(self):
'''Get the accumulated data as a matrix, the shape is (n_channels x time_points(accumulated)).
Outs:
- The accumulated data.
'''
return self.data[:, :self.data_length]
def receive_data(self, n_bytes):
'''The built-in method of receiving [n_bytes] length bytes from the device,
it will read the buffer until it reached to the [n_bytes] length.
Args:
- @n_bytes: The length of the bytes to be fetched.
Outs:
- The [n_bytes] length bytes.
'''
b_data = b''
flag_stop_recv = False
b_count = 0
while not flag_stop_recv:
tmp_bytes = self.client.recv(n_bytes - b_count)
if b_count == n_bytes or not tmp_bytes:
flag_stop_recv = True
b_count += len(tmp_bytes)
b_data += tmp_bytes
return b_data
def stop_send(self):
'''Send stopping sending message to the device,
and the collecting threading will be stopped accordingly,
it will also clear the existing contents in the buffer.
'''
self.collecting = False
time.sleep(0.1)
if self.simulationMode:
logger.debug(f'Not send stop sending message in simulation mode')
else:
self.send(struct.pack('12B', 67, 84, 82,
76, 0, 3, 0, 4, 0, 0, 0, 0))
self.get_data()
def disconnect(self):
'''Disconnect from the device.
'''
if self.simulationMode:
logger.debug(
f'Not send closing connection message in simulation mode')
else:
# Send stop acquisition request
self.send(struct.pack('12B', 67, 84, 82,
76, 0, 2, 0, 2, 0, 0, 0, 0))
# Send close connection request
self.send(struct.pack('12B', 67, 84, 82,
76, 0, 1, 0, 2, 0, 0, 0, 0))
self.client.close()
logger.info(f'Closed Connection to Device.') | 0.607197 | 0.22114 |
import pdb, os, glob, sys, torch, numpy as np, json
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
class SpaceNet(data.Dataset):
    """SpaceNet building-detection dataset exposed in a VOC-style layout.

    Annotation entries provide an image path plus a list of rects; entries
    with no rects are dropped at load time.
    """
    name = 'VOC'
    classes = ['building']

    def __init__(self, anno_file, transform=None):
        """Load the annotation file and index its non-empty entries.

        Args:
            anno_file: path to a JSON list of {image_path, rects} records;
                image paths are resolved relative to the file's directory.
            transform: optional callable applied as transform(img, target).
        """
        self.transform = transform
        self.root_dir = os.path.dirname(os.path.realpath(anno_file))
        # Close the file deterministically instead of json.load(open(...)).
        with open(anno_file, 'r') as fh:
            self.annos = json.load(fh)
        self.annos = list(filter(lambda x: len(x['rects']) > 0, self.annos))
        # NOTE(review): '<KEY>' looks like a redacted placeholder for the
        # real rect coordinate keys -- confirm against the annotation schema
        # (__getitem__ indexes target columns 0-3 as box coordinates).
        self.keys = ['<KEY>']

    def __len__(self):
        return len(self.annos)

    def __getitem__(self, idx):
        """Return (image, target, height, width) for sample [idx].

        Targets are rows of rect values plus a trailing class id 0; boxes
        narrower or shorter than 3 px are masked out, ~1/16 of samples are
        deliberately emptied, and empty targets become one dummy box with
        class -1.
        """
        anno = self.annos[idx]
        img = Image.open(os.path.join(self.root_dir, anno['image_path']))
        target = torch.Tensor([[r[k] for k in self.keys] + [0] for r in anno['rects']])
        mask = (target[:, 2] - target[:, 0] >= 3) & (target[:, 3] - target[:, 1] >= 3)
        if np.random.randint(0, 16) == 0:
            mask[:] = False
        target = target[mask.unsqueeze(1).expand_as(target)].view(-1, 5)
        if len(target) == 0:
            target = torch.Tensor([[2, 2, 3, 3, -1]])
        # BUG FIX: img_data was only bound inside the transform branch, so a
        # dataset built with transform=None raised NameError on return.
        img_data = img
        if self.transform:
            img_data, target = self.transform(img, target)
        return img_data, target, img.height, img.width
def detection_collate(batch):
    """Custom collate fn for batches whose images carry a varying number of
    object annotations (bounding boxes).

    Arguments:
        batch: iterable of (image_tensor, target, height, width) tuples.
    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (tuple of per-image targets, left unstacked)
    """
    images, annotations, _heights, _widths = zip(*batch)
    stacked = torch.stack(images, 0)
    return stacked, annotations
if __name__ == '__main__':
    # Smoke-test entry point. BUG FIX: the original constructed `VOC`, a
    # name never defined in this module -- the dataset class above is
    # SpaceNet (which expects an annotation-file path).
    ds = SpaceNet('../../data1/VOCdevkit/VOC2012')
    img, target, height, width = ds[0]
    pdb.set_trace()
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
class SpaceNet(data.Dataset):
name = 'VOC'
classes = ['building']
def __init__(self, anno_file, transform = None):
self.transform = transform
self.root_dir = os.path.dirname(os.path.realpath(anno_file))
self.annos = json.load(open(anno_file, 'r'))
self.annos = list(filter(lambda x: len(x['rects']) > 0, self.annos))
self.keys = ['<KEY>']
def __len__(self):
return len(self.annos)
def __getitem__(self, idx):
anno = self.annos[idx]
img = Image.open(os.path.join(self.root_dir, anno['image_path']))
target = torch.Tensor([[r[k] for k in self.keys] + [0] for r in anno['rects']])
mask = (target[:, 2] - target[:, 0] >= 3) & (target[:, 3] - target[:, 1] >= 3)
if np.random.randint(0, 16) == 0:
mask[:] = False
target = target[mask.unsqueeze(1).expand_as(target)].view(-1, 5)
if len(target) == 0:
target = torch.Tensor([[2, 2, 3, 3, -1]])
if self.transform:
img_data, target = self.transform(img, target)
return img_data, target, img.height, img.width
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
imgs, targets, heights, widths = zip(*batch)
return torch.stack(imgs, 0), targets
if __name__ == '__main__':
ds = VOC('../../data1/VOCdevkit/VOC2012')
img, target, height, width = ds[0]
pdb.set_trace() | 0.547706 | 0.439326 |
import os
import sys
from abc import ABC, abstractmethod
from importlib import import_module
from typing import Any
import torch
from albumentations import Compose
from omegaconf.dictconfig import DictConfig
from torch.nn import Module
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from riad.utils import EarlyStopping
class BaseRunner(ABC):
    """Config-driven training harness.

    Instantiates every training component (transforms, datasets, dataloaders,
    model, optimizer, scheduler, criterions, early stopping) from an OmegaConf
    config, then drives the epoch loop in run(). Subclasses supply the actual
    _train/_validate/_test steps.
    """

    def __init__(self, cfg: DictConfig) -> None:
        super().__init__()
        self.cfg = cfg
        # Per-split components, keyed by the config section names
        # (whatever splits the config declares, e.g. "train"/"val").
        self.transforms = {k: self._init_transforms(k) for k in self.cfg.transforms.keys()}
        self.datasets = {k: self._init_datasets(k) for k in self.cfg.datasets.keys()}
        self.dataloaders = {k: self._init_dataloaders(k) for k in self.cfg.dataloaders.keys()}
        self.model = self._init_model().to(self.cfg.params.device)
        self.optimizer = self._init_optimizer()
        self.scheduler = self._init_scheduler()
        self.criterions = {k: self._init_criterions(k) for k in self.cfg.criterions.keys()}
        self.early_stopping = self._init_early_stopping()

    def _init_transforms(self, key: str) -> Compose:
        """Build the albumentations Compose pipeline for config section [key]."""
        transforms = []
        for cfg in self.cfg.transforms[key]:
            attr = self._get_attr(cfg.name)
            transforms.append(attr(**cfg.get("args", {})))
        return Compose(transforms)

    def _init_datasets(self, key: str) -> Dataset:
        """Build the dataset for [key]; its transforms are injected by keyword."""
        cfg = self.cfg.datasets[key]
        attr = self._get_attr(cfg.name)
        return attr(**cfg.get("args", {}), transforms=self.transforms[key])

    def _init_dataloaders(self, key: str) -> DataLoader:
        """Build the dataloader for [key] over the matching dataset."""
        cfg = self.cfg.dataloaders[key]
        attr = self._get_attr(cfg.name)
        return attr(**cfg.get("args", {}), dataset=self.datasets[key])

    def _init_model(self) -> Module:
        """Instantiate the model named in the config."""
        cfg = self.cfg.model
        attr = self._get_attr(cfg.name)
        return attr(**cfg.get("args", {}))

    def _init_criterions(self, key: str) -> Module:
        """Instantiate the loss module for config section [key]."""
        cfg = self.cfg.criterions[key]
        attr = self._get_attr(cfg.name)
        return attr(**cfg.get("args", {}))

    def _init_optimizer(self) -> Optimizer:
        """Instantiate the optimizer over the model's parameters."""
        cfg = self.cfg.optimizer
        attr = self._get_attr(cfg.name)
        return attr(**cfg.get("args", {}), params=self.model.parameters())

    def _init_scheduler(self) -> _LRScheduler:
        """Instantiate the LR scheduler wrapping the optimizer."""
        cfg = self.cfg.scheduler
        attr = self._get_attr(cfg.name)
        return attr(**cfg.get("args", {}), optimizer=self.optimizer)

    def _init_early_stopping(self) -> EarlyStopping:
        """Instantiate the early-stopping tracker from the config."""
        cfg = self.cfg.early_stopping
        attr = self._get_attr(cfg.name)
        return attr(**cfg.get("args", {}))

    def _get_attr(self, name: str) -> Any:
        """Resolve a config name string to an attribute.

        The expected format is "module.path - attr_name" -- the separator
        is literally " - " (space, hyphen, space).
        """
        module_path, attr_name = name.split(" - ")
        module = import_module(module_path)
        return getattr(module, attr_name)

    def run(self) -> None:
        """Train for cfg.params.epochs epochs with validation-driven early stop.

        When early stopping triggers: save model.pth, dump test output under
        epochs/{epoch}/ and exit the process. Otherwise a test dump happens
        every 10 epochs, and the final weights are saved at the end.
        """
        pbar = tqdm(range(1, self.cfg.params.epochs + 1), desc="epochs")
        for epoch in pbar:
            self._train(epoch)
            val_loss = self._validate(epoch)
            self.scheduler.step()
            if self.early_stopping(val_loss):
                torch.save(self.model.state_dict(), "model.pth")
                # NOTE(review): makedirs without exist_ok=True raises if the
                # directory already exists (e.g. resumed runs) -- confirm.
                os.makedirs(f"epochs/{epoch}")
                self._test(epoch)
                print(f"Early stopped at {epoch} epoch")
                sys.exit(0)
            if epoch % 10 == 0:
                os.makedirs(f"epochs/{epoch}")
                self._test(epoch)
        torch.save(self.model.state_dict(), "model.pth")

    # Subclass hooks: one training epoch, one validation pass (returning the
    # monitored loss), and a test/inference dump for the given epoch.
    @abstractmethod
    def _train(self, epoch: int) -> None:
        raise NotImplementedError()

    @abstractmethod
    def _validate(self, epoch: int) -> float:
        raise NotImplementedError()

    @abstractmethod
    def _test(self, epoch: int) -> None:
raise NotImplementedError() | riad/runner/base_runner.py | import os
import sys
from abc import ABC, abstractmethod
from importlib import import_module
from typing import Any
import torch
from albumentations import Compose
from omegaconf.dictconfig import DictConfig
from torch.nn import Module
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from riad.utils import EarlyStopping
class BaseRunner(ABC):
def __init__(self, cfg: DictConfig) -> None:
super().__init__()
self.cfg = cfg
self.transforms = {k: self._init_transforms(k) for k in self.cfg.transforms.keys()}
self.datasets = {k: self._init_datasets(k) for k in self.cfg.datasets.keys()}
self.dataloaders = {k: self._init_dataloaders(k) for k in self.cfg.dataloaders.keys()}
self.model = self._init_model().to(self.cfg.params.device)
self.optimizer = self._init_optimizer()
self.scheduler = self._init_scheduler()
self.criterions = {k: self._init_criterions(k) for k in self.cfg.criterions.keys()}
self.early_stopping = self._init_early_stopping()
def _init_transforms(self, key: str) -> Compose:
transforms = []
for cfg in self.cfg.transforms[key]:
attr = self._get_attr(cfg.name)
transforms.append(attr(**cfg.get("args", {})))
return Compose(transforms)
def _init_datasets(self, key: str) -> Dataset:
cfg = self.cfg.datasets[key]
attr = self._get_attr(cfg.name)
return attr(**cfg.get("args", {}), transforms=self.transforms[key])
def _init_dataloaders(self, key: str) -> DataLoader:
cfg = self.cfg.dataloaders[key]
attr = self._get_attr(cfg.name)
return attr(**cfg.get("args", {}), dataset=self.datasets[key])
def _init_model(self) -> Module:
cfg = self.cfg.model
attr = self._get_attr(cfg.name)
return attr(**cfg.get("args", {}))
def _init_criterions(self, key: str) -> Module:
cfg = self.cfg.criterions[key]
attr = self._get_attr(cfg.name)
return attr(**cfg.get("args", {}))
def _init_optimizer(self) -> Optimizer:
cfg = self.cfg.optimizer
attr = self._get_attr(cfg.name)
return attr(**cfg.get("args", {}), params=self.model.parameters())
def _init_scheduler(self) -> _LRScheduler:
cfg = self.cfg.scheduler
attr = self._get_attr(cfg.name)
return attr(**cfg.get("args", {}), optimizer=self.optimizer)
def _init_early_stopping(self) -> EarlyStopping:
cfg = self.cfg.early_stopping
attr = self._get_attr(cfg.name)
return attr(**cfg.get("args", {}))
def _get_attr(self, name: str) -> Any:
module_path, attr_name = name.split(" - ")
module = import_module(module_path)
return getattr(module, attr_name)
def run(self) -> None:
pbar = tqdm(range(1, self.cfg.params.epochs + 1), desc="epochs")
for epoch in pbar:
self._train(epoch)
val_loss = self._validate(epoch)
self.scheduler.step()
if self.early_stopping(val_loss):
torch.save(self.model.state_dict(), "model.pth")
os.makedirs(f"epochs/{epoch}")
self._test(epoch)
print(f"Early stopped at {epoch} epoch")
sys.exit(0)
if epoch % 10 == 0:
os.makedirs(f"epochs/{epoch}")
self._test(epoch)
torch.save(self.model.state_dict(), "model.pth")
@abstractmethod
def _train(self, epoch: int) -> None:
raise NotImplementedError()
@abstractmethod
def _validate(self, epoch: int) -> float:
raise NotImplementedError()
@abstractmethod
def _test(self, epoch: int) -> None:
raise NotImplementedError() | 0.763131 | 0.1831 |
"""Account service tests."""
from unittest import TestCase
from six import PY3
from . import ICloudPyServiceMock
from .const import AUTHENTICATED_USER, VALID_PASSWORD
class AccountServiceTest(TestCase):
    """Account service tests."""

    # Replaced per-test by setUp with the mocked account service.
    service = None

    def setUp(self):
        """Build the mocked iCloud account service with valid credentials."""
        self.service = ICloudPyServiceMock(AUTHENTICATED_USER, VALID_PASSWORD).account

    def test_repr(self):
        """Tests representation."""
        # Expected values below mirror the mock fixture payload exactly.
        # fmt: off
        assert repr(self.service) == "<AccountService: {devices: 2, family: 3, storage: 3020076244 bytes free}>"
        # fmt: on

    def test_devices(self):
        """Tests devices."""
        assert self.service.devices
        assert len(self.service.devices) == 2
        for device in self.service.devices:
            # Attribute-style accessors.
            assert device.name
            assert device.model
            assert device.udid
            # Raw mapping-style access to the underlying payload keys.
            assert device["serialNumber"]
            assert device["osVersion"]
            assert device["modelLargePhotoURL2x"]
            assert device["modelLargePhotoURL1x"]
            assert device["paymentMethods"]
            assert device["name"]
            assert device["model"]
            assert device["udid"]
            assert device["modelSmallPhotoURL2x"]
            assert device["modelSmallPhotoURL1x"]
            assert device["modelDisplayName"]
            # fmt: off
            if PY3:
                assert repr(device) == "<AccountDevice: {model: "+device.model_display_name+", name: "+device.name+"}>"
            # fmt: on

    def test_family(self):
        """Tests family members."""
        assert self.service.family
        assert len(self.service.family) == 3
        for member in self.service.family:
            assert member.last_name
            assert member.dsid
            assert member.original_invitation_email
            assert member.full_name
            assert member.age_classification
            assert member.apple_id_for_purchases
            assert member.apple_id
            assert member.first_name
            # Boolean flags expected to be disabled in the fixture.
            assert not member.has_screen_time_enabled
            assert not member.has_ask_to_buy_enabled
            assert not member.share_my_location_enabled_family_members
            assert member.dsid_for_purchases
            # fmt: off
            # pylint: disable=C0301
            assert repr(member) == "<FamilyMember: {name: "+member.full_name+", age_classification: "+member.age_classification+"}>"
            # fmt: on

    def test_storage(self):
        """Tests storage."""
        assert self.service.storage
        # fmt: off
        if PY3:
            # pylint: disable=C0301
            assert repr(self.service.storage) == "<AccountStorage: {usage: 43.75% used of 5368709120 bytes, usages_by_media: OrderedDict([('photos', <AccountStorageUsageForMedia: {key: photos, usage: 0 bytes}>), ('backup', <AccountStorageUsageForMedia: {key: backup, usage: 799008186 bytes}>), ('docs', <AccountStorageUsageForMedia: {key: docs, usage: 449092146 bytes}>), ('mail', <AccountStorageUsageForMedia: {key: mail, usage: 1101522944 bytes}>)])}>"
        # fmt: on

    def test_storage_usage(self):
        """Tests storage usage."""
        assert self.service.storage.usage
        usage = self.service.storage.usage
        # Byte counters may legitimately be zero, hence the `or == 0` form.
        assert usage.comp_storage_in_bytes or usage.comp_storage_in_bytes == 0
        assert usage.used_storage_in_bytes
        assert usage.used_storage_in_percent
        assert usage.available_storage_in_bytes
        assert usage.available_storage_in_percent
        assert usage.total_storage_in_bytes
        assert usage.commerce_storage_in_bytes or usage.commerce_storage_in_bytes == 0
        assert not usage.quota_over
        assert not usage.quota_tier_max
        assert not usage.quota_almost_full
        assert not usage.quota_paid
        # fmt: off
        # pylint: disable=C0301
        assert repr(usage) == "<AccountStorageUsage: "+str(usage.used_storage_in_percent)+"% used of "+str(usage.total_storage_in_bytes)+" bytes>"
        # fmt: on

    def test_storage_usages_by_media(self):
        """Tests storage usages by media."""
        assert self.service.storage.usages_by_media
        for usage_media in self.service.storage.usages_by_media.values():
            assert usage_media.key
            assert usage_media.label
            assert usage_media.color
            assert usage_media.usage_in_bytes or usage_media.usage_in_bytes == 0
            # fmt: off
            # pylint: disable=C0301
            assert repr(usage_media) == "<AccountStorageUsageForMedia: {key: "+usage_media.key+", usage: "+str(usage_media.usage_in_bytes)+" bytes}>"
            # fmt: on
# fmt: on | tests/test_account.py | """Account service tests."""
from unittest import TestCase
from six import PY3
from . import ICloudPyServiceMock
from .const import AUTHENTICATED_USER, VALID_PASSWORD
class AccountServiceTest(TestCase):
    """Account service tests."""

    # Populated per-test in setUp with the mocked account service.
    service = None

    def setUp(self):
        self.service = ICloudPyServiceMock(AUTHENTICATED_USER, VALID_PASSWORD).account

    def test_repr(self):
        """Tests representation."""
        # fmt: off
        assert repr(self.service) == "<AccountService: {devices: 2, family: 3, storage: 3020076244 bytes free}>"
        # fmt: on

    def test_devices(self):
        """Tests devices."""
        assert self.service.devices
        assert len(self.service.devices) == 2
        for device in self.service.devices:
            # Attribute accessors mirror the raw dict entries checked below.
            assert device.name
            assert device.model
            assert device.udid
            assert device["serialNumber"]
            assert device["osVersion"]
            assert device["modelLargePhotoURL2x"]
            assert device["modelLargePhotoURL1x"]
            assert device["paymentMethods"]
            assert device["name"]
            assert device["model"]
            assert device["udid"]
            assert device["modelSmallPhotoURL2x"]
            assert device["modelSmallPhotoURL1x"]
            assert device["modelDisplayName"]
            # fmt: off
            if PY3:
                assert repr(device) == "<AccountDevice: {model: "+device.model_display_name+", name: "+device.name+"}>"
            # fmt: on

    def test_family(self):
        """Tests family members."""
        assert self.service.family
        assert len(self.service.family) == 3
        for member in self.service.family:
            assert member.last_name
            assert member.dsid
            assert member.original_invitation_email
            assert member.full_name
            assert member.age_classification
            assert member.apple_id_for_purchases
            assert member.apple_id
            assert member.first_name
            # Mock fixtures have these features disabled for all members.
            assert not member.has_screen_time_enabled
            assert not member.has_ask_to_buy_enabled
            assert not member.share_my_location_enabled_family_members
            assert member.dsid_for_purchases
            # fmt: off
            # pylint: disable=C0301
            assert repr(member) == "<FamilyMember: {name: "+member.full_name+", age_classification: "+member.age_classification+"}>"
            # fmt: on

    def test_storage(self):
        """Tests storage."""
        assert self.service.storage
        # fmt: off
        if PY3:
            # pylint: disable=C0301
            assert repr(self.service.storage) == "<AccountStorage: {usage: 43.75% used of 5368709120 bytes, usages_by_media: OrderedDict([('photos', <AccountStorageUsageForMedia: {key: photos, usage: 0 bytes}>), ('backup', <AccountStorageUsageForMedia: {key: backup, usage: 799008186 bytes}>), ('docs', <AccountStorageUsageForMedia: {key: docs, usage: 449092146 bytes}>), ('mail', <AccountStorageUsageForMedia: {key: mail, usage: 1101522944 bytes}>)])}>"
        # fmt: on

    def test_storage_usage(self):
        """Tests storage usage."""
        assert self.service.storage.usage
        usage = self.service.storage.usage
        # These byte counters may legitimately be 0, so accept falsy-but-zero.
        assert usage.comp_storage_in_bytes or usage.comp_storage_in_bytes == 0
        assert usage.used_storage_in_bytes
        assert usage.used_storage_in_percent
        assert usage.available_storage_in_bytes
        assert usage.available_storage_in_percent
        assert usage.total_storage_in_bytes
        assert usage.commerce_storage_in_bytes or usage.commerce_storage_in_bytes == 0
        # Quota flags are all off in the mock fixture.
        assert not usage.quota_over
        assert not usage.quota_tier_max
        assert not usage.quota_almost_full
        assert not usage.quota_paid
        # fmt: off
        # pylint: disable=C0301
        assert repr(usage) == "<AccountStorageUsage: "+str(usage.used_storage_in_percent)+"% used of "+str(usage.total_storage_in_bytes)+" bytes>"
        # fmt: on

    def test_storage_usages_by_media(self):
        """Tests storage usages by media."""
        assert self.service.storage.usages_by_media
        for usage_media in self.service.storage.usages_by_media.values():
            assert usage_media.key
            assert usage_media.label
            assert usage_media.color
            assert usage_media.usage_in_bytes or usage_media.usage_in_bytes == 0
            # fmt: off
            # pylint: disable=C0301
            assert repr(usage_media) == "<AccountStorageUsageForMedia: {key: "+usage_media.key+", usage: "+str(usage_media.usage_in_bytes)+" bytes}>"
# fmt: on | 0.755817 | 0.490602 |
from django.core.validators import FileExtensionValidator
from django.db import models
from src.base.services import get_path_upload_avatar, validate_size_img
class AuthUser(models.Model):
    """Application user model (profile data keyed by unique email)."""
    email = models.EmailField(max_length=150, unique=True)
    join_date = models.DateTimeField(auto_now_add=True)
    country = models.CharField(max_length=30, blank=True, null=True)
    city = models.CharField(max_length=30, blank=True, null=True)
    bio = models.TextField(max_length=2000, blank=True, null=True)
    display_name = models.CharField(max_length=30, blank=True, null=True)
    avatar = models.ImageField(
        upload_to=get_path_upload_avatar,
        blank=True,
        null=True,
        # Only .jpg uploads are accepted; validate_size_img enforces a size cap.
        validators=[FileExtensionValidator(allowed_extensions=['jpg']),
                    validate_size_img]
    )

    @property
    def is_authenticated(self):
        """Always returns True. Signals that this user has been authenticated."""
        return True

    def __str__(self):
        return self.email

    def __repr__(self):
        return f'{self.__class__.__name__}({self.email})'
class Follower(models.Model):
    """Subscription link: ``subscriber`` follows ``user``."""
    user = models.ForeignKey('AuthUser',
                             on_delete=models.CASCADE,
                             related_name='owner')
    subscriber = models.ForeignKey('AuthUser',
                                   on_delete=models.CASCADE,
                                   related_name='subscribers')

    def __str__(self):
        # Runtime string is intentionally Russian ("X is subscribed to Y").
        return f'{self.subscriber} подписан на {self.user}'

    def __repr__(self):
        return f'{self.__class__.__name__}({self.user})'
class SocialLink(models.Model):
    """A social-network URL attached to a user."""
    user = models.ForeignKey('AuthUser',
                             on_delete=models.CASCADE,
                             related_name='social_links')
    link = models.URLField(max_length=500)

    def __str__(self):
        return f'{self.user}'
def __repr__(self):
return f'{self.__class__.__name__}({self.user})' | src/oauth/models.py | from django.core.validators import FileExtensionValidator
from django.db import models
from src.base.services import get_path_upload_avatar, validate_size_img
class AuthUser(models.Model):
    """Application user model (profile data keyed by unique email)."""
    email = models.EmailField(max_length=150, unique=True)
    join_date = models.DateTimeField(auto_now_add=True)
    country = models.CharField(max_length=30, blank=True, null=True)
    city = models.CharField(max_length=30, blank=True, null=True)
    bio = models.TextField(max_length=2000, blank=True, null=True)
    display_name = models.CharField(max_length=30, blank=True, null=True)
    avatar = models.ImageField(
        upload_to=get_path_upload_avatar,
        blank=True,
        null=True,
        # Only .jpg uploads are accepted; validate_size_img enforces a size cap.
        validators=[FileExtensionValidator(allowed_extensions=['jpg']),
                    validate_size_img]
    )

    @property
    def is_authenticated(self):
        """Always returns True. Signals that this user has been authenticated."""
        return True

    def __str__(self):
        return self.email

    def __repr__(self):
        return f'{self.__class__.__name__}({self.email})'
class Follower(models.Model):
    """Subscription link: ``subscriber`` follows ``user``."""
    user = models.ForeignKey('AuthUser',
                             on_delete=models.CASCADE,
                             related_name='owner')
    subscriber = models.ForeignKey('AuthUser',
                                   on_delete=models.CASCADE,
                                   related_name='subscribers')

    def __str__(self):
        # Runtime string is intentionally Russian ("X is subscribed to Y").
        return f'{self.subscriber} подписан на {self.user}'

    def __repr__(self):
        return f'{self.__class__.__name__}({self.user})'
class SocialLink(models.Model):
    """A social-network URL attached to a user."""
    user = models.ForeignKey('AuthUser',
                             on_delete=models.CASCADE,
                             related_name='social_links')
    link = models.URLField(max_length=500)

    def __str__(self):
        return f'{self.user}'
def __repr__(self):
return f'{self.__class__.__name__}({self.user})' | 0.550607 | 0.077657 |
import json
from django.conf import settings
from django.db import models
from django.template.defaultfilters import slugify
from fernet_fields import EncryptedCharField
from fernet_fields import EncryptedTextField
from polymorphic.models import PolymorphicModel
import yaml
class DateNameAwareModel(models.Model):
    """Abstract base adding a name plus created/updated timestamps."""
    # Automatically add timestamps when object is created
    added = models.DateTimeField(auto_now_add=True)
    # Automatically add timestamps when object is updated
    updated = models.DateTimeField(auto_now=True)
    name = models.CharField(max_length=60)

    class Meta:
        abstract = True

    def __str__(self):
        return "{0}".format(self.name)
class Cloud(PolymorphicModel):
    """Base polymorphic model for a cloud provider; slug of name is the PK."""
    name = models.CharField(max_length=60)
    id = models.SlugField(max_length=50, primary_key=True)
    access_instructions_url = models.URLField(max_length=2048, blank=True,
                                              null=True)

    def save(self, *args, **kwargs):
        if not self.id:
            # Newly created object, so set slug
            self.id = slugify(self.name)
        super(Cloud, self).save(*args, **kwargs)

    def __str__(self):
        return "{0} ({1})".format(self.name, self.id)

    class Meta:
        ordering = ['name']
        verbose_name = "Cloud"
        verbose_name_plural = "Clouds"
class AWSCloud(Cloud):
    """Amazon Web Services cloud (no fields beyond the ``Cloud`` base)."""
    class Meta:
        verbose_name = "Amazon Web Services"
        verbose_name_plural = "Amazon Web Services"
class AzureCloud(Cloud):
    """Microsoft Azure cloud (no fields beyond the ``Cloud`` base)."""
    class Meta:
        verbose_name = "Azure"
        verbose_name_plural = "Azure"
class GCPCloud(Cloud):
    """Google Cloud Platform cloud (no fields beyond the ``Cloud`` base)."""
    class Meta:
        verbose_name = "Google Cloud Platform"
        verbose_name_plural = "Google Cloud Platform"
class OpenStackCloud(Cloud):
    """OpenStack cloud with a Keystone auth endpoint and API version."""
    KEYSTONE_VERSION_CHOICES = (
        ('v2.0', 'v2.0'),
        ('v3.0', 'v3.0'))
    auth_url = models.CharField(max_length=255, blank=False, null=False)
    identity_api_version = models.CharField(
        max_length=10, blank=True, null=True, choices=KEYSTONE_VERSION_CHOICES)

    class Meta:
        verbose_name = "OpenStack"
        verbose_name_plural = "OpenStack"
class Region(PolymorphicModel):
    """A provider region belonging to a ``Cloud``; slug of name is region_id."""
    cloud = models.ForeignKey('Cloud', models.CASCADE,
                              related_name='regions')
    name = models.CharField(
        max_length=60, verbose_name="Region name",
        help_text="This is the name of the region as understood by the cloud "
                  "provider and is required. e.g. us-east-1")
    region_id = models.SlugField(
        max_length=50, verbose_name="Region id",
        help_text="This is the id for the region and is used in the ReST url.")
    cloudbridge_settings = models.TextField(
        max_length=1024 * 16, blank=True, null=True,
        help_text="Extra settings to pass to the cloudbridge provider")

    def __str__(self):
        return "{0} ({1})".format(self.name, self.cloud.name)

    def save(self, *args, **kwargs):
        # Validate that cloudbridge_settings parses as YAML before saving.
        if self.cloudbridge_settings:
            try:
                yaml.safe_load(self.cloudbridge_settings)
            except Exception as e:
                # Bug fix: the two adjacent literals previously concatenated to
                # "...CloudBridge settingsmust be..." (missing space).
                raise Exception("Invalid YAML syntax. CloudBridge settings "
                                "must be in YAML format. Cause: {0}".format(e))
        if not self.region_id:
            # Newly created object, so set slug
            self.region_id = slugify(self.name)
        super(Region, self).save(*args, **kwargs)

    class Meta:
        ordering = ['name']
        unique_together = (("cloud", "region_id"),)
class AWSRegion(Region):
    """AWS region with optional custom EC2/S3 endpoints for compatible clouds."""
    ec2_endpoint_url = models.CharField(
        max_length=255, blank=True, null=True, verbose_name="EC2 endpoint url",
        help_text="This field should be left blank unless using a custom "
                  "endpoint for an AWS compatible cloud.")
    ec2_is_secure = models.BooleanField(default=True,
                                        verbose_name="EC2 is secure")
    ec2_validate_certs = models.BooleanField(
        default=True, verbose_name="EC2 validate certificates")
    s3_endpoint_url = models.CharField(max_length=255, blank=True, null=True,
                                       verbose_name="S3 endpoint url")
    s3_is_secure = models.BooleanField(default=True,
                                       verbose_name="S3 is secure")
    s3_validate_certs = models.BooleanField(
        default=True, verbose_name="S3 validate certificates")

    class Meta:
        verbose_name = "AWS Region"
        verbose_name_plural = "AWS Regions"
class AzureRegion(Region):
    """Azure region (no fields beyond the ``Region`` base)."""
    class Meta:
        verbose_name = "Azure"
        verbose_name_plural = "Azure"
class GCPRegion(Region):
    """GCP region (no fields beyond the ``Region`` base)."""
    class Meta:
        verbose_name = "GCP"
        verbose_name_plural = "GCP"
class OpenStackRegion(Region):
    """OpenStack region (no fields beyond the ``Region`` base)."""
    class Meta:
        verbose_name = "OpenStack Region"
        verbose_name_plural = "OpenStack Regions"
class Zone(models.Model):
    """Availability zone within a ``Region``; slug of name is zone_id."""
    zone_id = models.SlugField(max_length=50, verbose_name="Zone id")
    region = models.ForeignKey('Region', models.CASCADE,
                               related_name='zones')
    name = models.CharField(max_length=60, verbose_name="Zone name",
                            blank=True, null=True)

    def __str__(self):
        # Fully-qualified path: <cloud>.<region>.<zone>
        region = self.region
        return "{0}.{1}.{2}".format(region.cloud.id, region.region_id, self.zone_id)

    def save(self, *args, **kwargs):
        if not self.zone_id:
            # Newly created object, so set slug
            self.zone_id = slugify(self.name)
        super(Zone, self).save(*args, **kwargs)

    class Meta:
        ordering = ['name']
        unique_together = (("region", "zone_id"),)
class Credentials(PolymorphicModel, DateNameAwareModel):
    """Base polymorphic credentials record owned by a ``UserProfile``."""
    user_profile = models.ForeignKey('UserProfile', models.CASCADE,
                                     related_name='credentials')

    def to_dict(self):
        """Return a minimal dict representation (id and name)."""
        return {'id': self.id,
                'name': self.name,
                }
class CloudCredentials(Credentials):
    """Credentials tied to a specific ``Cloud``; at most one default per user+cloud."""
    default = models.BooleanField(
        help_text="If set, use as default credentials for the selected cloud",
        blank=True, default=False)
    cloud = models.ForeignKey('Cloud', models.CASCADE,
                              related_name='credentials')

    def save(self, *args, **kwargs):
        """Save, demoting any previous default for this (cloud, user_profile)."""
        # Ensure only 1 set of credentials is selected as the 'default' for
        # the current cloud.
        # This is not atomic but don't know how to enforce it at the
        # DB level directly.
        if self.default is True:
            previous_default = CloudCredentials.objects.filter(
                cloud=self.cloud, default=True,
                user_profile=self.user_profile).first()
            if previous_default:
                previous_default.default = False
                previous_default.save()
        # Bug fix: forward save() arguments (e.g. using=, update_fields=) to
        # the parent instead of silently dropping them.
        return super(CloudCredentials, self).save(*args, **kwargs)

    def to_dict(self):
        """Return a dict representation including cloud linkage and default flag."""
        return {'id': self.id,
                'name': self.name,
                'default': self.default,
                'cloud_id': self.cloud_id
                }
class AWSCredentials(CloudCredentials):
    """AWS access/secret key pair; the secret is stored encrypted."""
    aws_access_key = models.CharField(max_length=50, blank=False, null=False)
    aws_secret_key = EncryptedCharField(max_length=50, blank=False, null=False)

    class Meta:
        verbose_name = "AWS Credential"
        verbose_name_plural = "AWS Credentials"

    def to_dict(self):
        """Return a dict representation including the decrypted key pair."""
        d = super(AWSCredentials, self).to_dict()
        d['aws_access_key'] = self.aws_access_key
        d['aws_secret_key'] = self.aws_secret_key
        return d
class OpenStackCredentials(CloudCredentials):
    """OpenStack (Keystone) credentials; the password is stored encrypted."""
    os_username = models.CharField(max_length=50, blank=False, null=False)
    os_password = EncryptedCharField(max_length=50, blank=False, null=False)
    os_project_name = models.CharField(max_length=50, blank=False, null=False)
    os_project_domain_id = models.CharField(max_length=50, blank=True,
                                            null=True)
    os_project_domain_name = models.CharField(max_length=50, blank=True,
                                              null=True)
    os_user_domain_name = models.CharField(max_length=50, blank=True,
                                           null=True)

    class Meta:
        verbose_name = "OpenStack Credential"
        verbose_name_plural = "OpenStack Credentials"

    def to_dict(self):
        """Return a dict of the credentials, omitting unset optional fields."""
        d = super(OpenStackCredentials, self).to_dict()
        d['os_username'] = self.os_username
        # Bug fix: this line was the literal placeholder `<PASSWORD>` (a
        # syntax error); emit the stored password like the other fields.
        d['os_password'] = self.os_password
        if self.os_project_name:
            d['os_project_name'] = self.os_project_name
        if self.os_project_domain_id:
            d['os_project_domain_id'] = self.os_project_domain_id
        if self.os_project_domain_name:
            d['os_project_domain_name'] = self.os_project_domain_name
        if self.os_user_domain_name:
            d['os_user_domain_name'] = self.os_user_domain_name
        return d
class GCPCredentials(CloudCredentials):
    """GCP service-account credentials stored as an encrypted JSON blob."""
    gcp_service_creds_dict = EncryptedTextField(blank=False, null=False)
    gcp_vm_default_username = models.CharField(max_length=100, blank=False,
                                               null=False, default='cbuser')

    def save(self, *args, **kwargs):
        # Validate that the credentials blob parses as JSON before saving.
        if self.gcp_service_creds_dict:
            try:
                json.loads(self.gcp_service_creds_dict)
            except Exception as e:
                raise Exception("Invalid JSON syntax. GCP Credentials must be"
                                " in JSON format. Cause: {0}".format(e))
        super(GCPCredentials, self).save(*args, **kwargs)

    class Meta:
        verbose_name = "GCP Credential"
        verbose_name_plural = "GCP Credentials"

    def to_dict(self):
        """Return a dict representation with the creds parsed back to a dict."""
        gcp_creds = super(GCPCredentials, self).to_dict()
        gcp_creds['gcp_service_creds_dict'] = json.loads(self.gcp_service_creds_dict)
        gcp_creds['gcp_vm_default_username'] = self.gcp_vm_default_username
        return gcp_creds
class AzureCredentials(CloudCredentials):
    """Azure service-principal credentials; the client secret is encrypted."""
    azure_subscription_id = models.CharField(max_length=50, blank=False,
                                             null=False)
    azure_client_id = models.CharField(max_length=50, blank=False, null=False)
    azure_secret = EncryptedCharField(max_length=50, blank=False, null=False)
    azure_tenant = models.CharField(max_length=50, blank=True, null=True)
    azure_resource_group = models.CharField(max_length=64, blank=False,
                                            null=False, default='cloudbridge')
    azure_storage_account = models.CharField(max_length=24, blank=False,
                                             null=False, default='cbstorage')
    azure_vm_default_username = models.CharField(max_length=100, blank=False,
                                                 null=False, default='cbuser')

    class Meta:
        verbose_name = "Azure Credential"
        verbose_name_plural = "Azure Credentials"

    def to_dict(self):
        """Return a dict representation including the decrypted secret."""
        d = super(AzureCredentials, self).to_dict()
        d['azure_subscription_id'] = self.azure_subscription_id
        d['azure_client_id'] = self.azure_client_id
        d['azure_secret'] = self.azure_secret
        d['azure_tenant'] = self.azure_tenant
        d['azure_resource_group'] = self.azure_resource_group
        d['azure_storage_account'] = self.azure_storage_account
        d['azure_vm_default_username'] = self.azure_vm_default_username
        return d
class UserProfile(models.Model):
    """Profile linked 1:1 to the auth user; slug of username is the PK."""
    # Link UserProfile to a User model instance
    user = models.OneToOneField(settings.AUTH_USER_MODEL, models.CASCADE)
    slug = models.SlugField(unique=True, primary_key=True, editable=False)

    class Meta:
        verbose_name = "User Profile"
        verbose_name_plural = "User Profiles"

    def __str__(self):
        return "{0} ({1} {2})".format(self.user.username, self.user.first_name,
                                      self.user.last_name)

    def save(self, *args, **kwargs):
        # Derive the slug primary key from the username on first save.
        if not self.slug:
            self.slug = slugify(self.user.username)
super(UserProfile, self).save(*args, **kwargs) | djcloudbridge/models.py | import json
from django.conf import settings
from django.db import models
from django.template.defaultfilters import slugify
from fernet_fields import EncryptedCharField
from fernet_fields import EncryptedTextField
from polymorphic.models import PolymorphicModel
import yaml
class DateNameAwareModel(models.Model):
# Automatically add timestamps when object is created
added = models.DateTimeField(auto_now_add=True)
# Automatically add timestamps when object is updated
updated = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=60)
class Meta:
abstract = True
def __str__(self):
return "{0}".format(self.name)
class Cloud(PolymorphicModel):
name = models.CharField(max_length=60)
id = models.SlugField(max_length=50, primary_key=True)
access_instructions_url = models.URLField(max_length=2048, blank=True,
null=True)
def save(self, *args, **kwargs):
if not self.id:
# Newly created object, so set slug
self.id = slugify(self.name)
super(Cloud, self).save(*args, **kwargs)
def __str__(self):
return "{0} ({1})".format(self.name, self.id)
class Meta:
ordering = ['name']
verbose_name = "Cloud"
verbose_name_plural = "Clouds"
class AWSCloud(Cloud):
class Meta:
verbose_name = "Amazon Web Services"
verbose_name_plural = "Amazon Web Services"
class AzureCloud(Cloud):
class Meta:
verbose_name = "Azure"
verbose_name_plural = "Azure"
class GCPCloud(Cloud):
class Meta:
verbose_name = "Google Cloud Platform"
verbose_name_plural = "Google Cloud Platform"
class OpenStackCloud(Cloud):
KEYSTONE_VERSION_CHOICES = (
('v2.0', 'v2.0'),
('v3.0', 'v3.0'))
auth_url = models.CharField(max_length=255, blank=False, null=False)
identity_api_version = models.CharField(
max_length=10, blank=True, null=True, choices=KEYSTONE_VERSION_CHOICES)
class Meta:
verbose_name = "OpenStack"
verbose_name_plural = "OpenStack"
class Region(PolymorphicModel):
    """A provider region belonging to a ``Cloud``; slug of name is region_id."""
    cloud = models.ForeignKey('Cloud', models.CASCADE,
                              related_name='regions')
    name = models.CharField(
        max_length=60, verbose_name="Region name",
        help_text="This is the name of the region as understood by the cloud "
                  "provider and is required. e.g. us-east-1")
    region_id = models.SlugField(
        max_length=50, verbose_name="Region id",
        help_text="This is the id for the region and is used in the ReST url.")
    cloudbridge_settings = models.TextField(
        max_length=1024 * 16, blank=True, null=True,
        help_text="Extra settings to pass to the cloudbridge provider")

    def __str__(self):
        return "{0} ({1})".format(self.name, self.cloud.name)

    def save(self, *args, **kwargs):
        # Validate that cloudbridge_settings parses as YAML before saving.
        if self.cloudbridge_settings:
            try:
                yaml.safe_load(self.cloudbridge_settings)
            except Exception as e:
                # Bug fix: the two adjacent literals previously concatenated to
                # "...CloudBridge settingsmust be..." (missing space).
                raise Exception("Invalid YAML syntax. CloudBridge settings "
                                "must be in YAML format. Cause: {0}".format(e))
        if not self.region_id:
            # Newly created object, so set slug
            self.region_id = slugify(self.name)
        super(Region, self).save(*args, **kwargs)

    class Meta:
        ordering = ['name']
        unique_together = (("cloud", "region_id"),)
class AWSRegion(Region):
ec2_endpoint_url = models.CharField(
max_length=255, blank=True, null=True, verbose_name="EC2 endpoint url",
help_text="This field should be left blank unless using a custom "
"endpoint for an AWS compatible cloud.")
ec2_is_secure = models.BooleanField(default=True,
verbose_name="EC2 is secure")
ec2_validate_certs = models.BooleanField(
default=True, verbose_name="EC2 validate certificates")
s3_endpoint_url = models.CharField(max_length=255, blank=True, null=True,
verbose_name="S3 endpoint url")
s3_is_secure = models.BooleanField(default=True,
verbose_name="S3 is secure")
s3_validate_certs = models.BooleanField(
default=True, verbose_name="S3 validate certificates")
class Meta:
verbose_name = "AWS Region"
verbose_name_plural = "AWS Regions"
class AzureRegion(Region):
class Meta:
verbose_name = "Azure"
verbose_name_plural = "Azure"
class GCPRegion(Region):
class Meta:
verbose_name = "GCP"
verbose_name_plural = "GCP"
class OpenStackRegion(Region):
class Meta:
verbose_name = "OpenStack Region"
verbose_name_plural = "OpenStack Regions"
class Zone(models.Model):
zone_id = models.SlugField(max_length=50, verbose_name="Zone id")
region = models.ForeignKey('Region', models.CASCADE,
related_name='zones')
name = models.CharField(max_length=60, verbose_name="Zone name",
blank=True, null=True)
def __str__(self):
region = self.region
return "{0}.{1}.{2}".format(region.cloud.id, region.region_id, self.zone_id)
def save(self, *args, **kwargs):
if not self.zone_id:
# Newly created object, so set slug
self.zone_id = slugify(self.name)
super(Zone, self).save(*args, **kwargs)
class Meta:
ordering = ['name']
unique_together = (("region", "zone_id"),)
class Credentials(PolymorphicModel, DateNameAwareModel):
user_profile = models.ForeignKey('UserProfile', models.CASCADE,
related_name='credentials')
def to_dict(self):
return {'id': self.id,
'name': self.name,
}
class CloudCredentials(Credentials):
    """Credentials tied to a specific ``Cloud``; at most one default per user+cloud."""
    default = models.BooleanField(
        help_text="If set, use as default credentials for the selected cloud",
        blank=True, default=False)
    cloud = models.ForeignKey('Cloud', models.CASCADE,
                              related_name='credentials')

    def save(self, *args, **kwargs):
        """Save, demoting any previous default for this (cloud, user_profile)."""
        # Ensure only 1 set of credentials is selected as the 'default' for
        # the current cloud.
        # This is not atomic but don't know how to enforce it at the
        # DB level directly.
        if self.default is True:
            previous_default = CloudCredentials.objects.filter(
                cloud=self.cloud, default=True,
                user_profile=self.user_profile).first()
            if previous_default:
                previous_default.default = False
                previous_default.save()
        # Bug fix: forward save() arguments (e.g. using=, update_fields=) to
        # the parent instead of silently dropping them.
        return super(CloudCredentials, self).save(*args, **kwargs)

    def to_dict(self):
        """Return a dict representation including cloud linkage and default flag."""
        return {'id': self.id,
                'name': self.name,
                'default': self.default,
                'cloud_id': self.cloud_id
                }
class AWSCredentials(CloudCredentials):
aws_access_key = models.CharField(max_length=50, blank=False, null=False)
aws_secret_key = EncryptedCharField(max_length=50, blank=False, null=False)
class Meta:
verbose_name = "AWS Credential"
verbose_name_plural = "AWS Credentials"
def to_dict(self):
d = super(AWSCredentials, self).to_dict()
d['aws_access_key'] = self.aws_access_key
d['aws_secret_key'] = self.aws_secret_key
return d
class OpenStackCredentials(CloudCredentials):
    """OpenStack (Keystone) credentials; the password is stored encrypted."""
    os_username = models.CharField(max_length=50, blank=False, null=False)
    os_password = EncryptedCharField(max_length=50, blank=False, null=False)
    os_project_name = models.CharField(max_length=50, blank=False, null=False)
    os_project_domain_id = models.CharField(max_length=50, blank=True,
                                            null=True)
    os_project_domain_name = models.CharField(max_length=50, blank=True,
                                              null=True)
    os_user_domain_name = models.CharField(max_length=50, blank=True,
                                           null=True)

    class Meta:
        verbose_name = "OpenStack Credential"
        verbose_name_plural = "OpenStack Credentials"

    def to_dict(self):
        """Return a dict of the credentials, omitting unset optional fields."""
        d = super(OpenStackCredentials, self).to_dict()
        d['os_username'] = self.os_username
        # Bug fix: this line was the literal placeholder `<PASSWORD>` (a
        # syntax error); emit the stored password like the other fields.
        d['os_password'] = self.os_password
        if self.os_project_name:
            d['os_project_name'] = self.os_project_name
        if self.os_project_domain_id:
            d['os_project_domain_id'] = self.os_project_domain_id
        if self.os_project_domain_name:
            d['os_project_domain_name'] = self.os_project_domain_name
        if self.os_user_domain_name:
            d['os_user_domain_name'] = self.os_user_domain_name
        return d
class GCPCredentials(CloudCredentials):
gcp_service_creds_dict = EncryptedTextField(blank=False, null=False)
gcp_vm_default_username = models.CharField(max_length=100, blank=False,
null=False, default='cbuser')
def save(self, *args, **kwargs):
if self.gcp_service_creds_dict:
try:
json.loads(self.gcp_service_creds_dict)
except Exception as e:
raise Exception("Invalid JSON syntax. GCP Credentials must be"
" in JSON format. Cause: {0}".format(e))
super(GCPCredentials, self).save(*args, **kwargs)
class Meta:
verbose_name = "GCP Credential"
verbose_name_plural = "GCP Credentials"
def to_dict(self):
gcp_creds = super(GCPCredentials, self).to_dict()
gcp_creds['gcp_service_creds_dict'] = json.loads(self.gcp_service_creds_dict)
gcp_creds['gcp_vm_default_username'] = self.gcp_vm_default_username
return gcp_creds
class AzureCredentials(CloudCredentials):
azure_subscription_id = models.CharField(max_length=50, blank=False,
null=False)
azure_client_id = models.CharField(max_length=50, blank=False, null=False)
azure_secret = EncryptedCharField(max_length=50, blank=False, null=False)
azure_tenant = models.CharField(max_length=50, blank=True, null=True)
azure_resource_group = models.CharField(max_length=64, blank=False,
null=False, default='cloudbridge')
azure_storage_account = models.CharField(max_length=24, blank=False,
null=False, default='cbstorage')
azure_vm_default_username = models.CharField(max_length=100, blank=False,
null=False, default='cbuser')
class Meta:
verbose_name = "Azure Credential"
verbose_name_plural = "Azure Credentials"
def to_dict(self):
d = super(AzureCredentials, self).to_dict()
d['azure_subscription_id'] = self.azure_subscription_id
d['azure_client_id'] = self.azure_client_id
d['azure_secret'] = self.azure_secret
d['azure_tenant'] = self.azure_tenant
d['azure_resource_group'] = self.azure_resource_group
d['azure_storage_account'] = self.azure_storage_account
d['azure_vm_default_username'] = self.azure_vm_default_username
return d
class UserProfile(models.Model):
# Link UserProfile to a User model instance
user = models.OneToOneField(settings.AUTH_USER_MODEL, models.CASCADE)
slug = models.SlugField(unique=True, primary_key=True, editable=False)
class Meta:
verbose_name = "User Profile"
verbose_name_plural = "User Profiles"
def __str__(self):
return "{0} ({1} {2})".format(self.user.username, self.user.first_name,
self.user.last_name)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.user.username)
super(UserProfile, self).save(*args, **kwargs) | 0.532182 | 0.108236 |
import copy
import types
import pickle
import numpy
from learning import Model
from learning.rlearn import RLTable
class MultiOutputs(Model):
    """Ensemble enabling given model to return a higher dimensional output tensor.

    Can be used to return an output vector from a model returing an output value.
    Or an output matrix from a model returning an output vector.
    Etc.
    MultiOutputs can be nested.

    For each output value,
    a model mapping input_vec to output value is learned.
    New outputs are returned by concatenating all model outputs.

    Args:
        models: list<Model> or Model; List of models,
            or Model that is duplicated by num_outputs
        num_outputs: How many components in target vectors.
    """

    def __init__(self, models, num_outputs=None):
        super(MultiOutputs, self).__init__()

        if isinstance(models, Model):
            # Store copy of model for each output
            if num_outputs is None:
                raise ValueError(
                    'If Model is given, num_outputs must not be None')
            self._models = [copy.deepcopy(models) for _ in range(num_outputs)]
        else:
            if not isinstance(models, (list, tuple)):
                raise ValueError('models must be list or tuple, or Model')
            # Validate that list contains Model's, and no duplicates
            for i, model in enumerate(models):
                if not isinstance(model, Model):
                    raise ValueError('models must contain instances of Model')
                # No duplicates
                for other_model in models[i + 1:]:
                    if other_model is model:
                        raise ValueError(
                            'models should not contain duplicate instances')
            # Validation done, store it (shallow copy of the list itself)
            self._models = models[:]
        self._num_outputs = len(self._models)

        # Use reinforcement learning to select which output to update.
        # We use the difference between new and old error as reward.
        self._rl_agent = RLTable(
            [None],
            range(self._num_outputs),
            initial_reward=1.0,
            update_rate=0.25,
            reward_growth=0.01)
        # Last seen training error for each output; None until first update.
        self._errors = [None] * self._num_outputs

    def reset(self):
        """Reset this model."""
        super(MultiOutputs, self).reset()

        # Reset RL agent.
        # Consistency fix: re-create the agent with the same hyperparameters
        # used in __init__ (previously RLTable's defaults were silently used
        # after a reset, so a reset model behaved differently from a new one).
        self._rl_agent = RLTable(
            [None],
            range(self._num_outputs),
            initial_reward=1.0,
            update_rate=0.25,
            reward_growth=0.01)
        self._errors = [None] * self._num_outputs

        # Reset each stored model
        for model in self._models:
            model.reset()

    def activate(self, inputs):
        """Return the model outputs for given inputs.

        One output for each stored model.
        """
        return [model.activate(inputs) for model in self._models]

    def train_step(self, input_matrix, target_matrix):
        """Adjust the model towards the targets for given inputs.

        Adjusts each stored model towards its corresponding column of the
        target matrix. Returns the mean of per-output errors, or None when
        the stored models do not report an error.
        """
        if len(target_matrix[0]) != self._num_outputs:
            raise ValueError(
                'Target matrix column does not match expected number of outputs'
            )

        # Each model learns a single component (column) of the target matrix
        # First iteration, we update all outputs (for baseline)
        # Then we update only a single output value per iteration.
        # NOTE: If a stored model doesn't return error, we default to updating all
        # every iteration, because we can't know which is best to update
        # TODO: Update None errors every iteration, and select one from non-Nones
        if None in self._errors:
            self._update_all_outputs(input_matrix, target_matrix)
        else:
            self._update_one_output(input_matrix, target_matrix)

        try:
            return sum(self._errors) / len(self._errors)
        except TypeError:
            # Model didn't return error
            return None

    def train(self, input_matrix, target_matrix, *args, **kwargs):
        """Train model to converge on set of patterns."""
        if len(target_matrix[0]) != self._num_outputs:
            raise ValueError(
                'Target matrix column does not match expected number of outputs'
            )

        # Train each stored model
        for i, (model, targets) in enumerate(
                zip(self._models, _transpose_rowcol(target_matrix))):
            if self.logging:
                # Bug fix: these were Python-2 print statements (a syntax
                # error under Python 3); the parenthesized single-argument
                # form behaves the same on both interpreters.
                if i != 0:
                    print('')
                print('Training Model %d:' % (i + 1))
            else:
                # NOTE(review): the logging flag is only propagated to the
                # sub-model when self.logging is falsy — confirm this is the
                # intended behavior.
                model.logging = self.logging

            model.train(input_matrix, targets, *args, **kwargs)

        self.iteration = sum([model.iteration for model in self._models])

    def serialize(self):
        """Convert model into string.

        Returns:
            string; A string representing this network.
        """
        # Use serialize on each model, instead of pickle
        serialized_models = [(type(model), model.serialize())
                             for model in self._models]

        # Pickle all other attributes
        attributes = copy.copy(self.__dict__)
        del attributes['_models']

        return pickle.dumps((serialized_models, attributes), protocol=2)

    @classmethod
    def unserialize(cls, serialized_model):
        """Convert serialized model into Model.

        Returns:
            Model; A Model object.
        """
        serialized_models, attributes = pickle.loads(serialized_model)

        # Make model, from serialized models and attributes,
        # bypassing __init__ validation.
        model = MultiOutputs.__new__(MultiOutputs)
        model.__dict__ = attributes

        # unserialize each model
        model._models = [
            class_.unserialize(model_str)
            for class_, model_str in serialized_models
        ]

        return model

    def mse(self, input_vec, target_vec):
        """Return the mean squared error (MSE) for a pattern."""
        return numpy.mean([
            model.mse(input_vec, target)
            for model, target in zip(self._models,
                                     _transpose_rowcol(target_vec))
        ])

    def _update_one_output(self, input_matrix, target_matrix):
        """Update the model that most shows the ability to improve."""
        # Use reinforcement learning to select output to update.
        to_update = self._rl_agent.get_action(None)
        new_error = self._models[to_update].train_step(input_matrix,
                                                       _matrix_col(
                                                           target_matrix,
                                                           to_update))

        # Update RL agent
        self._rl_agent.update(None, to_update,
                              _get_reward(self._errors[to_update], new_error))

        # Update our error list
        self._errors[to_update] = new_error

    def _update_all_outputs(self, input_matrix, target_matrix):
        """Update all stored models."""
        for i, (model, targets) in enumerate(
                zip(self._models, _transpose_rowcol(target_matrix))):
            self._errors[i] = model.train_step(input_matrix, targets)
def _get_reward(old_error, new_error):
"""Return RL agent reward.
Reward for RL agent is difference between new and previous error for output.
Plus small amount for error (prioritize higher error)
"""
return (old_error - new_error) + 0.2 * new_error
def _matrix_col(matrix, i):
"""Return the ith column of matrix."""
if isinstance(matrix, numpy.ndarray):
return _np_matrix_col(matrix, i)
# List of list
if isinstance(matrix[0], (list, tuple, numpy.ndarray)):
return [row[i] for row in matrix]
else:
# Only 1d, take row
return matrix[i]
def _np_matrix_col(matrix, i):
"""Return the ith column of matrix."""
if len(matrix.shape) == 1:
# Only 1d, take row
return matrix[i]
return matrix[:, i]
def _transpose_rowcol(matrix):
"""Return matrix with row and col swapped.
Other axis are left intact.
"""
if isinstance(matrix, numpy.ndarray):
return _np_transpose_rowcol(matrix)
# List of list
if isinstance(matrix[0], (list, tuple, numpy.ndarray)):
return zip(*matrix)
else:
# Only 1d, no change
return matrix
def _np_transpose_rowcol(matrix):
"""Return matrix with row and col swapped.
Other axis are left intact.
"""
if len(matrix.shape) == 1:
# Only 1d, no change
return matrix
return matrix.transpose([1, 0] + range(len(matrix.shape))[2:]) | learning/architecture/multioutputs.py |
import copy
import types
import pickle
import numpy
from learning import Model
from learning.rlearn import RLTable
class MultiOutputs(Model):
    """Ensemble enabling given model to return a higher dimensional output tensor.

    Can be used to return an output vector from a model returning an output value.
    Or an output matrix from a model returning an output vector.
    Etc.
    MultiOutputs can be nested.

    For each output value,
    a model mapping input_vec to output value is learned.
    New outputs are returned by concatenating all model outputs.

    Args:
        models: list<Model> or Model; List of models,
            or Model that is duplicated by num_outputs
        num_outputs: How many components in target vectors.
    """

    def __init__(self, models, num_outputs=None):
        super(MultiOutputs, self).__init__()
        if isinstance(models, Model):
            # Store copy of model for each output
            if num_outputs is None:
                raise ValueError(
                    'If Model is given, num_outputs must not be None')
            self._models = [copy.deepcopy(models) for _ in range(num_outputs)]
        else:
            if not isinstance(models, (list, tuple)):
                raise ValueError('models must be list or tuple, or Model')
            # Validate that list contains Model's, and no duplicates
            for i, model in enumerate(models):
                if not isinstance(model, Model):
                    raise ValueError('models must contain instances of Model')
                # No duplicates: a shared instance would be trained on
                # several target columns at once and corrupt itself.
                for other_model in models[i + 1:]:
                    if other_model is model:
                        raise ValueError(
                            'models should not contain duplicate instances')
            # Validation done, store it (shallow copy so later caller-side
            # list mutations do not affect us)
            self._models = models[:]
        self._num_outputs = len(self._models)
        # Use reinforcement learning to select which output to update
        # We use different between new and old error as reward
        self._rl_agent = RLTable(
            [None],
            range(self._num_outputs),
            initial_reward=1.0,
            update_rate=0.25,
            reward_growth=0.01)
        # _errors[i] is the latest training error of model i;
        # None until that model has been trained at least once.
        self._errors = [None] * self._num_outputs

    def reset(self):
        """Reset this model."""
        super(MultiOutputs, self).reset()
        # Reset RL agent
        # NOTE(review): this RLTable omits the initial_reward / update_rate /
        # reward_growth settings used in __init__ -- confirm whether reset is
        # meant to fall back to the RLTable defaults.
        self._rl_agent = RLTable([None], range(self._num_outputs))
        self._errors = [None] * self._num_outputs
        # Reset each stored model
        for model in self._models:
            model.reset()

    def activate(self, inputs):
        """Return the model outputs for given inputs.

        One output for each stored model.
        """
        return [model.activate(inputs) for model in self._models]

    def train_step(self, input_matrix, target_matrix):
        """Adjust the model towards the targets for given inputs.

        Adjusts each AUTO unit towards its corresponding target vector.

        Returns the mean of the stored per-output errors, or None when the
        stored models do not report errors.
        """
        if len(target_matrix[0]) != self._num_outputs:
            raise ValueError(
                'Target matrix column does not match expected number of outputs'
            )
        # Each model learns a single component (column) of the target matrix
        # First iteration, we update all outputs (for baseline)
        # Then we update only a single output value per iteration.
        # NOTE: If a stored model doesn't return error, we default to updating all
        # every iteration, because we can't know which is best to update
        # TODO: Update None errors every iteration, and select one from non-Nones
        if None in self._errors:
            self._update_all_outputs(input_matrix, target_matrix)
        else:
            self._update_one_output(input_matrix, target_matrix)
        try:
            # Mean error across all outputs.
            return sum(self._errors) / len(self._errors)
        except TypeError:
            # Model didn't return error
            return None

    def train(self, input_matrix, target_matrix, *args, **kwargs):
        """Train model to converge on set of patterns.

        Each stored model is trained to convergence on one column of
        target_matrix; extra arguments are forwarded to each model's train.
        """
        if len(target_matrix[0]) != self._num_outputs:
            raise ValueError(
                'Target matrix column does not match expected number of outputs'
            )
        # Train each stored model
        for i, (model, targets) in enumerate(
                zip(self._models, _transpose_rowcol(target_matrix))):
            if self.logging:
                # Python 2 print statements: blank line between models,
                # then a header for the model being trained.
                if i != 0:
                    print
                print 'Training Model %d:' % (i + 1)
            else:
                # NOTE(review): sub-model logging is only silenced when our own
                # logging is off -- confirm that asymmetry is intended.
                model.logging = self.logging
            model.train(input_matrix, targets, *args, **kwargs)
        # Aggregate iteration counts from all sub-models.
        self.iteration = sum([model.iteration for model in self._models])

    def serialize(self):
        """Convert model into string.

        Returns:
            string; A string representing this network.
        """
        # Use serialize on each model, instead of pickle, so each model
        # controls its own serialization format.
        serialized_models = [(type(model), model.serialize())
                             for model in self._models]
        # Pickle all other attributes
        attributes = copy.copy(self.__dict__)
        del attributes['_models']
        return pickle.dumps((serialized_models, attributes), protocol=2)

    @classmethod
    def unserialize(cls, serialized_model):
        """Convert serialized model into Model.

        Returns:
            Model; A Model object.
        """
        serialized_models, attributes = pickle.loads(serialized_model)
        # Make model, from serialized models and attributes.
        # __new__ bypasses __init__ validation; attributes are restored
        # directly from the pickled dict.
        model = MultiOutputs.__new__(MultiOutputs)
        model.__dict__ = attributes
        # unserialize each model
        model._models = [
            class_.unserialize(model_str)
            for class_, model_str in serialized_models
        ]
        return model

    def mse(self, input_vec, target_vec):
        """Return the mean squared error (MSE) for a pattern."""
        # Average the per-output MSE of each stored model.
        return numpy.mean([
            model.mse(input_vec, target)
            for model, target in zip(self._models,
                                     _transpose_rowcol(target_vec))
        ])

    def _update_one_output(self, input_matrix, target_matrix):
        """Update the model that most shows the ability to improve."""
        # Use reinforcement learning to select output to update.
        to_update = self._rl_agent.get_action(None)
        new_error = self._models[to_update].train_step(input_matrix,
                                                       _matrix_col(
                                                           target_matrix,
                                                           to_update))
        # Update RL agent: reward is the error improvement for that output.
        self._rl_agent.update(None, to_update,
                              _get_reward(self._errors[to_update], new_error))
        # Update our error list
        self._errors[to_update] = new_error

    def _update_all_outputs(self, input_matrix, target_matrix):
        """Update all stored models."""
        for i, (model, targets) in enumerate(
                zip(self._models, _transpose_rowcol(target_matrix))):
            self._errors[i] = model.train_step(input_matrix, targets)
def _get_reward(old_error, new_error):
"""Return RL agent reward.
Reward for RL agent is difference between new and previous error for output.
Plus small amount for error (prioritize higher error)
"""
return (old_error - new_error) + 0.2 * new_error
def _matrix_col(matrix, i):
"""Return the ith column of matrix."""
if isinstance(matrix, numpy.ndarray):
return _np_matrix_col(matrix, i)
# List of list
if isinstance(matrix[0], (list, tuple, numpy.ndarray)):
return [row[i] for row in matrix]
else:
# Only 1d, take row
return matrix[i]
def _np_matrix_col(matrix, i):
"""Return the ith column of matrix."""
if len(matrix.shape) == 1:
# Only 1d, take row
return matrix[i]
return matrix[:, i]
def _transpose_rowcol(matrix):
"""Return matrix with row and col swapped.
Other axis are left intact.
"""
if isinstance(matrix, numpy.ndarray):
return _np_transpose_rowcol(matrix)
# List of list
if isinstance(matrix[0], (list, tuple, numpy.ndarray)):
return zip(*matrix)
else:
# Only 1d, no change
return matrix
def _np_transpose_rowcol(matrix):
"""Return matrix with row and col swapped.
Other axis are left intact.
"""
if len(matrix.shape) == 1:
# Only 1d, no change
return matrix
return matrix.transpose([1, 0] + range(len(matrix.shape))[2:]) | 0.798854 | 0.694607 |
from argparse import ArgumentParser
import codecs
import json
import logging
import os
import pickle
import sys
import tempfile
from typing import Union
import numpy as np
from rusenttokenize import ru_sent_tokenize
try:
    from deep_ner.elmo_ner import ELMo_NER, elmo_ner_logger
    from deep_ner.utils import factrueval2016_to_json, load_dataset_from_json, load_dataset_from_brat, set_total_seed
    from deep_ner.utils import divide_dataset_by_sentences
    from deep_ner.quality import calculate_prediction_quality
    from deep_ner.udpipe_data import create_udpipe_pipeline
    from deep_ner.dataset_splitting import sample_from_dataset, split_dataset
except ImportError:
    # Allow running the demo straight from a repository checkout: put the
    # project root on sys.path and retry.  Catch only ImportError so genuine
    # errors raised inside deep_ner are not silently masked (the original
    # bare ``except:`` swallowed everything, including KeyboardInterrupt).
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    from deep_ner.elmo_ner import ELMo_NER, elmo_ner_logger
    from deep_ner.utils import factrueval2016_to_json, load_dataset_from_json, load_dataset_from_brat, set_total_seed
    from deep_ner.utils import divide_dataset_by_sentences
    from deep_ner.quality import calculate_prediction_quality
    from deep_ner.udpipe_data import create_udpipe_pipeline
    from deep_ner.dataset_splitting import sample_from_dataset, split_dataset
def train(factrueval2016_devset_dir: str, split_by_paragraphs: bool, elmo_will_be_tuned: bool,
          use_lang_features: bool, use_shapes: bool, max_epochs: int, patience: int, batch_size: int,
          lr: float, l2: float, gpu_memory_frac: float,
          model_name: str, collection3_dir: Union[str, None]=None, n_max_samples: int=0) -> ELMo_NER:
    """Load a pickled recognizer from `model_name` if it exists, else train and save one.

    When `collection3_dir` is None the model is trained on the FactRuEval-2016
    devset (optionally on a random sub-set of `n_max_samples` samples); otherwise
    it is trained on Collection-3 and validated on FactRuEval-2016.

    :param factrueval2016_devset_dir: directory with the FactRuEval-2016 devset.
    :param split_by_paragraphs: split texts by paragraphs instead of sentences.
    :param elmo_will_be_tuned: fine-tune ELMo together with the CRF head.
    :param use_lang_features: add morphology/syntax features.
    :param use_shapes: add word-shape features.
    :param max_epochs: maximal number of training epochs.
    :param patience: early-stopping patience.
    :param batch_size: mini-batch size.
    :param lr: learning rate.
    :param l2: L2 regularization factor.
    :param gpu_memory_frac: allocable part of GPU memory.
    :param model_name: path of the pickled model to load or create.
    :param collection3_dir: optional path to the Collection-3 data set.
    :param n_max_samples: size of the training sub-set (0 = use everything).
    :return: a fitted ELMo_NER recognizer.
    """
    if os.path.isfile(model_name):
        # Re-use the cached model instead of retraining.
        with open(model_name, 'rb') as fp:
            recognizer = pickle.load(fp)
        assert isinstance(recognizer, ELMo_NER)
        print('The NER has been successfully loaded from the file `{0}`...'.format(model_name))
        print('')
    else:
        temp_json_name = tempfile.NamedTemporaryFile(mode='w').name
        try:
            factrueval2016_to_json(factrueval2016_devset_dir, temp_json_name, split_by_paragraphs)
            X, y = load_dataset_from_json(temp_json_name)
        finally:
            # Always remove the temporary JSON file, even on failure.
            if os.path.isfile(temp_json_name):
                os.remove(temp_json_name)
        print('The FactRuEval-2016 data for training have been loaded...')
        print('Number of samples is {0}.'.format(len(y)))
        print('')
        # Find the longest text (in UDPipe tokens) to size the network input.
        max_number_of_tokens = 0
        pipeline = create_udpipe_pipeline('ru')
        for cur in X:
            spacy_doc = pipeline(cur)
            n_tokens = 0
            for _ in spacy_doc:
                n_tokens += 1
            del spacy_doc
            if n_tokens > max_number_of_tokens:
                max_number_of_tokens = n_tokens
        del pipeline
        print('Maximal number of tokens is {0}.'.format(max_number_of_tokens))
        # Round the sequence length up to the next power of two.
        n_tokens = 2
        while n_tokens < max_number_of_tokens:
            n_tokens *= 2
        elmo_hub_module_handle = 'http://files.deeppavlov.ai/deeppavlov_data/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz'
        recognizer = ELMo_NER(
            finetune_elmo=elmo_will_be_tuned, batch_size=batch_size, l2_reg=l2, max_seq_length=n_tokens,
            elmo_hub_module_handle=elmo_hub_module_handle, validation_fraction=0.25, max_epochs=max_epochs,
            patience=patience, gpu_memory_frac=gpu_memory_frac, verbose=True, random_seed=42, lr=lr, udpipe_lang='ru',
            use_nlp_features=use_lang_features, use_shapes=use_shapes
        )
        if collection3_dir is None:
            if n_max_samples > 0:
                # Hold out a validation part, then train on a random sample of
                # the remaining data.
                train_index, test_index = split_dataset(y=y, test_part=recognizer.validation_fraction)
                X_train = np.array(X, dtype=object)[train_index]
                y_train = np.array(y, dtype=object)[train_index]
                X_val = np.array(X, dtype=object)[test_index]
                y_val = np.array(y, dtype=object)[test_index]
                del train_index, test_index
                index = sample_from_dataset(y=y_train, n=n_max_samples)
                recognizer.fit(X_train[index], y_train[index], validation_data=(X_val, y_val))
            else:
                # Bug fix: this fit previously ran unconditionally, re-training
                # on the full dataset and discarding the sub-sampled fit above,
                # which made the n_max_samples option a no-op.
                recognizer.fit(X, y)
        else:
            X_train, y_train = load_dataset_from_brat(collection3_dir, split_by_paragraphs=True)
            if not split_by_paragraphs:
                X_train, y_train = divide_dataset_by_sentences(X_train, y_train, sent_tokenize_func=ru_sent_tokenize)
            # Rename Collection-3 labels to the FactRuEval-2016 label set.
            for sample_idx in range(len(y_train)):
                new_y_sample = dict()
                for ne_type in sorted(list(y_train[sample_idx].keys())):
                    if ne_type == 'PER':
                        new_y_sample['PERSON'] = y_train[sample_idx][ne_type]
                    elif ne_type == 'LOC':
                        new_y_sample['LOCATION'] = y_train[sample_idx][ne_type]
                    else:
                        new_y_sample[ne_type] = y_train[sample_idx][ne_type]
                y_train[sample_idx] = new_y_sample
                del new_y_sample
            print('The Collection3 data for training have been loaded...')
            print('Number of samples is {0}.'.format(len(y_train)))
            print('')
            if n_max_samples > 0:
                index = sample_from_dataset(y=y_train, n=n_max_samples)
                X_train = np.array(X_train, dtype=object)[index]
                y_train = np.array(y_train, dtype=object)[index]
                del index
            # Train on Collection-3, validate on FactRuEval-2016.
            recognizer.fit(X_train, y_train, validation_data=(X, y))
        with open(model_name, 'wb') as fp:
            pickle.dump(recognizer, fp)
        print('')
        print('The NER has been successfully fitted and saved into the file `{0}`...'.format(model_name))
        print('')
    return recognizer
def recognize(factrueval2016_testset_dir: str, split_by_paragraphs: bool, recognizer: ELMo_NER, results_dir: str):
    """Evaluate `recognizer` on the FactRuEval-2016 testset and write results.

    Prints F1/precision/recall (overall and per entity type) and writes one
    `.task1` file per source document into `results_dir`, each line being
    ``<type> <start offset> <length>``.

    :param factrueval2016_testset_dir: directory with the FactRuEval-2016 testset.
    :param split_by_paragraphs: split documents by paragraphs instead of sentences.
    :param recognizer: a fitted ELMo_NER model.
    :param results_dir: output directory for the task-1 result files.
    """
    temp_json_name = tempfile.NamedTemporaryFile(mode='w').name
    try:
        factrueval2016_to_json(factrueval2016_testset_dir, temp_json_name, split_by_paragraphs)
        with codecs.open(temp_json_name, mode='r', encoding='utf-8', errors='ignore') as fp:
            data_for_testing = json.load(fp)
        _, true_entities = load_dataset_from_json(temp_json_name)
    finally:
        # Always remove the temporary JSON file, even on failure.
        if os.path.isfile(temp_json_name):
            os.remove(temp_json_name)
    texts = []
    additional_info = []
    # Each paragraph becomes one sample; remember its output file and offsets
    # so entity positions can be mapped back to the original document.
    for cur_document in data_for_testing:
        base_name = os.path.join(results_dir, cur_document['base_name'] + '.task1')
        for cur_paragraph in cur_document['paragraph_bounds']:
            texts.append(cur_document['text'][cur_paragraph[0]:cur_paragraph[1]])
            additional_info.append((base_name, cur_paragraph))
    print('Data for final testing have been loaded...')
    print('Number of samples is {0}.'.format(len(true_entities)))
    print('')
    predicted_entities = recognizer.predict(texts)
    assert len(predicted_entities) == len(true_entities)
    f1, precision, recall, quality_by_entities = calculate_prediction_quality(
        true_entities, predicted_entities, recognizer.classes_list_)
    print('All entities:')
    print(' F1-score is {0:.2%}.'.format(f1))
    print(' Precision is {0:.2%}.'.format(precision))
    print(' Recall is {0:.2%}.'.format(recall))
    for ne_type in sorted(list(quality_by_entities.keys())):
        print(' {0}'.format(ne_type))
        print(' F1-score is {0:.2%}.'.format(quality_by_entities[ne_type][0]))
        print(' Precision is {0:.2%}.'.format(quality_by_entities[ne_type][1]))
        print(' Recall is {0:.2%}.'.format(quality_by_entities[ne_type][2]))
    results_for_factrueval_2016 = dict()
    for sample_idx, cur_result in enumerate(predicted_entities):
        base_name, paragraph_bounds = additional_info[sample_idx]
        # Map internal labels to FactRuEval-2016 task-1 labels.
        for entity_type in cur_result:
            if entity_type == 'ORG':
                prepared_entity_type = 'org'
            elif entity_type == 'PERSON':
                prepared_entity_type = 'per'
            elif entity_type == 'LOCATION':
                prepared_entity_type = 'loc'
            else:
                prepared_entity_type = None
            if prepared_entity_type is None:
                raise ValueError('`{0}` is unknown entity type!'.format(entity_type))
            # Convert paragraph-relative (start, end) to document-absolute
            # (start, length) as required by the task-1 format.
            for entity_bounds in cur_result[entity_type]:
                postprocessed_entity = (
                    prepared_entity_type,
                    entity_bounds[0] + paragraph_bounds[0],
                    entity_bounds[1] - entity_bounds[0]
                )
                if base_name in results_for_factrueval_2016:
                    results_for_factrueval_2016[base_name].append(postprocessed_entity)
                else:
                    results_for_factrueval_2016[base_name] = [postprocessed_entity]
    for base_name in results_for_factrueval_2016:
        with codecs.open(base_name, mode='w', encoding='utf-8', errors='ignore') as fp:
            # Sort by position, then length, then type for stable output.
            for cur_entity in sorted(results_for_factrueval_2016[base_name], key=lambda it: (it[1], it[2], it[0])):
                fp.write('{0} {1} {2}\n'.format(cur_entity[0], cur_entity[1], cur_entity[2]))
def main():
    """Parse command-line arguments, train (or load) the NER and evaluate it.

    Trains on the FactRuEval-2016 devset (or Collection-3 when -c is given)
    and writes recognition results for the testset into the result directory.
    """
    parser = ArgumentParser()
    parser.add_argument('-m', '--model', dest='model_name', type=str, required=True,
                        help='The binary file with the NER model.')
    parser.add_argument('-n', '--number', dest='samples_number', type=int, required=False, default=None,
                        help='Number of samples of the training sub-set.')
    parser.add_argument('-d', '--data', dest='data_name', type=str, required=True,
                        help='Path to the FactRuEval-2016 repository.')
    parser.add_argument('-r', '--result', dest='result_name', type=str, required=True,
                        help='The directory into which all recognized named entity labels will be saved.')
    parser.add_argument('-c', '--collection', dest='collection_data_name', type=str, required=False, default=None,
                        help='Path to the Collection-3 data set.')
    parser.add_argument('--batch', dest='batch_size', type=int, required=False, default=16,
                        help='Size of mini-batch.')
    parser.add_argument('--max_epochs', dest='max_epochs', type=int, required=False, default=100,
                        help='Maximal number of training epochs.')
    parser.add_argument('--patience', dest='patience', type=int, required=False, default=10,
                        help='Number of iterations with no improvement to wait before stopping the training.')
    parser.add_argument('--gpu_frac', dest='gpu_memory_frac', type=float, required=False, default=0.9,
                        help='Allocable part of the GPU memory for the NER model.')
    parser.add_argument('--finetune_elmo', dest='finetune_elmo', required=False, action='store_true',
                        default=False, help='Will be the ELMo and CRF finetuned together? Or the ELMo will be frozen?')
    parser.add_argument('--lr', dest='lr', type=float, required=False, default=1e-4, help='Learning rate.')
    parser.add_argument('--l2', dest='l2_coeff', type=float, required=False, default=1e-2,
                        help='L2 regularization factor.')
    parser.add_argument('--text', dest='text_unit', type=str, choices=['sentence', 'paragraph'], required=False,
                        default='sentence', help='Text unit: sentence or paragraph.')
    parser.add_argument('--lang_features', dest='lang_features', required=False, action='store_true',
                        default=False, help='Will be morphology and syntax used as additional feautres?')
    parser.add_argument('--shapes', dest='shapes', required=False, action='store_true',
                        default=False, help='Will be word shapes used as additional features?')
    parser.add_argument('--seed', dest='random_seed', type=int, required=False, default=None,
                        help='The random seed.')
    args = parser.parse_args()
    # Defensive check; argparse `choices` already restricts this value.
    if args.text_unit not in {'sentence', 'paragraph'}:
        raise ValueError('`{0}` is wrong value for the `text_unit` parameter!'.format(args.text_unit))
    collection3_dir_name = None if args.collection_data_name is None else os.path.normpath(args.collection_data_name)
    devset_dir_name = os.path.join(os.path.normpath(args.data_name), 'devset')
    testset_dir_name = os.path.join(os.path.normpath(args.data_name), 'testset')
    if args.random_seed is not None:
        set_total_seed(args.random_seed)
    # samples_number == 0 means "use the whole training set".
    if args.samples_number is None:
        samples_number = 0
    else:
        samples_number = args.samples_number
        if samples_number < 1:
            raise ValueError('The samples number in training sub-set is wrong! It must be a positive integer value.')
    recognizer = train(factrueval2016_devset_dir=devset_dir_name, elmo_will_be_tuned=args.finetune_elmo,
                       max_epochs=args.max_epochs, patience=args.patience, batch_size=args.batch_size,
                       gpu_memory_frac=args.gpu_memory_frac, model_name=os.path.normpath(args.model_name), lr=args.lr,
                       l2=args.l2_coeff, split_by_paragraphs=(args.text_unit == 'paragraph'),
                       collection3_dir=collection3_dir_name, n_max_samples=samples_number,
                       use_lang_features=args.lang_features, use_shapes=args.shapes)
    recognize(factrueval2016_testset_dir=testset_dir_name, recognizer=recognizer,
              results_dir=os.path.normpath(args.result_name), split_by_paragraphs=(args.text_unit == 'paragraph'))
if __name__ == '__main__':
    # Route the NER library's INFO-level log output to stdout before running.
    # (Also removes dataset-extraction residue that was fused onto the final
    # line and made the file unparseable.)
    elmo_ner_logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s')
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    elmo_ner_logger.addHandler(handler)
    main()
import codecs
import json
import logging
import os
import pickle
import sys
import tempfile
from typing import Union
import numpy as np
from rusenttokenize import ru_sent_tokenize
try:
    from deep_ner.elmo_ner import ELMo_NER, elmo_ner_logger
    from deep_ner.utils import factrueval2016_to_json, load_dataset_from_json, load_dataset_from_brat, set_total_seed
    from deep_ner.utils import divide_dataset_by_sentences
    from deep_ner.quality import calculate_prediction_quality
    from deep_ner.udpipe_data import create_udpipe_pipeline
    from deep_ner.dataset_splitting import sample_from_dataset, split_dataset
except ImportError:
    # Allow running the demo straight from a repository checkout: put the
    # project root on sys.path and retry.  Catch only ImportError so genuine
    # errors raised inside deep_ner are not silently masked (the original
    # bare ``except:`` swallowed everything, including KeyboardInterrupt).
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    from deep_ner.elmo_ner import ELMo_NER, elmo_ner_logger
    from deep_ner.utils import factrueval2016_to_json, load_dataset_from_json, load_dataset_from_brat, set_total_seed
    from deep_ner.utils import divide_dataset_by_sentences
    from deep_ner.quality import calculate_prediction_quality
    from deep_ner.udpipe_data import create_udpipe_pipeline
    from deep_ner.dataset_splitting import sample_from_dataset, split_dataset
def train(factrueval2016_devset_dir: str, split_by_paragraphs: bool, elmo_will_be_tuned: bool,
          use_lang_features: bool, use_shapes: bool, max_epochs: int, patience: int, batch_size: int,
          lr: float, l2: float, gpu_memory_frac: float,
          model_name: str, collection3_dir: Union[str, None]=None, n_max_samples: int=0) -> ELMo_NER:
    """Load a pickled recognizer from `model_name` if it exists, else train and save one.

    When `collection3_dir` is None the model is trained on the FactRuEval-2016
    devset (optionally on a random sub-set of `n_max_samples` samples); otherwise
    it is trained on Collection-3 and validated on FactRuEval-2016.

    :param factrueval2016_devset_dir: directory with the FactRuEval-2016 devset.
    :param split_by_paragraphs: split texts by paragraphs instead of sentences.
    :param elmo_will_be_tuned: fine-tune ELMo together with the CRF head.
    :param use_lang_features: add morphology/syntax features.
    :param use_shapes: add word-shape features.
    :param max_epochs: maximal number of training epochs.
    :param patience: early-stopping patience.
    :param batch_size: mini-batch size.
    :param lr: learning rate.
    :param l2: L2 regularization factor.
    :param gpu_memory_frac: allocable part of GPU memory.
    :param model_name: path of the pickled model to load or create.
    :param collection3_dir: optional path to the Collection-3 data set.
    :param n_max_samples: size of the training sub-set (0 = use everything).
    :return: a fitted ELMo_NER recognizer.
    """
    if os.path.isfile(model_name):
        # Re-use the cached model instead of retraining.
        with open(model_name, 'rb') as fp:
            recognizer = pickle.load(fp)
        assert isinstance(recognizer, ELMo_NER)
        print('The NER has been successfully loaded from the file `{0}`...'.format(model_name))
        print('')
    else:
        temp_json_name = tempfile.NamedTemporaryFile(mode='w').name
        try:
            factrueval2016_to_json(factrueval2016_devset_dir, temp_json_name, split_by_paragraphs)
            X, y = load_dataset_from_json(temp_json_name)
        finally:
            # Always remove the temporary JSON file, even on failure.
            if os.path.isfile(temp_json_name):
                os.remove(temp_json_name)
        print('The FactRuEval-2016 data for training have been loaded...')
        print('Number of samples is {0}.'.format(len(y)))
        print('')
        # Find the longest text (in UDPipe tokens) to size the network input.
        max_number_of_tokens = 0
        pipeline = create_udpipe_pipeline('ru')
        for cur in X:
            spacy_doc = pipeline(cur)
            n_tokens = 0
            for _ in spacy_doc:
                n_tokens += 1
            del spacy_doc
            if n_tokens > max_number_of_tokens:
                max_number_of_tokens = n_tokens
        del pipeline
        print('Maximal number of tokens is {0}.'.format(max_number_of_tokens))
        # Round the sequence length up to the next power of two.
        n_tokens = 2
        while n_tokens < max_number_of_tokens:
            n_tokens *= 2
        elmo_hub_module_handle = 'http://files.deeppavlov.ai/deeppavlov_data/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz'
        recognizer = ELMo_NER(
            finetune_elmo=elmo_will_be_tuned, batch_size=batch_size, l2_reg=l2, max_seq_length=n_tokens,
            elmo_hub_module_handle=elmo_hub_module_handle, validation_fraction=0.25, max_epochs=max_epochs,
            patience=patience, gpu_memory_frac=gpu_memory_frac, verbose=True, random_seed=42, lr=lr, udpipe_lang='ru',
            use_nlp_features=use_lang_features, use_shapes=use_shapes
        )
        if collection3_dir is None:
            if n_max_samples > 0:
                # Hold out a validation part, then train on a random sample of
                # the remaining data.
                train_index, test_index = split_dataset(y=y, test_part=recognizer.validation_fraction)
                X_train = np.array(X, dtype=object)[train_index]
                y_train = np.array(y, dtype=object)[train_index]
                X_val = np.array(X, dtype=object)[test_index]
                y_val = np.array(y, dtype=object)[test_index]
                del train_index, test_index
                index = sample_from_dataset(y=y_train, n=n_max_samples)
                recognizer.fit(X_train[index], y_train[index], validation_data=(X_val, y_val))
            else:
                # Bug fix: this fit previously ran unconditionally, re-training
                # on the full dataset and discarding the sub-sampled fit above,
                # which made the n_max_samples option a no-op.
                recognizer.fit(X, y)
        else:
            X_train, y_train = load_dataset_from_brat(collection3_dir, split_by_paragraphs=True)
            if not split_by_paragraphs:
                X_train, y_train = divide_dataset_by_sentences(X_train, y_train, sent_tokenize_func=ru_sent_tokenize)
            # Rename Collection-3 labels to the FactRuEval-2016 label set.
            for sample_idx in range(len(y_train)):
                new_y_sample = dict()
                for ne_type in sorted(list(y_train[sample_idx].keys())):
                    if ne_type == 'PER':
                        new_y_sample['PERSON'] = y_train[sample_idx][ne_type]
                    elif ne_type == 'LOC':
                        new_y_sample['LOCATION'] = y_train[sample_idx][ne_type]
                    else:
                        new_y_sample[ne_type] = y_train[sample_idx][ne_type]
                y_train[sample_idx] = new_y_sample
                del new_y_sample
            print('The Collection3 data for training have been loaded...')
            print('Number of samples is {0}.'.format(len(y_train)))
            print('')
            if n_max_samples > 0:
                index = sample_from_dataset(y=y_train, n=n_max_samples)
                X_train = np.array(X_train, dtype=object)[index]
                y_train = np.array(y_train, dtype=object)[index]
                del index
            # Train on Collection-3, validate on FactRuEval-2016.
            recognizer.fit(X_train, y_train, validation_data=(X, y))
        with open(model_name, 'wb') as fp:
            pickle.dump(recognizer, fp)
        print('')
        print('The NER has been successfully fitted and saved into the file `{0}`...'.format(model_name))
        print('')
    return recognizer
def recognize(factrueval2016_testset_dir: str, split_by_paragraphs: bool, recognizer: ELMo_NER, results_dir: str):
    """Evaluate `recognizer` on the FactRuEval-2016 testset and write results.

    Prints F1/precision/recall (overall and per entity type) and writes one
    `.task1` file per source document into `results_dir`, each line being
    ``<type> <start offset> <length>``.

    :param factrueval2016_testset_dir: directory with the FactRuEval-2016 testset.
    :param split_by_paragraphs: split documents by paragraphs instead of sentences.
    :param recognizer: a fitted ELMo_NER model.
    :param results_dir: output directory for the task-1 result files.
    """
    temp_json_name = tempfile.NamedTemporaryFile(mode='w').name
    try:
        factrueval2016_to_json(factrueval2016_testset_dir, temp_json_name, split_by_paragraphs)
        with codecs.open(temp_json_name, mode='r', encoding='utf-8', errors='ignore') as fp:
            data_for_testing = json.load(fp)
        _, true_entities = load_dataset_from_json(temp_json_name)
    finally:
        # Always remove the temporary JSON file, even on failure.
        if os.path.isfile(temp_json_name):
            os.remove(temp_json_name)
    texts = []
    additional_info = []
    # Each paragraph becomes one sample; remember its output file and offsets
    # so entity positions can be mapped back to the original document.
    for cur_document in data_for_testing:
        base_name = os.path.join(results_dir, cur_document['base_name'] + '.task1')
        for cur_paragraph in cur_document['paragraph_bounds']:
            texts.append(cur_document['text'][cur_paragraph[0]:cur_paragraph[1]])
            additional_info.append((base_name, cur_paragraph))
    print('Data for final testing have been loaded...')
    print('Number of samples is {0}.'.format(len(true_entities)))
    print('')
    predicted_entities = recognizer.predict(texts)
    assert len(predicted_entities) == len(true_entities)
    f1, precision, recall, quality_by_entities = calculate_prediction_quality(
        true_entities, predicted_entities, recognizer.classes_list_)
    print('All entities:')
    print(' F1-score is {0:.2%}.'.format(f1))
    print(' Precision is {0:.2%}.'.format(precision))
    print(' Recall is {0:.2%}.'.format(recall))
    for ne_type in sorted(list(quality_by_entities.keys())):
        print(' {0}'.format(ne_type))
        print(' F1-score is {0:.2%}.'.format(quality_by_entities[ne_type][0]))
        print(' Precision is {0:.2%}.'.format(quality_by_entities[ne_type][1]))
        print(' Recall is {0:.2%}.'.format(quality_by_entities[ne_type][2]))
    results_for_factrueval_2016 = dict()
    for sample_idx, cur_result in enumerate(predicted_entities):
        base_name, paragraph_bounds = additional_info[sample_idx]
        # Map internal labels to FactRuEval-2016 task-1 labels.
        for entity_type in cur_result:
            if entity_type == 'ORG':
                prepared_entity_type = 'org'
            elif entity_type == 'PERSON':
                prepared_entity_type = 'per'
            elif entity_type == 'LOCATION':
                prepared_entity_type = 'loc'
            else:
                prepared_entity_type = None
            if prepared_entity_type is None:
                raise ValueError('`{0}` is unknown entity type!'.format(entity_type))
            # Convert paragraph-relative (start, end) to document-absolute
            # (start, length) as required by the task-1 format.
            for entity_bounds in cur_result[entity_type]:
                postprocessed_entity = (
                    prepared_entity_type,
                    entity_bounds[0] + paragraph_bounds[0],
                    entity_bounds[1] - entity_bounds[0]
                )
                if base_name in results_for_factrueval_2016:
                    results_for_factrueval_2016[base_name].append(postprocessed_entity)
                else:
                    results_for_factrueval_2016[base_name] = [postprocessed_entity]
    for base_name in results_for_factrueval_2016:
        with codecs.open(base_name, mode='w', encoding='utf-8', errors='ignore') as fp:
            # Sort by position, then length, then type for stable output.
            for cur_entity in sorted(results_for_factrueval_2016[base_name], key=lambda it: (it[1], it[2], it[0])):
                fp.write('{0} {1} {2}\n'.format(cur_entity[0], cur_entity[1], cur_entity[2]))
def main():
    """Parse command-line arguments, train (or load) the NER and evaluate it.

    Trains on the FactRuEval-2016 devset (or Collection-3 when -c is given)
    and writes recognition results for the testset into the result directory.
    """
    parser = ArgumentParser()
    parser.add_argument('-m', '--model', dest='model_name', type=str, required=True,
                        help='The binary file with the NER model.')
    parser.add_argument('-n', '--number', dest='samples_number', type=int, required=False, default=None,
                        help='Number of samples of the training sub-set.')
    parser.add_argument('-d', '--data', dest='data_name', type=str, required=True,
                        help='Path to the FactRuEval-2016 repository.')
    parser.add_argument('-r', '--result', dest='result_name', type=str, required=True,
                        help='The directory into which all recognized named entity labels will be saved.')
    parser.add_argument('-c', '--collection', dest='collection_data_name', type=str, required=False, default=None,
                        help='Path to the Collection-3 data set.')
    parser.add_argument('--batch', dest='batch_size', type=int, required=False, default=16,
                        help='Size of mini-batch.')
    parser.add_argument('--max_epochs', dest='max_epochs', type=int, required=False, default=100,
                        help='Maximal number of training epochs.')
    parser.add_argument('--patience', dest='patience', type=int, required=False, default=10,
                        help='Number of iterations with no improvement to wait before stopping the training.')
    parser.add_argument('--gpu_frac', dest='gpu_memory_frac', type=float, required=False, default=0.9,
                        help='Allocable part of the GPU memory for the NER model.')
    parser.add_argument('--finetune_elmo', dest='finetune_elmo', required=False, action='store_true',
                        default=False, help='Will be the ELMo and CRF finetuned together? Or the ELMo will be frozen?')
    parser.add_argument('--lr', dest='lr', type=float, required=False, default=1e-4, help='Learning rate.')
    parser.add_argument('--l2', dest='l2_coeff', type=float, required=False, default=1e-2,
                        help='L2 regularization factor.')
    parser.add_argument('--text', dest='text_unit', type=str, choices=['sentence', 'paragraph'], required=False,
                        default='sentence', help='Text unit: sentence or paragraph.')
    parser.add_argument('--lang_features', dest='lang_features', required=False, action='store_true',
                        default=False, help='Will be morphology and syntax used as additional feautres?')
    parser.add_argument('--shapes', dest='shapes', required=False, action='store_true',
                        default=False, help='Will be word shapes used as additional features?')
    parser.add_argument('--seed', dest='random_seed', type=int, required=False, default=None,
                        help='The random seed.')
    args = parser.parse_args()
    # Defensive check; argparse `choices` already restricts this value.
    if args.text_unit not in {'sentence', 'paragraph'}:
        raise ValueError('`{0}` is wrong value for the `text_unit` parameter!'.format(args.text_unit))
    collection3_dir_name = None if args.collection_data_name is None else os.path.normpath(args.collection_data_name)
    devset_dir_name = os.path.join(os.path.normpath(args.data_name), 'devset')
    testset_dir_name = os.path.join(os.path.normpath(args.data_name), 'testset')
    if args.random_seed is not None:
        set_total_seed(args.random_seed)
    # samples_number == 0 means "use the whole training set".
    if args.samples_number is None:
        samples_number = 0
    else:
        samples_number = args.samples_number
        if samples_number < 1:
            raise ValueError('The samples number in training sub-set is wrong! It must be a positive integer value.')
    recognizer = train(factrueval2016_devset_dir=devset_dir_name, elmo_will_be_tuned=args.finetune_elmo,
                       max_epochs=args.max_epochs, patience=args.patience, batch_size=args.batch_size,
                       gpu_memory_frac=args.gpu_memory_frac, model_name=os.path.normpath(args.model_name), lr=args.lr,
                       l2=args.l2_coeff, split_by_paragraphs=(args.text_unit == 'paragraph'),
                       collection3_dir=collection3_dir_name, n_max_samples=samples_number,
                       use_lang_features=args.lang_features, use_shapes=args.shapes)
    recognize(factrueval2016_testset_dir=testset_dir_name, recognizer=recognizer,
              results_dir=os.path.normpath(args.result_name), split_by_paragraphs=(args.text_unit == 'paragraph'))
if __name__ == '__main__':
    # Route the NER library's INFO-level log output to stdout before running.
    # (Also removes dataset-extraction residue that was fused onto the final
    # line and made the file unparseable.)
    elmo_ner_logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s')
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    elmo_ner_logger.addHandler(handler)
    main()
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
# Create your models here.
class PhoneOTP(models.Model):
    """One-time-password record keyed by a mobile phone number."""

    # E.164-style number: optional '+', then 9-14 digits.
    phone_regex = RegexValidator( regex =r'^\+?1?\d{9,14}$', message ="Phone number must be entered in the format: '+999999999'. Up to 14 digits allowed.")
    mobile = models.CharField(validators=[phone_regex], max_length=17, unique=True)
    # True once the OTP sent to this number has been confirmed.
    isVerified = models.BooleanField(blank=False, default=False)
    counter = models.IntegerField(default=0, blank=False)  # For HOTP Verification
    def __str__(self):
        return str(self.mobile)
class UserManager(BaseUserManager):
    """Manager that creates ``User`` accounts keyed by phone number.

    Note: the ``password=<PASSWORD>`` placeholders in the original were
    credential-scrubbing artifacts and invalid Python; the defaults are
    restored to ``None`` (callers must still supply a password, since a
    falsy password raises ValueError).
    """

    def create_user(self, phone, password=None, is_staff=False, is_active=True, is_admin=False):
        """Create and save a user; raise ValueError if phone or password is missing."""
        if not phone:
            raise ValueError('users must have a phone number')
        if not password:
            raise ValueError('user must have a password')
        user_obj = self.model(
            phone=phone,
            is_staff=is_staff,
            is_active=is_active,
            is_admin=is_admin,
        )
        # Hash the password; never store it in plain text.
        user_obj.set_password(password)
        user_obj.save(using=self._db)
        return user_obj

    def create_staffuser(self, phone, password=None):
        """Create a user with staff (admin-site) access."""
        user = self.create_user(
            phone,
            password=password,
            is_staff=True,
        )
        return user

    def create_superuser(self, phone, password=None):
        """Create a user with both staff and admin rights."""
        user = self.create_user(
            phone,
            password=password,
            is_staff=True,
            is_admin=True,
        )
        return user
class User(AbstractBaseUser):
    """Custom user model that authenticates with a phone number."""
    # Validator enforcing an E.164-like phone format, e.g. '+999999999'.
    phone_regex = RegexValidator( regex =r'^\+?1?\d{9,14}$', message ="Phone number must be entered in the format: '+999999999'. Up to 14 digits allowed.")
    phone = models.CharField(validators=[phone_regex], max_length=17, unique=True)
    # Set once when the row is first saved.
    timestamp = models.DateTimeField(auto_now_add=True)
    is_admin = models.BooleanField(
        _('admin status'),
        default=False,
        help_text=_('Designates whether the user is admin/superuser.'),
    )
    is_staff = models.BooleanField(
        _('staff status'),
        default=False,
        help_text=_('Designates whether the user can log into this admin site.'),
    )
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as active. '
            'Unselect this instead of deleting accounts.'
        ),
    )
    # The phone number doubles as the login identifier; no extra fields
    # are prompted for by createsuperuser.
    USERNAME_FIELD = 'phone'
    REQUIRED_FIELDS = []
    objects = UserManager()
    def __str__(self):
        return self.phone
    def has_perm(self, perm, obj=None):
        # Permissive stub: every user is granted every permission.
        # NOTE(review): tighten before relying on per-object permissions.
        return True
    def has_module_perms(self, app_label):
        # Permissive stub: access to all app modules is allowed.
        return True
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
# Create your models here.
class PhoneOTP(models.Model):
    """One-time-password verification record keyed by phone number (HOTP)."""
    # Validator enforcing an E.164-like phone format, e.g. '+999999999'.
    phone_regex = RegexValidator( regex =r'^\+?1?\d{9,14}$', message ="Phone number must be entered in the format: '+999999999'. Up to 14 digits allowed.")
    mobile = models.CharField(validators=[phone_regex], max_length=17, unique=True)
    # True once the OTP for this number has been successfully verified.
    isVerified = models.BooleanField(blank=False, default=False)
    counter = models.IntegerField(default=0, blank=False) # For HOTP Verification
    def __str__(self):
        return str(self.mobile)
class UserManager(BaseUserManager):
    """Manager for the phone-number-based custom ``User`` model."""

    def create_user(self, phone, password=None, is_staff=False, is_active=True, is_admin=False):
        """Create, hash-password and persist a user.

        Raises:
            ValueError: if ``phone`` or ``password`` is falsy.
        """
        # NOTE: the original defaults were '<PASSWORD>' redaction placeholders,
        # which is invalid Python; 'None' restores a loadable module while the
        # explicit check below still rejects a missing password.
        if not phone:
            raise ValueError('users must have a phone number')
        if not password:
            raise ValueError('user must have a password')
        user_obj = self.model(
            phone=phone,
            is_staff=is_staff,
            is_active=is_active,
            is_admin=is_admin,
        )
        user_obj.set_password(password)  # store the hash, never the plain text
        user_obj.save(using=self._db)
        return user_obj

    def create_staffuser(self, phone, password=None):
        """Create a user flagged as staff."""
        user = self.create_user(
            phone,
            password=password,
            is_staff=True,
        )
        return user

    def create_superuser(self, phone, password=None):
        """Create a user flagged as both staff and admin."""
        user = self.create_user(
            phone,
            password=password,
            is_staff=True,
            is_admin=True,
        )
        return user
class User(AbstractBaseUser):
    """Custom user model that authenticates with a phone number."""
    # Validator enforcing an E.164-like phone format, e.g. '+999999999'.
    phone_regex = RegexValidator( regex =r'^\+?1?\d{9,14}$', message ="Phone number must be entered in the format: '+999999999'. Up to 14 digits allowed.")
    phone = models.CharField(validators=[phone_regex], max_length=17, unique=True)
    # Set once when the row is first saved.
    timestamp = models.DateTimeField(auto_now_add=True)
    is_admin = models.BooleanField(
        _('admin status'),
        default=False,
        help_text=_('Designates whether the user is admin/superuser.'),
    )
    is_staff = models.BooleanField(
        _('staff status'),
        default=False,
        help_text=_('Designates whether the user can log into this admin site.'),
    )
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as active. '
            'Unselect this instead of deleting accounts.'
        ),
    )
    # The phone number doubles as the login identifier; no extra fields
    # are prompted for by createsuperuser.
    USERNAME_FIELD = 'phone'
    REQUIRED_FIELDS = []
    objects = UserManager()
    def __str__(self):
        return self.phone
    def has_perm(self, perm, obj=None):
        # Permissive stub: every user is granted every permission.
        # NOTE(review): tighten before relying on per-object permissions.
        return True
    def has_module_perms(self, app_label):
        # Permissive stub: access to all app modules is allowed.
        return True
"""Copy Windows Spotlight lock-screen images into a folder, keeping only
usable wallpapers (large enough, optionally landscape-only)."""
import os
import shutil
from PIL import Image
# ============================ CONFIG ================================
user = os.environ['USERPROFILE']
dest_folder = r"Downloads\_tmp_"  # Destination folder in Downloads
delete_portraits = False  # delete portraits or not?
min_size = 500  # minimum accepted dimension in pixels
# ============================= PATHS ================================
local_packages = r"AppData\Local\Packages"
spotlight_folder = r"Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\LocalState\Assets"
origin = os.path.join(user, local_packages, spotlight_folder)
destination = os.path.join(user, dest_folder)
# ======================== COLOR MESSAGES ============================
COLOR_HEAD = '\033[95m'
COLOR_OK = '\033[92m'
COLOR_WARN = '\033[93m'
COLOR_FAIL = '\033[91m'
COLOR_ENDC = '\033[0m'
color_msg_fail_origin = COLOR_FAIL + "origin path does not exists:\n" + COLOR_ENDC
color_msg_fail_copy = COLOR_FAIL + "copy fail:\n" + COLOR_ENDC
color_msg_fail_rename = COLOR_FAIL + "rename fail:\n" + COLOR_ENDC
color_msg_fail_identify = COLOR_FAIL + "identify fail -> deleted" + COLOR_ENDC
color_msg_warn_exists = COLOR_WARN + "already exists -> not copied" + COLOR_ENDC
color_msg_ok_del_small = COLOR_FAIL + "too small -> deleted" + COLOR_ENDC
color_msg_ok_del_portrait = COLOR_FAIL + "portrait -> deleted" + COLOR_ENDC
color_msg_ok_keep_portrait = COLOR_OK + "portrait -> kept" + COLOR_ENDC
color_msg_ok_keep_landscape = COLOR_OK + "landscape -> kept" + COLOR_ENDC
print(COLOR_HEAD + "Made by `SylannBin` (copyleft)\n\
Thank you for using this simple script. I hope it serves you well.\n\
Attention! This script works for Windows only.\n\
Thanks to Microsoft for bringing us beautiful images.\n" + COLOR_ENDC)
# ============================= SCRIPT ===============================
# Ensure destination folder exists (exist_ok avoids a check-then-create race).
os.makedirs(destination, exist_ok=True)
print("Copying files from:\n{0}\nto: {1}\n\nProcessing...\n".format(origin, destination))
# Fetch images from origin and work on each of them
for filename in os.listdir(origin):
    # prepare all paths and names
    shortname = filename[:16] + '.jpg'
    origpath = os.path.join(origin, filename)
    destpath = os.path.join(destination, filename)
    bestpath = os.path.join(destination, shortname)
    # check the origin file
    if not os.path.isfile(origpath):
        print(color_msg_fail_origin + origpath)
        break
    # check that file does not already exist in destination folder
    if os.path.isfile(bestpath):
        print(shortname + ": ---- x ---- | " + color_msg_warn_exists)
        continue
    # make copy, then verify it landed
    shutil.copy(origpath, destination)
    if not os.path.isfile(destpath):
        print(color_msg_fail_copy + destpath)
        break
    # rename copy file, then verify
    os.rename(destpath, bestpath)
    if not os.path.isfile(bestpath):
        print(color_msg_fail_rename + bestpath)
        break
    # Get dimensions; (0, 0) marks files Pillow cannot identify.
    # The context manager releases the file handle (the original leaked it),
    # and 'except Exception' replaces a bare 'except:'.
    try:
        with Image.open(bestpath) as img:
            width, height = img.size
    except Exception:
        width, height = 0, 0
    # format a label for display purpose
    label = "{0}: {1:>4} x {2:>4} | ".format(shortname, width, height)
    # Drop unidentifiable or too-small images, optionally drop portraits,
    # keep everything else.
    if height == 0 and width == 0:
        os.unlink(bestpath)
        print(label + color_msg_fail_identify)
    elif height < min_size or width < min_size:
        os.unlink(bestpath)
        print(label + color_msg_ok_del_small)
    elif height > width and delete_portraits:
        os.unlink(bestpath)
        print(label + color_msg_ok_del_portrait)
    elif height > width:
        print(label + color_msg_ok_keep_portrait)
    else:
        print(label + color_msg_ok_keep_landscape)
import shutil
from PIL import Image
# ============================ CONFIG ================================
user = os.environ['USERPROFILE']
dest_folder = r"Downloads\_tmp_" # Destination folder in Downloads
delete_portraits = False # delete portraits or not?
min_size = 500 # dimension in pixels
# ============================= PATHS ================================
local_packages = r"AppData\Local\Packages"
spotlight_folder = r"Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\LocalState\Assets"
origin = os.path.join(user, local_packages, spotlight_folder)
destination = os.path.join(user, dest_folder)
# ======================== COLOR MESSAGES ============================
COLOR_HEAD = '\033[95m'
COLOR_OK = '\033[92m'
COLOR_WARN = '\033[93m'
COLOR_FAIL = '\033[91m'
COLOR_ENDC = '\033[0m'
color_msg_fail_origin = COLOR_FAIL + "origin path does not exists:\n" + COLOR_ENDC
color_msg_fail_copy = COLOR_FAIL + "copy fail:\n" + COLOR_ENDC
color_msg_fail_rename = COLOR_FAIL + "rename fail:\n" + COLOR_ENDC
color_msg_fail_identify = COLOR_FAIL + "identify fail -> deleted" + COLOR_ENDC
color_msg_warn_exists = COLOR_WARN + "already exists -> not copied" + COLOR_ENDC
color_msg_ok_del_small = COLOR_FAIL + "too small -> deleted" + COLOR_ENDC
color_msg_ok_del_portrait = COLOR_FAIL + "portrait -> deleted" + COLOR_ENDC
color_msg_ok_keep_portrait = COLOR_OK + "portrait -> kept" + COLOR_ENDC
color_msg_ok_keep_landscape = COLOR_OK + "landscape -> kept" + COLOR_ENDC
print(COLOR_HEAD + "Made by `SylannBin` (copyleft)\n\
Thank you for using this simple script. I hope it serves you well.\n\
Attention! This script works for Windows only.\n\
Thanks to Microsoft for bringing us beautiful images.\n" + COLOR_ENDC)
# ============================= SCRIPT ===============================
# Ensure destination folder exists
if not os.path.exists(destination):
os.makedirs(destination)
print("Copying files from:\n{0}\nto: {1}\n\nProcessing...\n".format(origin, destination))
# Fetch images from origin and work on each of them
for filename in os.listdir(origin):
# prepare all paths and names
shortname = filename[:16] + '.jpg'
origpath = os.path.join(origin, filename)
destpath = os.path.join(destination, filename)
bestpath = os.path.join(destination, shortname)
# check the origin file
if not os.path.isfile(origpath):
print(color_msg_fail_origin + origpath)
break
# check that file does not aready exist in destination folder
if os.path.isfile(bestpath):
print(shortname + ": ---- x ---- | " + color_msg_warn_exists)
continue
# make copy
shutil.copy(origpath, destination)
# check
if not os.path.isfile(destpath):
print(color_msg_fail_copy + destpath)
break
# rename copy file
os.rename(destpath, bestpath)
# check
if not os.path.isfile(bestpath):
print(color_msg_fail_rename + bestpath)
break
# Get dimensions
try:
width, height = Image.open(bestpath).size
except:
width, height = 0,0
# format a label for display purpose
# label = shortname +': ' + str(width).rjust(4) + ' x ' + str(height).rjust(4) + ' | '
label = "{0}: {1:>4} x {2:>4} | ".format(shortname, width, height)
# remove small images
# [remove portraits]
# keep landscapes
if height == 0 and width == 0:
os.unlink(bestpath)
print(label + color_msg_fail_identify)
elif height < min_size or width < min_size:
os.unlink(bestpath)
print(label + color_msg_ok_del_small)
elif height > width and delete_portraits:
os.unlink(bestpath)
print(label + color_msg_ok_del_portrait)
elif height > width:
print(label + color_msg_ok_keep_portrait)
else:
print(label + color_msg_ok_keep_landscape) | 0.293404 | 0.058939 |
# Author: fvj
# License: BSD 3 clause
import datetime
import argparse
import atexit
import os
import numpy as np
from pyrad.io import get_trtfile_list, read_trt_data, write_trt_cell_data
print(__doc__)
def main():
    """Split raw TRT data into one file per TRT storm cell.

    For every (start, end) period given on the command line, reads all the
    TRT files in the period, concatenates the cell records, groups them by
    cell ID and writes one file per cell that has at least ``nsteps_min``
    time steps.
    """
    # parse the arguments
    parser = argparse.ArgumentParser(
        description='Entry to Pyrad processing framework')
    # positional arguments
    parser.add_argument(
        'start_times', type=str,
        help=('Start times of the data to process. Format YYYYMMDDhhmmss.' +
        'Coma separated'))
    parser.add_argument(
        'end_times', type=str,
        help=('End times of the data to process. Format YYYYMMDDhhmmss.' +
        'Coma separated'))
    # keyword arguments
    parser.add_argument(
        '--raw_trtbase', type=str,
        default='/store/msrad/radar/rad4alp/TRT/',
        help='name of folder containing the TRT cell data')
    parser.add_argument(
        '--proc_trtbase', type=str,
        default='/store/msrad/radar/trt/',
        help='name of folder containing the TRT cell data')
    parser.add_argument(
        '--nsteps_min', type=int,
        default=3,
        help=('Minimum number of time steps to consider the TRT cell ' +
        'worth processing'))
    args = parser.parse_args()
    print("====== TRT cell extraction started: %s" %
          datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
    atexit.register(_print_end_msg,
                    "====== TRT cell extraction finished: ")
    start_time_list = args.start_times.split(',')
    end_time_list = args.end_times.split(',')
    # Process each (start, end) period independently.
    for i, start_time_str in enumerate(start_time_list):
        end_time_str = end_time_list[i]
        starttime = datetime.datetime.strptime(start_time_str, '%Y%m%d%H%M%S')
        endtime = datetime.datetime.strptime(end_time_str, '%Y%m%d%H%M%S')
        data_input_path = (
            args.raw_trtbase+starttime.strftime('%y%j/TRTC%y%j/'))
        data_output_path = (
            args.proc_trtbase+starttime.strftime('%Y-%m-%d')+'/TRTC_cell/')
        if not os.path.isdir(data_output_path):
            os.makedirs(data_output_path)
        flist = get_trtfile_list(data_input_path, starttime, endtime)
        if flist is None:
            continue
        # Per-time-step accumulators; np.ma arrays hold fields that may have
        # missing values in the raw files.
        # NOTE(review): np.append may return a plain ndarray, so masks on the
        # masked accumulators might not survive concatenation — confirm that
        # write_trt_cell_data copes with that.
        traj_ID = np.array([], dtype=int)
        yyyymmddHHMM = np.array([], dtype=datetime.datetime)
        lon = np.array([], dtype=float)
        lat = np.array([], dtype=float)
        ell_L = np.array([], dtype=float)
        ell_S = np.array([], dtype=float)
        ell_or = np.array([], dtype=float)
        area = np.array([], dtype=float)
        vel_x = np.ma.array([], dtype=float)
        vel_y = np.ma.array([], dtype=float)
        det = np.ma.array([], dtype=float)
        RANKr = np.array([], dtype=int)
        CG_n = np.array([], dtype=int)
        CG_p = np.array([], dtype=int)
        CG = np.array([], dtype=int)
        CG_percent_p = np.ma.array([], dtype=float)
        ET45 = np.ma.array([], dtype=float)
        ET45m = np.ma.array([], dtype=float)
        ET15 = np.ma.array([], dtype=float)
        ET15m = np.ma.array([], dtype=float)
        VIL = np.ma.array([], dtype=float)
        maxH = np.ma.array([], dtype=float)
        maxHm = np.ma.array([], dtype=float)
        POH = np.ma.array([], dtype=float)
        RANK = np.ma.array([], dtype=float)
        Dvel_x = np.ma.array([], dtype=float)
        Dvel_y = np.ma.array([], dtype=float)
        cell_contour = []
        # Concatenate the records of every TRT file in the period.
        for fname in flist:
            print('Reading TRT file '+fname)
            (traj_ID_aux, yyyymmddHHMM_aux, lon_aux, lat_aux, ell_L_aux,
             ell_S_aux, ell_or_aux, area_aux, vel_x_aux, vel_y_aux, det_aux,
             RANKr_aux, CG_n_aux, CG_p_aux, CG_aux, CG_percent_p_aux,
             ET45_aux, ET45m_aux, ET15_aux, ET15m_aux, VIL_aux, maxH_aux,
             maxHm_aux, POH_aux, RANK_aux, Dvel_x_aux, Dvel_y_aux,
             cell_contour_aux) = read_trt_data(fname)
            if traj_ID_aux is None:
                continue
            traj_ID = np.append(traj_ID, traj_ID_aux)
            yyyymmddHHMM = np.append(yyyymmddHHMM, yyyymmddHHMM_aux)
            lon = np.append(lon, lon_aux)
            lat = np.append(lat, lat_aux)
            ell_L = np.append(ell_L, ell_L_aux)
            ell_S = np.append(ell_S, ell_S_aux)
            ell_or = np.append(ell_or, ell_or_aux)
            area = np.append(area, area_aux)
            vel_x = np.append(vel_x, vel_x_aux)
            vel_y = np.append(vel_y, vel_y_aux)
            det = np.append(det, det_aux)
            RANKr = np.append(RANKr, RANKr_aux)
            CG_n = np.append(CG_n, CG_n_aux)
            CG_p = np.append(CG_p, CG_p_aux)
            CG = np.append(CG, CG_aux)
            CG_percent_p = np.append(CG_percent_p, CG_percent_p_aux)
            ET45 = np.append(ET45, ET45_aux)
            ET45m = np.append(ET45m, ET45m_aux)
            ET15 = np.append(ET15, ET15_aux)
            ET15m = np.append(ET15m, ET15m_aux)
            VIL = np.append(VIL, VIL_aux)
            maxH = np.append(maxH, maxH_aux)
            maxHm = np.append(maxHm, maxHm_aux)
            POH = np.append(POH, POH_aux)
            RANK = np.append(RANK, RANK_aux)
            Dvel_x = np.append(Dvel_x, Dvel_x_aux)
            Dvel_y = np.append(Dvel_y, Dvel_y_aux)
            cell_contour.extend(cell_contour_aux)
        traj_ID_unique_list = np.unique(traj_ID)
        print('Total Number of cells: '+str(traj_ID_unique_list.size))
        ncells = 0
        # Write one file per cell that lived at least nsteps_min time steps.
        for traj_ID_unique in traj_ID_unique_list:
            ind = np.where(traj_ID == traj_ID_unique)[0]
            if ind.size < args.nsteps_min:
                continue
            traj_ID_cell = traj_ID[ind]
            yyyymmddHHMM_cell = yyyymmddHHMM[ind]
            lon_cell = lon[ind]
            lat_cell = lat[ind]
            ell_L_cell = ell_L[ind]
            ell_S_cell = ell_S[ind]
            ell_or_cell = ell_or[ind]
            area_cell = area[ind]
            vel_x_cell = vel_x[ind]
            vel_y_cell = vel_y[ind]
            det_cell = det[ind]
            RANKr_cell = RANKr[ind]
            CG_n_cell = CG_n[ind]
            CG_p_cell = CG_p[ind]
            CG_cell = CG[ind]
            CG_percent_p_cell = CG_percent_p[ind]
            ET45_cell = ET45[ind]
            ET45m_cell = ET45m[ind]
            ET15_cell = ET15[ind]
            ET15m_cell = ET15m[ind]
            VIL_cell = VIL[ind]
            maxH_cell = maxH[ind]
            maxHm_cell = maxHm[ind]
            POH_cell = POH[ind]
            RANK_cell = RANK[ind]
            Dvel_x_cell = Dvel_x[ind]
            Dvel_y_cell = Dvel_y[ind]
            cell_contour_cell = []
            for ind_el in ind:
                cell_contour_cell.append(cell_contour[ind_el])
            fname = data_output_path+str(traj_ID_unique)+'.trt'
            fname = write_trt_cell_data(
                traj_ID_cell, yyyymmddHHMM_cell, lon_cell, lat_cell,
                ell_L_cell, ell_S_cell, ell_or_cell, area_cell, vel_x_cell,
                vel_y_cell, det_cell, RANKr_cell, CG_n_cell, CG_p_cell,
                CG_cell, CG_percent_p_cell, ET45_cell, ET45m_cell, ET15_cell,
                ET15m_cell, VIL_cell, maxH_cell, maxHm_cell, POH_cell,
                RANK_cell, Dvel_x_cell,
                Dvel_y_cell, cell_contour_cell, fname)
            print('Written individual TRT cell file '+fname)
            ncells += 1
        print('Number of cells with '+str(args.nsteps_min) +
              ' or more time steps: '+str(ncells))
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
    main()  # run the TRT cell extraction when invoked as a script
# License: BSD 3 clause
import datetime
import argparse
import atexit
import os
import numpy as np
from pyrad.io import get_trtfile_list, read_trt_data, write_trt_cell_data
print(__doc__)
def main():
    """Split raw TRT data into one file per TRT storm cell.

    For every (start, end) period given on the command line, reads all the
    TRT files in the period, concatenates the cell records, groups them by
    cell ID and writes one file per cell that has at least ``nsteps_min``
    time steps.
    """
    # parse the arguments
    parser = argparse.ArgumentParser(
        description='Entry to Pyrad processing framework')
    # positional arguments
    parser.add_argument(
        'start_times', type=str,
        help=('Start times of the data to process. Format YYYYMMDDhhmmss.' +
        'Coma separated'))
    parser.add_argument(
        'end_times', type=str,
        help=('End times of the data to process. Format YYYYMMDDhhmmss.' +
        'Coma separated'))
    # keyword arguments
    parser.add_argument(
        '--raw_trtbase', type=str,
        default='/store/msrad/radar/rad4alp/TRT/',
        help='name of folder containing the TRT cell data')
    parser.add_argument(
        '--proc_trtbase', type=str,
        default='/store/msrad/radar/trt/',
        help='name of folder containing the TRT cell data')
    parser.add_argument(
        '--nsteps_min', type=int,
        default=3,
        help=('Minimum number of time steps to consider the TRT cell ' +
        'worth processing'))
    args = parser.parse_args()
    print("====== TRT cell extraction started: %s" %
          datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
    atexit.register(_print_end_msg,
                    "====== TRT cell extraction finished: ")
    start_time_list = args.start_times.split(',')
    end_time_list = args.end_times.split(',')
    # Process each (start, end) period independently.
    for i, start_time_str in enumerate(start_time_list):
        end_time_str = end_time_list[i]
        starttime = datetime.datetime.strptime(start_time_str, '%Y%m%d%H%M%S')
        endtime = datetime.datetime.strptime(end_time_str, '%Y%m%d%H%M%S')
        data_input_path = (
            args.raw_trtbase+starttime.strftime('%y%j/TRTC%y%j/'))
        data_output_path = (
            args.proc_trtbase+starttime.strftime('%Y-%m-%d')+'/TRTC_cell/')
        if not os.path.isdir(data_output_path):
            os.makedirs(data_output_path)
        flist = get_trtfile_list(data_input_path, starttime, endtime)
        if flist is None:
            continue
        # Per-time-step accumulators; np.ma arrays hold fields that may have
        # missing values in the raw files.
        # NOTE(review): np.append may return a plain ndarray, so masks on the
        # masked accumulators might not survive concatenation — confirm that
        # write_trt_cell_data copes with that.
        traj_ID = np.array([], dtype=int)
        yyyymmddHHMM = np.array([], dtype=datetime.datetime)
        lon = np.array([], dtype=float)
        lat = np.array([], dtype=float)
        ell_L = np.array([], dtype=float)
        ell_S = np.array([], dtype=float)
        ell_or = np.array([], dtype=float)
        area = np.array([], dtype=float)
        vel_x = np.ma.array([], dtype=float)
        vel_y = np.ma.array([], dtype=float)
        det = np.ma.array([], dtype=float)
        RANKr = np.array([], dtype=int)
        CG_n = np.array([], dtype=int)
        CG_p = np.array([], dtype=int)
        CG = np.array([], dtype=int)
        CG_percent_p = np.ma.array([], dtype=float)
        ET45 = np.ma.array([], dtype=float)
        ET45m = np.ma.array([], dtype=float)
        ET15 = np.ma.array([], dtype=float)
        ET15m = np.ma.array([], dtype=float)
        VIL = np.ma.array([], dtype=float)
        maxH = np.ma.array([], dtype=float)
        maxHm = np.ma.array([], dtype=float)
        POH = np.ma.array([], dtype=float)
        RANK = np.ma.array([], dtype=float)
        Dvel_x = np.ma.array([], dtype=float)
        Dvel_y = np.ma.array([], dtype=float)
        cell_contour = []
        # Concatenate the records of every TRT file in the period.
        for fname in flist:
            print('Reading TRT file '+fname)
            (traj_ID_aux, yyyymmddHHMM_aux, lon_aux, lat_aux, ell_L_aux,
             ell_S_aux, ell_or_aux, area_aux, vel_x_aux, vel_y_aux, det_aux,
             RANKr_aux, CG_n_aux, CG_p_aux, CG_aux, CG_percent_p_aux,
             ET45_aux, ET45m_aux, ET15_aux, ET15m_aux, VIL_aux, maxH_aux,
             maxHm_aux, POH_aux, RANK_aux, Dvel_x_aux, Dvel_y_aux,
             cell_contour_aux) = read_trt_data(fname)
            if traj_ID_aux is None:
                continue
            traj_ID = np.append(traj_ID, traj_ID_aux)
            yyyymmddHHMM = np.append(yyyymmddHHMM, yyyymmddHHMM_aux)
            lon = np.append(lon, lon_aux)
            lat = np.append(lat, lat_aux)
            ell_L = np.append(ell_L, ell_L_aux)
            ell_S = np.append(ell_S, ell_S_aux)
            ell_or = np.append(ell_or, ell_or_aux)
            area = np.append(area, area_aux)
            vel_x = np.append(vel_x, vel_x_aux)
            vel_y = np.append(vel_y, vel_y_aux)
            det = np.append(det, det_aux)
            RANKr = np.append(RANKr, RANKr_aux)
            CG_n = np.append(CG_n, CG_n_aux)
            CG_p = np.append(CG_p, CG_p_aux)
            CG = np.append(CG, CG_aux)
            CG_percent_p = np.append(CG_percent_p, CG_percent_p_aux)
            ET45 = np.append(ET45, ET45_aux)
            ET45m = np.append(ET45m, ET45m_aux)
            ET15 = np.append(ET15, ET15_aux)
            ET15m = np.append(ET15m, ET15m_aux)
            VIL = np.append(VIL, VIL_aux)
            maxH = np.append(maxH, maxH_aux)
            maxHm = np.append(maxHm, maxHm_aux)
            POH = np.append(POH, POH_aux)
            RANK = np.append(RANK, RANK_aux)
            Dvel_x = np.append(Dvel_x, Dvel_x_aux)
            Dvel_y = np.append(Dvel_y, Dvel_y_aux)
            cell_contour.extend(cell_contour_aux)
        traj_ID_unique_list = np.unique(traj_ID)
        print('Total Number of cells: '+str(traj_ID_unique_list.size))
        ncells = 0
        # Write one file per cell that lived at least nsteps_min time steps.
        for traj_ID_unique in traj_ID_unique_list:
            ind = np.where(traj_ID == traj_ID_unique)[0]
            if ind.size < args.nsteps_min:
                continue
            traj_ID_cell = traj_ID[ind]
            yyyymmddHHMM_cell = yyyymmddHHMM[ind]
            lon_cell = lon[ind]
            lat_cell = lat[ind]
            ell_L_cell = ell_L[ind]
            ell_S_cell = ell_S[ind]
            ell_or_cell = ell_or[ind]
            area_cell = area[ind]
            vel_x_cell = vel_x[ind]
            vel_y_cell = vel_y[ind]
            det_cell = det[ind]
            RANKr_cell = RANKr[ind]
            CG_n_cell = CG_n[ind]
            CG_p_cell = CG_p[ind]
            CG_cell = CG[ind]
            CG_percent_p_cell = CG_percent_p[ind]
            ET45_cell = ET45[ind]
            ET45m_cell = ET45m[ind]
            ET15_cell = ET15[ind]
            ET15m_cell = ET15m[ind]
            VIL_cell = VIL[ind]
            maxH_cell = maxH[ind]
            maxHm_cell = maxHm[ind]
            POH_cell = POH[ind]
            RANK_cell = RANK[ind]
            Dvel_x_cell = Dvel_x[ind]
            Dvel_y_cell = Dvel_y[ind]
            cell_contour_cell = []
            for ind_el in ind:
                cell_contour_cell.append(cell_contour[ind_el])
            fname = data_output_path+str(traj_ID_unique)+'.trt'
            fname = write_trt_cell_data(
                traj_ID_cell, yyyymmddHHMM_cell, lon_cell, lat_cell,
                ell_L_cell, ell_S_cell, ell_or_cell, area_cell, vel_x_cell,
                vel_y_cell, det_cell, RANKr_cell, CG_n_cell, CG_p_cell,
                CG_cell, CG_percent_p_cell, ET45_cell, ET45m_cell, ET15_cell,
                ET15m_cell, VIL_cell, maxH_cell, maxHm_cell, POH_cell,
                RANK_cell, Dvel_x_cell,
                Dvel_y_cell, cell_contour_cell, fname)
            print('Written individual TRT cell file '+fname)
            ncells += 1
        print('Number of cells with '+str(args.nsteps_min) +
              ' or more time steps: '+str(ncells))
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
    main()  # run the TRT cell extraction when invoked as a script
import os
import sys
from importlib.abc import MetaPathFinder, Loader
from importlib.machinery import ModuleSpec
from .lib_import import VersionImporter
__all__ = [
'init_finder', 'init_loader', 'loader',
'import_name_to_name_version', 'PyLibImportFinder', 'PyLibImportLoader',
]
LOADER = None
FINDER = None
def init_finder(**kwargs):
    """Create the global VersionImporter and initialize the finder."""
    global FINDER
    if kwargs:
        init_loader(**kwargs)
    finder = PyLibImportFinder()
    # Must come before FileFinder, so insert at the front of meta_path.
    sys.meta_path.insert(0, finder)
    FINDER = finder
def init_loader(download_dir=None, install_dir=None, index_url='https://pypi.org/simple/', python_version=None,
                install_dependencies=False, reset_modules=True, clean_modules=False, contained_modules=None, **kwargs):
    """Create the general global importer."""
    global LOADER
    LOADER = PyLibImportLoader(
        download_dir=download_dir, install_dir=install_dir,
        index_url=index_url, python_version=python_version,
        install_dependencies=install_dependencies,
        reset_modules=reset_modules, clean_modules=clean_modules,
        contained_modules=contained_modules, **kwargs)
    return LOADER
def loader():
    """Return the global VersionImporter/Loader (read-only access)."""
    # No 'global' statement needed: module globals are readable directly.
    return LOADER
def import_name_to_name_version(import_name):
    """Convert the import name to a name and version.

    Args:
        import_name (str): Import name with a version (Ex: "custom_0_0_0")

    Returns:
        name (str): Just the package name (Ex: "custom")
        version (str)[None]: Version of the package (Ex: "0.0.0")
    """
    chunks = str(import_name).split('_')
    # The version starts at the first purely-numeric '_'-separated chunk.
    for idx, chunk in enumerate(chunks):
        if chunk and chunk.isdigit():
            return '_'.join(chunks[:idx]), '.'.join(chunks[idx:])
    return import_name, None
class PyLibImportFinder(MetaPathFinder):
    """Meta-path finder resolving versioned import names like ``pkg_1_0_0``."""

    @classmethod
    def find_spec(cls, full_name, paths=None, target=None):
        imp = loader()
        name, version = import_name_to_name_version(full_name)
        # Nothing to do without a configured importer or a versioned name.
        if imp is None or version is None:
            return None
        # Ask the importer whether this name/version pair is available.
        name, version, import_name, path = imp.find_module(name, version)
        if path is None:
            return None
        import_path = imp.make_import_path(name, version)
        return ModuleSpec(import_name, imp, origin=import_path)
class PyLibImportLoader(VersionImporter, Loader):
    """Loader that executes a specific installed version of a package."""

    @classmethod
    def exec_module(cls, module):
        """Execute *module* by delegating to the VersionImporter that found it."""
        imp = module.__spec__.loader
        import_name = module.__spec__.name
        import_path = module.__spec__.origin
        name, version = import_name_to_name_version(import_name)
        try:
            imp.import_module(name, version)  # If success will be sys.modules["custom_0_0_0"]
            module.__path__ = [import_path]
        except Exception:
            # Best-effort: leave the module partially initialized rather than
            # propagate. The original 'except (ImportError, Exception)' was
            # redundant — Exception already covers ImportError.
            pass
import sys
from importlib.abc import MetaPathFinder, Loader
from importlib.machinery import ModuleSpec
from .lib_import import VersionImporter
__all__ = [
'init_finder', 'init_loader', 'loader',
'import_name_to_name_version', 'PyLibImportFinder', 'PyLibImportLoader',
]
LOADER = None
FINDER = None
def init_finder(**kwargs):
    """Create the global VersionImporter and initialize the finder."""
    global FINDER
    if kwargs:
        init_loader(**kwargs)
    finder = PyLibImportFinder()
    # Must come before FileFinder, so insert at the front of meta_path.
    sys.meta_path.insert(0, finder)
    FINDER = finder
def init_loader(download_dir=None, install_dir=None, index_url='https://pypi.org/simple/', python_version=None,
                install_dependencies=False, reset_modules=True, clean_modules=False, contained_modules=None, **kwargs):
    """Create the general global importer."""
    global LOADER
    LOADER = PyLibImportLoader(
        download_dir=download_dir, install_dir=install_dir,
        index_url=index_url, python_version=python_version,
        install_dependencies=install_dependencies,
        reset_modules=reset_modules, clean_modules=clean_modules,
        contained_modules=contained_modules, **kwargs)
    return LOADER
def loader():
    """Return the global VersionImporter/Loader (read-only access)."""
    # No 'global' statement needed: module globals are readable directly.
    return LOADER
def import_name_to_name_version(import_name):
    """Convert the import name to a name and version.

    Args:
        import_name (str): Import name with a version (Ex: "custom_0_0_0")

    Returns:
        name (str): Just the package name (Ex: "custom")
        version (str)[None]: Version of the package (Ex: "0.0.0")
    """
    chunks = str(import_name).split('_')
    # The version starts at the first purely-numeric '_'-separated chunk.
    for idx, chunk in enumerate(chunks):
        if chunk and chunk.isdigit():
            return '_'.join(chunks[:idx]), '.'.join(chunks[idx:])
    return import_name, None
class PyLibImportFinder(MetaPathFinder):
    """Meta-path finder resolving versioned import names like ``pkg_1_0_0``."""

    @classmethod
    def find_spec(cls, full_name, paths=None, target=None):
        imp = loader()
        name, version = import_name_to_name_version(full_name)
        # Nothing to do without a configured importer or a versioned name.
        if imp is None or version is None:
            return None
        # Ask the importer whether this name/version pair is available.
        name, version, import_name, path = imp.find_module(name, version)
        if path is None:
            return None
        import_path = imp.make_import_path(name, version)
        return ModuleSpec(import_name, imp, origin=import_path)
class PyLibImportLoader(VersionImporter, Loader):
    """Loader that executes a specific installed version of a package."""

    @classmethod
    def exec_module(cls, module):
        """Execute *module* by delegating to the VersionImporter that found it."""
        imp = module.__spec__.loader
        import_name = module.__spec__.name
        import_path = module.__spec__.origin
        name, version = import_name_to_name_version(import_name)
        try:
            imp.import_module(name, version)  # If success will be sys.modules["custom_0_0_0"]
            module.__path__ = [import_path]
        except Exception:
            # Best-effort: leave the module partially initialized rather than
            # propagate. The original 'except (ImportError, Exception)' was
            # redundant — Exception already covers ImportError.
            pass
import setuptools
long_description = """# Pandas Bokeh
**Pandas Bokeh** provides a [Bokeh](https://bokeh.pydata.org/en/latest/) plotting backend for [Pandas](https://pandas.pydata.org/) and [GeoPandas](http://geopandas.org/), similar to the already existing [Visualization](https://pandas.pydata.org/pandas-docs/stable/visualization.html) feature of Pandas. Importing the library adds a complementary plotting method ***plot_bokeh()*** on **DataFrames** and **Series**. It also has native plotting backend support for Pandas >= 0.25.
For more information and examples have a look at the [Github Repository](https://github.com/PatrikHlobil/Pandas-Bokeh).
---
## Installation
You can install **Pandas Bokeh** from *PyPI* via **pip**:
pip install pandas-bokeh
or *conda*:
conda install -c patrikhlobil pandas-bokeh
**Pandas Bokeh** is officially supported on Python 3.5 and above.
---
## Description
With **Pandas Bokeh**, creating stunning, interactive, HTML-based visualization is as easy as calling:
```python
df.plot_bokeh()
```
In release **0.3**, the following plot types are supported:
* line
* step
* point
* scatter
* bar
* histogram
* area
* pie
* mapplot
<br>
Furthermore, also **GeoPandas** and **Pyspark** have a new plotting backend as can be seen in the provided [examples](https://github.com/PatrikHlobil/Pandas-Bokeh#geoplots).
<br>
**Pandas Bokeh** is a high-level API for **Bokeh** on top of **Pandas** and **GeoPandas** that tries to figure out best, what the user wants to plot. Nevertheless, there are many options for customizing the plots, for example:
* **figsize**: Choose width & height of the plot
* **title**: Sets title of the plot
* **xlim**/**ylim**: Set visible range of plot for x- and y-axis (also works for *datetime x-axis*)
* **xlabel**/**ylabel**: Set x- and y-labels
* **logx**/**logy**: Set log-scale on x-/y-axis
* **xticks**/**yticks**: Explicitly set the ticks on the axes
* **colormap**: Defines the colors to plot. Can be either a list of colors or the name of a [Bokeh color palette](https://bokeh.pydata.org/en/latest/docs/reference/palettes.html)
* **hovertool_string**: For customization of hovertool content
Each plot type like scatterplot or histogram further has many more additional customization options that is described [here](https://github.com/PatrikHlobil/Pandas-Bokeh).
"""
import pandas_bokeh
# Single-source the package version from the package itself.
version = pandas_bokeh.__version__
setuptools.setup(
    name="pandas-bokeh",
    version=version,
    # NOTE(review): author/author_email look like redaction placeholders —
    # fill in the real values before publishing.
    author="<NAME>",
    author_email="<EMAIL>",
    description="Bokeh plotting backend for Pandas, GeoPandas & Pyspark",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/PatrikHlobil/Pandas-Bokeh",
    packages=setuptools.find_packages(),
    install_requires=["bokeh >=0.13", "pandas >=0.22.0"],
    classifiers=[
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Visualization'
    ],
    python_requires=">=3.5"
)
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import os
import os
import logging
import pandas as pd
from hansel import Crumb
from invoke import task
from boyle.files.search import recursive_glob
from neuro_pypes.run import run_debug, run_wf
log = logging.getLogger()
# to get std_brains with some atlases and templates:
# git clone https://github.com/Neurita/std_brains.git
STDB_DIR = '/home/hansel/data/std_brains'
HAMM_DIR = os.path.join(STDB_DIR, 'atlases', 'hammers')
HAMM_MNI = os.path.join(HAMM_DIR, 'Hammers_mith_atlas_n30r83_SPM5.nii.gz')
SPM_CANONICAL_BRAIN_2MM = os.path.join(STDB_DIR, 'templates', 'spm_canonical', 'single_subj_T1_brain.nii.gz')
def verbose_switch(verbose=False):
    """Set the root logger to DEBUG when `verbose` is true, INFO otherwise."""
    level = logging.DEBUG if verbose else logging.INFO
    logging.getLogger().setLevel(level)
@task
def decompress_dicoms(ctx, input_dir):
    """ Decompress all *.dcm files recursively found in `input_dir`.
    This uses 'gdcmconv --raw'.
    It works when 'dcm2nii' shows the `Unsupported Transfer Syntax` error. This error is
    usually caused by lack of JPEG2000 support in dcm2nii compilation.
    Read more:
    http://www.nitrc.org/plugins/mwiki/index.php/dcm2nii:MainPage#Transfer_Syntaxes_and_Compressed_Images
    Parameters
    ----------
    input_dir: str
        Folder path
    Notes
    -----
    The *.dcm files in `input_dir` will be overwritten.
    """
    import subprocess
    dcmfiles = sorted(recursive_glob(input_dir, '*.dcm'))
    for dcm in dcmfiles:
        cmd = 'gdcmconv --raw -i "{0}" -o "{0}"'.format(dcm)
        log.debug('Calling {}.'.format(cmd))
        try:
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError:
            # Best-effort: keep converting the remaining files, but record the
            # failure instead of silently swallowing every exception (the
            # original bare `except: pass` also ate KeyboardInterrupt).
            log.exception('gdcmconv failed for {}.'.format(dcm))
@task
def dcm2nii(ctx, input_crumb_path, output_dir, regex='fnmatch', ncpus=3):
    """ Convert all DICOM files within `input_crumb_path` into NifTI in `output_dir`.
    Will copy only the NifTI files reoriented by MRICron's dcm2nii command.
    Will rename the NifTI files that are matched with recognized modalities to the short
    modality name from config.ACQ_PATTERNS.
    Parameters
    ----------
    input_crumb_path: str
        A crumb path str indicating the whole path until the DICOM files.
        Example: '/home/hansel/data/{group}/{subj_id}/{session_id}/{acquisition}/{dcm_file}
        The crumb argument just before the last one will be used as folder container reference
        for the DICOM series.
    output_dir: str
        The root folder path where to save the tree of nifti files.
        Example: '/home/hansel/nifti'
        This function will create the same tree as the crumbs in input_crumb_path, hence
        for the example above the output would have the following structure:
        '/home/hansel/nifti/{group}/{subj_id}/{session_id}/{nifti_file}'
        Where {nifti_file} will take the name from the {acquisition} or from the
        patterns in ACQ_PATTERNS in `config.py` file.
    regex: str
        The regular expression syntax you may want to set in the Crumbs.
        See hansel.Crumb documentation for this.
    ncpus: int
        this says the number of processes that will be launched for dcm2nii in parallel.
    """
    from boyle.dicom.convert import convert_dcm2nii
    # Expand '~' in both user-supplied paths.
    input_dir = os.path.expanduser(input_crumb_path)
    output_dir = os.path.expanduser(output_dir)
    if not os.path.exists(output_dir):
        log.info('Creating output folder {}.'.format(output_dir))
        os.makedirs(output_dir)
    else:
        log.info('Output folder {} already exists, this will overwrite/merge '
                 'whatever is inside.'.format(output_dir))
    input_dir = Crumb(input_dir, regex=regex, ignore_list=['.*'])
    if not input_dir.has_crumbs():
        raise ValueError('I am almost sure that this cannot work if you do not '
                         'use crumb arguments in the input path, got {}.'.format(input_dir))
    # Second-to-last crumb arg names the folder holding one DICOM series; the
    # last one matches the individual DICOM files inside it.
    acq_folder_arg, last_in_arg = tuple(input_dir.all_args())[-2:]
    out_arg_names = ['{' + arg + '}' for arg in tuple(input_dir.all_args())[:-1]]
    # Mirror the input crumb tree (minus the file-level argument) under output_dir.
    output_dir = Crumb(os.path.join(output_dir, *out_arg_names), regex=regex, ignore_list=['.*'])
    src_dst = []
    acquisitions = input_dir.ls(acq_folder_arg, make_crumbs=True)
    for acq in acquisitions:
        # Fill the output crumb with this acquisition's argument values to get
        # its destination folder and file name.
        out_args = acq.arg_values.copy()
        acq_out = output_dir.replace(**out_args)
        out_dir = os.path.dirname (acq_out.path)
        out_file = os.path.basename(acq_out.path) + '.nii.gz'
        os.makedirs(out_dir, exist_ok=True)
        src_dst.append((acq.split()[0], out_dir, out_file))
    # Convert the collected (src, dir, file) triples, in parallel when ncpus > 1.
    if ncpus > 1:
        import multiprocessing as mp
        pool = mp.Pool(processes=ncpus)
        results = [pool.apply_async(convert_dcm2nii, args=(dr, ss, dst)) for dr, ss, dst in src_dst]
        _ = [p.get() for p in results]
    else:
        _ = [convert_dcm2nii(path, sess, dst) for path, sess, dst in src_dst]
@task
def clinical_pype(ctx, wf_name="spm_anat_preproc", base_dir="",
                  cache_dir="", output_dir="", settings_file='',
                  plugin="MultiProc", n_cpus=4):
    """ Run the basic preprocessing pipeline over a clinical dataset tree.
    Parameters
    ----------
    wf_name: str
    base_dir: str
    cache_dir: str
    output_dir: str
    settings_file: str
    plugin: str
    n_cpus: int
    """
    # BUG FIX: the package is `neuro_pypes` (as imported everywhere else in
    # this file); the original `neuro_neuro_pypes` raised ImportError.
    from neuro_pypes.datasets import clinical_crumb_workflow
    # Clinical layout: <base_dir>/<year>/<subject_id>/<session_id>/<image>
    data_path = os.path.join(os.path.expanduser(base_dir), '{year}', '{subject_id}', '{session_id}', '{image}')
    data_crumb = Crumb(data_path, ignore_list=['.*'])
    atlas_file = HAMM_MNI
    wf = clinical_crumb_workflow(
        wf_name=wf_name,
        data_crumb=data_crumb,
        cache_dir=os.path.abspath(os.path.expanduser(cache_dir)) if cache_dir else '',
        output_dir=os.path.abspath(os.path.expanduser(output_dir)) if output_dir else '',
        config_file=settings_file,
        params={'atlas_file': atlas_file},
    )
    # Parallel plugin with n_cpus, or the runner's default when single-CPU.
    if n_cpus > 1:
        run_wf(wf, plugin=plugin, n_cpus=n_cpus)
    else:
        run_wf(wf, plugin=None)
@task
def run_canica(ctx, input_crumb, output_dir, cache_dir="", mask_file="", algorithm='canica', comps=30, smooth_fwhm=8,
               wf_name="", settings_file=""):
    """ Perform ICA (CanICA or DictLearning) on the files given by `input_crumb`.
    Parameters
    ----------
    input_crumb: str
        Crumb path that will give a list of the input files for ICA.
        The last open argument and its pattern of the `input_crumb` will be used as a reference for the input image
        file for the ICA. So, put a crumb argument with fixed expression in the basename of the path, e.g.:
        `/home/hansel/cobre/{sid}/session_0/{img:rest.nii.gz}`.
    mask_file: str
        Path to a mask file to select the image regions that
        This file must have the same dimensions as all the files listed from `input_crumb`.
    algorithm: str
        Name of the ICA algorithm.
        Choices: 'canica', 'dictlearning'
    comps: int
        Number of components to extract from the ICA.
    Outputs
    -------
    The results will be stored in `output_dir`.
    """
    from functools import partial
    from neuro_pypes.config import update_config
    from neuro_pypes.io import build_crumb_workflow
    from neuro_pypes.ica import attach_concat_canica
    # set the configuration parameters
    if settings_file:
        update_config(settings_file)
    # expanduser in inputs paths:
    cache_dir = os.path.expanduser(cache_dir)
    output_dir = os.path.expanduser(output_dir)
    if not cache_dir:
        cache_dir = os.path.join(output_dir, '.pypes_cache')
    # base folder depending if using MR-PET pipeline or PET-only
    data_crumb = Crumb(input_crumb, ignore_list=['.*'])
    # more configs: push the task arguments into the pypes config under the
    # workflow's own namespace ('<wf_name>_ica.*').
    if not wf_name:
        wf_name = algorithm
    if comps:
        update_config({wf_name + '_ica.n_components' : comps})
    update_config({wf_name + '_ica.algorithm': algorithm})
    update_config({wf_name + '_ica.mask': mask_file})
    update_config({wf_name + '_ica.smoothing_fwhm': smooth_fwhm})
    update_config({wf_name + '_ica.do_cca': True})
    update_config({wf_name + '_ica.standardize': True})
    update_config({wf_name + '_ica.n_init': 20})
    update_config({wf_name + '_ica.n_jobs': -1})
    update_config({'plot_ica.bg_img': SPM_CANONICAL_BRAIN_2MM})
    # the input folder and files: the crumb's last open argument (and its
    # pattern, if any) selects the image files fed into the ICA.
    files_crumb_args = {}
    _, arg_name = data_crumb._last_open_arg()
    files_crumb_args['input_img'] = [(arg_name, data_crumb.patterns.get(arg_name, ""))]
    kwargs = dict()
    kwargs['input_connection'] = 'input_img'
    kwargs['input_node'] = 'selectfiles'
    # build the workflow
    wf = build_crumb_workflow({wf_name: partial(attach_concat_canica, **kwargs)},
                              data_crumb=data_crumb,
                              in_out_kwargs=files_crumb_args,
                              output_dir=output_dir,
                              cache_dir=cache_dir,)
    # the datasink node is not needed here: ICA writes its own outputs
    wf.remove_nodes([wf.get_node('datasink')])
    run_wf(wf)
@task
def run_gift(ctx, input_dir, output_dir, mask_file, zscore_plot=2):
    """ Perform MIALAB's GIFT InfoMax.
    Uses the gift_batch_template.m file to create the GIFT batch input.
    Parameters
    ----------
    input_dir: str
    output_dir: str
    mask_file: str
    zscore_plot: float
    Examples
    --------
    $ inv run_gift -i /home/hansel/data/thomas/ica_in -o /home/hansel/data/thomas/ica_out
    Outputs
    -------
    The results will be stored in output_dir/{algorithm}_{preproc_type}_{group}
    ...
    """
    import io
    import os
    import subprocess
    from jinja2 import Template
    from neuro_pypes.ica import plot_ica_results
    # Fill the MATLAB batch template with this run's parameters.
    tmp_file = 'gift_batch_template.m'
    with io.open(tmp_file) as template_fh:
        tmp_str = Template(template_fh.read())
    tmp_str = tmp_str.render(input_dir=input_dir,
                             output_dir=output_dir,
                             mask_file=mask_file)
    # Write the filled batch file; the `with` guarantees it is flushed and
    # closed before MATLAB reads it (the original leaked the handle).
    batch_file = os.path.abspath('gift_filled_template.m')
    with io.open(batch_file, 'w') as batch_fh:
        batch_fh.write(tmp_str)
    # Run GIFT in a headless MATLAB session, then remove the batch file.
    cmd = 'matlab -nodesktop -nosplash -r "icatb_batch_file_run(\'{}\'); exit();"'.format(batch_file)
    print(cmd)
    subprocess.check_call(cmd, shell=True)
    os.remove(batch_file)
    bg_img = os.path.expanduser(SPM_CANONICAL_BRAIN_2MM)
    return plot_ica_results(output_dir, application='gift',
                            mask_file=mask_file, zscore=zscore_plot, bg_img=bg_img)
@task
def run_sbm(ctx, input_dir, output_dir, mask_file, zscore_plot=2):
    """ Perform MIALAB's SBM InfoMax.
    Uses the sbm_batch_template.m file to create the SBM batch input.
    Parameters
    ----------
    input_dir: str
    output_dir: str
    mask_file: str
    zscore_plot: float
    Examples
    --------
    $ inv run_sbm -i /home/hansel/data/thomas/ica_in -o /home/hansel/data/thomas/ica_out
    Outputs
    -------
    The results will be stored in output_dir/{algorithm}_{preproc_type}_{group}
    ...
    """
    import io
    import os
    import subprocess
    from jinja2 import Template
    from neuro_pypes.ica import plot_ica_results
    # SBM takes a glob of NifTI volumes rather than a plain folder.
    input_glob = os.path.join(input_dir, '*.nii')
    # Fill the MATLAB batch template with this run's parameters.
    tmp_file = 'sbm_batch_template.m'
    with io.open(tmp_file) as template_fh:
        tmp_str = Template(template_fh.read())
    tmp_str = tmp_str.render(input_glob=input_glob,
                             output_dir=output_dir,
                             out_prefix='sbm_',
                             mask_file=mask_file)
    # Write the filled batch file; the `with` guarantees it is flushed and
    # closed before MATLAB reads it (the original leaked the handle).
    batch_file = os.path.abspath('sbm_filled_template.m')
    with io.open(batch_file, 'w') as batch_fh:
        batch_fh.write(tmp_str)
    # Run SBM in a headless MATLAB session, then remove the batch file.
    cmd = 'matlab -nodesktop -nosplash -r "icatb_batch_file_run(\'{}\'); exit();"'.format(batch_file)
    print(cmd)
    subprocess.check_call(cmd, shell=True)
    os.remove(batch_file)
    bg_img = os.path.expanduser(SPM_CANONICAL_BRAIN_2MM)
    return plot_ica_results(output_dir, application='sbm', mask_file=mask_file,
                            zscore=zscore_plot, bg_img=bg_img)
@task
def cobre_pype(ctx, wf_name="spm_anat_rest_preproc", base_dir="", cache_dir="", output_dir="", settings_file="",
               plugin=None, n_cpus=4):
    """ Run the preprocessing pipeline over a COBRE dataset tree.
    Parameters
    ----------
    wf_name: str
    base_dir: str
        Base path to where the data is
    cache_dir: str
    output_dir: str
    settings_file: str
    plugin: str
    n_cpus: int
    """
    from neuro_pypes.datasets import cobre_crumb_workflow
    # COBRE layout: <base_dir>/<subject_id>/session_1/<modality>/<image>
    data_path = os.path.join(os.path.expanduser(base_dir), '{subject_id}', 'session_1', '{modality}', '{image}')
    data_crumb = Crumb(data_path, ignore_list=['.*'])
    wf = cobre_crumb_workflow(wf_name = wf_name,
                              data_crumb = data_crumb,
                              cache_dir = os.path.abspath(os.path.expanduser(cache_dir)) if cache_dir else '',
                              output_dir = os.path.abspath(os.path.expanduser(output_dir)) if output_dir else '',
                              config_file = settings_file,
                              params={'atlas_file': HAMM_MNI},
                              )
    run_wf(wf, plugin=plugin, n_cpus=n_cpus)
@task(autoprint=True)
def plot_ica_results(ctx, ica_result, application, mask_file='', mode='+-', zscore=0, bg_img=None):
    """ Use nilearn through pypes to plot results from CanICA, DictLearning, MIALAB GIFT or SBM,
    given the ICA result folder path.
    Parameters
    ----------
    ica_result: str
        Path to the ICA output folder or the ICA components volume file.
    application: str
        Choices: ('canica', 'sbm', 'gift', 'gift-group')
    mask_file: str
        Path to the brain mask file to be used for thresholding.
    mode: str
        Choices: '+' for positive threshold,
        '+-' for positive and negative threshold and
        '-' for negative threshold.
    zscore: int
        Value of the Z-score thresholding.
    bg_img: str
        Path to a background image.
        If empty will use the SPM canonical brain image at 2mm.
    """
    # Local import: the library function shares this task's name, so inside
    # the function body the imported `plot_ica_results` is the one called.
    from neuro_pypes.ica import plot_ica_results
    if bg_img is None:
        bg_img = os.path.expanduser(SPM_CANONICAL_BRAIN_2MM)
    return plot_ica_results(ica_result,
                            application=application,
                            mask_file=mask_file,
                            zscore=float(zscore),
                            mode=mode,
                            bg_img=bg_img)
@task
def motion_stats_sheet(ctx, motion_file_cr, crumb_fields, out_path):
    """ Create in `out_path` an Excel spreadsheet with some of the motion statistics obtained from the
    `statistics_files` output of the nipype.RapidArt found in the hansel.Crumb `motion_file_cr`.
    Parameters
    ----------
    motion_file_cr: str
    crumb_fields: list of str
    out_path: str
    Examples
    --------
    $ inv motion_stats_sheet \
      --motion-file-cr "/home/hansel/data/out/{group}/{patient_id}/{session}/rest/artifact_stats/motion_stats.json" \
      --crumb-fields "['group', 'patient_id', 'session']" \
      --out-path "/home/hansel/data/motion_stats.xls"
    """
    import json
    from collections import OrderedDict
    from hansel import Crumb
    def get_motion_record(mtn_file_cr, crumb_fields):
        """ Return an OrderedDict of the information found in the `mtn_file_cr` and also
        `crumb_fields` Crumb argument values."""
        # BUG FIX: close the stats file deterministically (the original
        # `json.load(open(...))` leaked one handle per record).
        with open(str(mtn_file_cr)) as stats_fh:
            stats = json.load(stats_fh)
        outliers = stats[1]
        motion_norm = stats[3]['motion_norm']
        motion_hdr = ['{}_motion_norm'.format(k) for k in motion_norm.keys()]
        mtn_record = OrderedDict()
        # crumb argument values first, then outlier stats, then motion norms
        for fn in crumb_fields:
            mtn_record[fn] = mtn_file_cr[fn][0]
        mtn_record.update(outliers)
        for hdr, fn in zip(motion_hdr, motion_norm):
            mtn_record[hdr] = motion_norm[fn]
        return mtn_record
    # parse the stringified list of field names, e.g. "['group', 'session']"
    motion_file_cr = Crumb(motion_file_cr)
    crumb_fields = [crf.strip() for crf in crumb_fields[1:-1].replace("'", "").split(',')]
    # one record per motion_stats.json found by the crumb
    motionstats = [get_motion_record(stats_file, crumb_fields) for stats_file in motion_file_cr.ls()]
    # dump all records as one spreadsheet
    df = pd.DataFrame.from_records(motionstats, columns=motionstats[0].keys())
    df.to_excel(out_path)
@task
def ica_sbm_loadings_sheet(ctx, ica_out_dir, labels_file="", mask="", bg_img=None, zscore=2.,
                           subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):
    """
    Save the Excel loadings files in the `ica_out_dir`.
    One file is `subject_loadings.xls` which has the loadings as is, with the subjects IDs and group.
    The other file is `subject_group_loadings.xls` which has the loading signs changed according to
    the average correlation value of the "main" region of each of the IC spatial maps.
    Parameters
    ----------
    ica_out_dir: str
        Path to the SBM ICA analysis output folder.
    labels_file: str
        A CSV file with two columns: "subject_id" and "group".
        The subject_ids must be in the paths contained in the Subject.mat
        file and match the `subjid_pat` argument.
    mask: str
        Path to a mask file to select only brain area from the IC spatial map.
    bg_img: str
        A background image for the blob plots check report, to verify that the blobs
        taken into account for the loadings signs are correct.
    zscore: float
        Value to threshold the IC spatial maps to obtain the IC spatial map "main" region.
    subjid_pat: regex str
        A search regex pattern that returns one group element that
        contains the subject id.
        This will be used to search for subject_id in the file paths
        contained in the Subjects.mat file.
    """
    # NOTE: the trailing extraction residue on the original last line was a
    # syntax error and has been removed.
    from neuro_pypes.ica.plotting import SBMICAResultsPlotter
    rawloadings_filename = 'subject_loadings.xls'
    grouploadings_filename = 'subject_weighted_loadings.xls'
    check_blob_plot = 'check_sign_blobs.png'
    plotter = SBMICAResultsPlotter(ica_out_dir)
    plotter.fit(mask_file=mask, mode='+-', zscore=zscore)
    # generate and save the simple loadings sheet
    sdf = plotter.simple_loadings_df(group_labels_file=labels_file, subjid_pat=subjid_pat)
    sdf.to_excel(os.path.join(ica_out_dir, rawloadings_filename))
    # generate and save the group-processed loadings sheet
    pdf = plotter.weighted_loadings_df(group_labels_file=labels_file, subjid_pat=subjid_pat)
    pdf.to_excel(os.path.join(ica_out_dir, grouploadings_filename))
    # plot blobs over IC maps for checking
    check_blob_plot = os.path.join(ica_out_dir, check_blob_plot)
    plotter.plot_icmaps_and_blobs(check_blob_plot, bg_img=bg_img)
division,
print_function,
unicode_literals)
import os
import os
import logging
import pandas as pd
from hansel import Crumb
from invoke import task
from boyle.files.search import recursive_glob
from neuro_pypes.run import run_debug, run_wf
log = logging.getLogger()
# to get std_brains with some atlases and templates:
# git clone https://github.com/Neurita/std_brains.git
STDB_DIR = '/home/hansel/data/std_brains'
HAMM_DIR = os.path.join(STDB_DIR, 'atlases', 'hammers')
HAMM_MNI = os.path.join(HAMM_DIR, 'Hammers_mith_atlas_n30r83_SPM5.nii.gz')
SPM_CANONICAL_BRAIN_2MM = os.path.join(STDB_DIR, 'templates', 'spm_canonical', 'single_subj_T1_brain.nii.gz')
def verbose_switch(verbose=False):
    """Set the root logger to DEBUG when `verbose` is true, INFO otherwise."""
    level = logging.DEBUG if verbose else logging.INFO
    logging.getLogger().setLevel(level)
@task
def decompress_dicoms(ctx, input_dir):
    """ Decompress all *.dcm files recursively found in `input_dir`.
    This uses 'gdcmconv --raw'.
    It works when 'dcm2nii' shows the `Unsupported Transfer Syntax` error. This error is
    usually caused by lack of JPEG2000 support in dcm2nii compilation.
    Read more:
    http://www.nitrc.org/plugins/mwiki/index.php/dcm2nii:MainPage#Transfer_Syntaxes_and_Compressed_Images
    Parameters
    ----------
    input_dir: str
        Folder path
    Notes
    -----
    The *.dcm files in `input_dir` will be overwritten.
    """
    import subprocess
    dcmfiles = sorted(recursive_glob(input_dir, '*.dcm'))
    for dcm in dcmfiles:
        cmd = 'gdcmconv --raw -i "{0}" -o "{0}"'.format(dcm)
        log.debug('Calling {}.'.format(cmd))
        try:
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError:
            # Best-effort: keep converting the remaining files, but record the
            # failure instead of silently swallowing every exception (the
            # original bare `except: pass` also ate KeyboardInterrupt).
            log.exception('gdcmconv failed for {}.'.format(dcm))
@task
def dcm2nii(ctx, input_crumb_path, output_dir, regex='fnmatch', ncpus=3):
    """ Convert all DICOM files within `input_crumb_path` into NifTI in `output_dir`.
    Will copy only the NifTI files reoriented by MRICron's dcm2nii command.
    Will rename the NifTI files that are matched with recognized modalities to the short
    modality name from config.ACQ_PATTERNS.
    Parameters
    ----------
    input_crumb_path: str
        A crumb path str indicating the whole path until the DICOM files.
        Example: '/home/hansel/data/{group}/{subj_id}/{session_id}/{acquisition}/{dcm_file}
        The crumb argument just before the last one will be used as folder container reference
        for the DICOM series.
    output_dir: str
        The root folder path where to save the tree of nifti files.
        Example: '/home/hansel/nifti'
        This function will create the same tree as the crumbs in input_crumb_path, hence
        for the example above the output would have the following structure:
        '/home/hansel/nifti/{group}/{subj_id}/{session_id}/{nifti_file}'
        Where {nifti_file} will take the name from the {acquisition} or from the
        patterns in ACQ_PATTERNS in `config.py` file.
    regex: str
        The regular expression syntax you may want to set in the Crumbs.
        See hansel.Crumb documentation for this.
    ncpus: int
        this says the number of processes that will be launched for dcm2nii in parallel.
    """
    from boyle.dicom.convert import convert_dcm2nii
    # Expand '~' in both user-supplied paths.
    input_dir = os.path.expanduser(input_crumb_path)
    output_dir = os.path.expanduser(output_dir)
    if not os.path.exists(output_dir):
        log.info('Creating output folder {}.'.format(output_dir))
        os.makedirs(output_dir)
    else:
        log.info('Output folder {} already exists, this will overwrite/merge '
                 'whatever is inside.'.format(output_dir))
    input_dir = Crumb(input_dir, regex=regex, ignore_list=['.*'])
    if not input_dir.has_crumbs():
        raise ValueError('I am almost sure that this cannot work if you do not '
                         'use crumb arguments in the input path, got {}.'.format(input_dir))
    # Second-to-last crumb arg names the folder holding one DICOM series; the
    # last one matches the individual DICOM files inside it.
    acq_folder_arg, last_in_arg = tuple(input_dir.all_args())[-2:]
    out_arg_names = ['{' + arg + '}' for arg in tuple(input_dir.all_args())[:-1]]
    # Mirror the input crumb tree (minus the file-level argument) under output_dir.
    output_dir = Crumb(os.path.join(output_dir, *out_arg_names), regex=regex, ignore_list=['.*'])
    src_dst = []
    acquisitions = input_dir.ls(acq_folder_arg, make_crumbs=True)
    for acq in acquisitions:
        # Fill the output crumb with this acquisition's argument values to get
        # its destination folder and file name.
        out_args = acq.arg_values.copy()
        acq_out = output_dir.replace(**out_args)
        out_dir = os.path.dirname (acq_out.path)
        out_file = os.path.basename(acq_out.path) + '.nii.gz'
        os.makedirs(out_dir, exist_ok=True)
        src_dst.append((acq.split()[0], out_dir, out_file))
    # Convert the collected (src, dir, file) triples, in parallel when ncpus > 1.
    if ncpus > 1:
        import multiprocessing as mp
        pool = mp.Pool(processes=ncpus)
        results = [pool.apply_async(convert_dcm2nii, args=(dr, ss, dst)) for dr, ss, dst in src_dst]
        _ = [p.get() for p in results]
    else:
        _ = [convert_dcm2nii(path, sess, dst) for path, sess, dst in src_dst]
@task
def clinical_pype(ctx, wf_name="spm_anat_preproc", base_dir="",
                  cache_dir="", output_dir="", settings_file='',
                  plugin="MultiProc", n_cpus=4):
    """ Run the basic preprocessing pipeline over a clinical dataset tree.
    Parameters
    ----------
    wf_name: str
    base_dir: str
    cache_dir: str
    output_dir: str
    settings_file: str
    plugin: str
    n_cpus: int
    """
    # BUG FIX: the package is `neuro_pypes` (as imported everywhere else in
    # this file); the original `neuro_neuro_pypes` raised ImportError.
    from neuro_pypes.datasets import clinical_crumb_workflow
    # Clinical layout: <base_dir>/<year>/<subject_id>/<session_id>/<image>
    data_path = os.path.join(os.path.expanduser(base_dir), '{year}', '{subject_id}', '{session_id}', '{image}')
    data_crumb = Crumb(data_path, ignore_list=['.*'])
    atlas_file = HAMM_MNI
    wf = clinical_crumb_workflow(
        wf_name=wf_name,
        data_crumb=data_crumb,
        cache_dir=os.path.abspath(os.path.expanduser(cache_dir)) if cache_dir else '',
        output_dir=os.path.abspath(os.path.expanduser(output_dir)) if output_dir else '',
        config_file=settings_file,
        params={'atlas_file': atlas_file},
    )
    # Parallel plugin with n_cpus, or the runner's default when single-CPU.
    if n_cpus > 1:
        run_wf(wf, plugin=plugin, n_cpus=n_cpus)
    else:
        run_wf(wf, plugin=None)
@task
def run_canica(ctx, input_crumb, output_dir, cache_dir="", mask_file="", algorithm='canica', comps=30, smooth_fwhm=8,
               wf_name="", settings_file=""):
    """ Perform ICA (CanICA or DictLearning) on the files given by `input_crumb`.
    Parameters
    ----------
    input_crumb: str
        Crumb path that will give a list of the input files for ICA.
        The last open argument and its pattern of the `input_crumb` will be used as a reference for the input image
        file for the ICA. So, put a crumb argument with fixed expression in the basename of the path, e.g.:
        `/home/hansel/cobre/{sid}/session_0/{img:rest.nii.gz}`.
    mask_file: str
        Path to a mask file to select the image regions that
        This file must have the same dimensions as all the files listed from `input_crumb`.
    algorithm: str
        Name of the ICA algorithm.
        Choices: 'canica', 'dictlearning'
    comps: int
        Number of components to extract from the ICA.
    Outputs
    -------
    The results will be stored in `output_dir`.
    """
    from functools import partial
    from neuro_pypes.config import update_config
    from neuro_pypes.io import build_crumb_workflow
    from neuro_pypes.ica import attach_concat_canica
    # set the configuration parameters
    if settings_file:
        update_config(settings_file)
    # expanduser in inputs paths:
    cache_dir = os.path.expanduser(cache_dir)
    output_dir = os.path.expanduser(output_dir)
    if not cache_dir:
        cache_dir = os.path.join(output_dir, '.pypes_cache')
    # base folder depending if using MR-PET pipeline or PET-only
    data_crumb = Crumb(input_crumb, ignore_list=['.*'])
    # more configs: push the task arguments into the pypes config under the
    # workflow's own namespace ('<wf_name>_ica.*').
    if not wf_name:
        wf_name = algorithm
    if comps:
        update_config({wf_name + '_ica.n_components' : comps})
    update_config({wf_name + '_ica.algorithm': algorithm})
    update_config({wf_name + '_ica.mask': mask_file})
    update_config({wf_name + '_ica.smoothing_fwhm': smooth_fwhm})
    update_config({wf_name + '_ica.do_cca': True})
    update_config({wf_name + '_ica.standardize': True})
    update_config({wf_name + '_ica.n_init': 20})
    update_config({wf_name + '_ica.n_jobs': -1})
    update_config({'plot_ica.bg_img': SPM_CANONICAL_BRAIN_2MM})
    # the input folder and files: the crumb's last open argument (and its
    # pattern, if any) selects the image files fed into the ICA.
    files_crumb_args = {}
    _, arg_name = data_crumb._last_open_arg()
    files_crumb_args['input_img'] = [(arg_name, data_crumb.patterns.get(arg_name, ""))]
    kwargs = dict()
    kwargs['input_connection'] = 'input_img'
    kwargs['input_node'] = 'selectfiles'
    # build the workflow
    wf = build_crumb_workflow({wf_name: partial(attach_concat_canica, **kwargs)},
                              data_crumb=data_crumb,
                              in_out_kwargs=files_crumb_args,
                              output_dir=output_dir,
                              cache_dir=cache_dir,)
    # the datasink node is not needed here: ICA writes its own outputs
    wf.remove_nodes([wf.get_node('datasink')])
    run_wf(wf)
@task
def run_gift(ctx, input_dir, output_dir, mask_file, zscore_plot=2):
    """ Perform MIALAB's GIFT InfoMax.
    Uses the gift_batch_template.m file to create the GIFT batch input.
    Parameters
    ----------
    input_dir: str
    output_dir: str
    mask_file: str
    zscore_plot: float
    Examples
    --------
    $ inv run_gift -i /home/hansel/data/thomas/ica_in -o /home/hansel/data/thomas/ica_out
    Outputs
    -------
    The results will be stored in output_dir/{algorithm}_{preproc_type}_{group}
    ...
    """
    import io
    import os
    import subprocess
    from jinja2 import Template
    from neuro_pypes.ica import plot_ica_results
    # Fill the MATLAB batch template with this run's parameters.
    tmp_file = 'gift_batch_template.m'
    with io.open(tmp_file) as template_fh:
        tmp_str = Template(template_fh.read())
    tmp_str = tmp_str.render(input_dir=input_dir,
                             output_dir=output_dir,
                             mask_file=mask_file)
    # Write the filled batch file; the `with` guarantees it is flushed and
    # closed before MATLAB reads it (the original leaked the handle).
    batch_file = os.path.abspath('gift_filled_template.m')
    with io.open(batch_file, 'w') as batch_fh:
        batch_fh.write(tmp_str)
    # Run GIFT in a headless MATLAB session, then remove the batch file.
    cmd = 'matlab -nodesktop -nosplash -r "icatb_batch_file_run(\'{}\'); exit();"'.format(batch_file)
    print(cmd)
    subprocess.check_call(cmd, shell=True)
    os.remove(batch_file)
    bg_img = os.path.expanduser(SPM_CANONICAL_BRAIN_2MM)
    return plot_ica_results(output_dir, application='gift',
                            mask_file=mask_file, zscore=zscore_plot, bg_img=bg_img)
@task
def run_sbm(ctx, input_dir, output_dir, mask_file, zscore_plot=2):
    """ Perform MIALAB's SBM InfoMax.
    Uses the sbm_batch_template.m file to create the SBM batch input.
    Parameters
    ----------
    input_dir: str
    output_dir: str
    mask_file: str
    zscore_plot: float
    Examples
    --------
    $ inv run_sbm -i /home/hansel/data/thomas/ica_in -o /home/hansel/data/thomas/ica_out
    Outputs
    -------
    The results will be stored in output_dir/{algorithm}_{preproc_type}_{group}
    ...
    """
    import io
    import os
    import subprocess
    from jinja2 import Template
    from neuro_pypes.ica import plot_ica_results
    # SBM takes a glob of NifTI volumes rather than a plain folder.
    input_glob = os.path.join(input_dir, '*.nii')
    # Fill the MATLAB batch template with this run's parameters.
    tmp_file = 'sbm_batch_template.m'
    with io.open(tmp_file) as template_fh:
        tmp_str = Template(template_fh.read())
    tmp_str = tmp_str.render(input_glob=input_glob,
                             output_dir=output_dir,
                             out_prefix='sbm_',
                             mask_file=mask_file)
    # Write the filled batch file; the `with` guarantees it is flushed and
    # closed before MATLAB reads it (the original leaked the handle).
    batch_file = os.path.abspath('sbm_filled_template.m')
    with io.open(batch_file, 'w') as batch_fh:
        batch_fh.write(tmp_str)
    # Run SBM in a headless MATLAB session, then remove the batch file.
    cmd = 'matlab -nodesktop -nosplash -r "icatb_batch_file_run(\'{}\'); exit();"'.format(batch_file)
    print(cmd)
    subprocess.check_call(cmd, shell=True)
    os.remove(batch_file)
    bg_img = os.path.expanduser(SPM_CANONICAL_BRAIN_2MM)
    return plot_ica_results(output_dir, application='sbm', mask_file=mask_file,
                            zscore=zscore_plot, bg_img=bg_img)
@task
def cobre_pype(ctx, wf_name="spm_anat_rest_preproc", base_dir="", cache_dir="", output_dir="", settings_file="",
               plugin=None, n_cpus=4):
    """ Run the preprocessing pipeline over a COBRE dataset tree.
    Parameters
    ----------
    wf_name: str
    base_dir: str
        Base path to where the data is
    cache_dir: str
    output_dir: str
    settings_file: str
    plugin: str
    n_cpus: int
    """
    from neuro_pypes.datasets import cobre_crumb_workflow
    # COBRE layout: <base_dir>/<subject_id>/session_1/<modality>/<image>
    data_path = os.path.join(os.path.expanduser(base_dir), '{subject_id}', 'session_1', '{modality}', '{image}')
    data_crumb = Crumb(data_path, ignore_list=['.*'])
    wf = cobre_crumb_workflow(wf_name = wf_name,
                              data_crumb = data_crumb,
                              cache_dir = os.path.abspath(os.path.expanduser(cache_dir)) if cache_dir else '',
                              output_dir = os.path.abspath(os.path.expanduser(output_dir)) if output_dir else '',
                              config_file = settings_file,
                              params={'atlas_file': HAMM_MNI},
                              )
    run_wf(wf, plugin=plugin, n_cpus=n_cpus)
@task(autoprint=True)
def plot_ica_results(ctx, ica_result, application, mask_file='', mode='+-', zscore=0, bg_img=None):
    """ Use nilearn through pypes to plot results from CanICA, DictLearning, MIALAB GIFT or SBM,
    given the ICA result folder path.
    Parameters
    ----------
    ica_result: str
        Path to the ICA output folder or the ICA components volume file.
    application: str
        Choices: ('canica', 'sbm', 'gift', 'gift-group')
    mask_file: str
        Path to the brain mask file to be used for thresholding.
    mode: str
        Choices: '+' for positive threshold,
        '+-' for positive and negative threshold and
        '-' for negative threshold.
    zscore: int
        Value of the Z-score thresholding.
    bg_img: str
        Path to a background image.
        If empty will use the SPM canonical brain image at 2mm.
    """
    # Local import: the library function shares this task's name, so inside
    # the function body the imported `plot_ica_results` is the one called.
    from neuro_pypes.ica import plot_ica_results
    if bg_img is None:
        bg_img = os.path.expanduser(SPM_CANONICAL_BRAIN_2MM)
    return plot_ica_results(ica_result,
                            application=application,
                            mask_file=mask_file,
                            zscore=float(zscore),
                            mode=mode,
                            bg_img=bg_img)
@task
def motion_stats_sheet(ctx, motion_file_cr, crumb_fields, out_path):
    """ Create in `out_path` an Excel spreadsheet with some of the motion statistics obtained from the
    `statistics_files` output of the nipype.RapidArt found in the hansel.Crumb `motion_file_cr`.
    Parameters
    ----------
    motion_file_cr: str
    crumb_fields: list of str
    out_path: str
    Examples
    --------
    $ inv motion_stats_sheet \
      --motion-file-cr "/home/hansel/data/out/{group}/{patient_id}/{session}/rest/artifact_stats/motion_stats.json" \
      --crumb-fields "['group', 'patient_id', 'session']" \
      --out-path "/home/hansel/data/motion_stats.xls"
    """
    import json
    from collections import OrderedDict
    from hansel import Crumb
    def get_motion_record(mtn_file_cr, crumb_fields):
        """ Return an OrderedDict of the information found in the `mtn_file_cr` and also
        `crumb_fields` Crumb argument values."""
        # BUG FIX: close the stats file deterministically (the original
        # `json.load(open(...))` leaked one handle per record).
        with open(str(mtn_file_cr)) as stats_fh:
            stats = json.load(stats_fh)
        outliers = stats[1]
        motion_norm = stats[3]['motion_norm']
        motion_hdr = ['{}_motion_norm'.format(k) for k in motion_norm.keys()]
        mtn_record = OrderedDict()
        # crumb argument values first, then outlier stats, then motion norms
        for fn in crumb_fields:
            mtn_record[fn] = mtn_file_cr[fn][0]
        mtn_record.update(outliers)
        for hdr, fn in zip(motion_hdr, motion_norm):
            mtn_record[hdr] = motion_norm[fn]
        return mtn_record
    # parse the stringified list of field names, e.g. "['group', 'session']"
    motion_file_cr = Crumb(motion_file_cr)
    crumb_fields = [crf.strip() for crf in crumb_fields[1:-1].replace("'", "").split(',')]
    # one record per motion_stats.json found by the crumb
    motionstats = [get_motion_record(stats_file, crumb_fields) for stats_file in motion_file_cr.ls()]
    # dump all records as one spreadsheet
    df = pd.DataFrame.from_records(motionstats, columns=motionstats[0].keys())
    df.to_excel(out_path)
@task
def ica_sbm_loadings_sheet(ctx, ica_out_dir, labels_file="", mask="", bg_img=None, zscore=2.,
                           subjid_pat=r'(?P<patid>[a-z]{2}_[0-9]{6})'):
    """
    Save the Excel loadings files in the `ica_out_dir`.
    One file is `subject_loadings.xls` which has the loadings as is, with the subjects IDs and group.
    The other file is `subject_group_loadings.xls` which has the loading signs changed according to
    the average correlation value of the "main" region of each of the IC spatial maps.
    Parameters
    ----------
    ica_out_dir: str
        Path to the SBM ICA analysis output folder.
    labels_file: str
        A CSV file with two columns: "subject_id" and "group".
        The subject_ids must be in the paths contained in the Subject.mat
        file and match the `subjid_pat` argument.
    mask: str
        Path to a mask file to select only brain area from the IC spatial map.
    bg_img: str
        A background image for the blob plots check report, to verify that the blobs
        taken into account for the loadings signs are correct.
    zscore: float
        Value to threshold the IC spatial maps to obtain the IC spatial map "main" region.
    subjid_pat: regex str
        A search regex pattern that returns one group element that
        contains the subject id.
        This will be used to search for subject_id in the file paths
        contained in the Subjects.mat file.
    """
    # NOTE: the trailing extraction residue on the original last line was a
    # syntax error and has been removed.
    from neuro_pypes.ica.plotting import SBMICAResultsPlotter
    rawloadings_filename = 'subject_loadings.xls'
    grouploadings_filename = 'subject_weighted_loadings.xls'
    check_blob_plot = 'check_sign_blobs.png'
    plotter = SBMICAResultsPlotter(ica_out_dir)
    plotter.fit(mask_file=mask, mode='+-', zscore=zscore)
    # generate and save the simple loadings sheet
    sdf = plotter.simple_loadings_df(group_labels_file=labels_file, subjid_pat=subjid_pat)
    sdf.to_excel(os.path.join(ica_out_dir, rawloadings_filename))
    # generate and save the group-processed loadings sheet
    pdf = plotter.weighted_loadings_df(group_labels_file=labels_file, subjid_pat=subjid_pat)
    pdf.to_excel(os.path.join(ica_out_dir, grouploadings_filename))
    # plot blobs over IC maps for checking
    check_blob_plot = os.path.join(ica_out_dir, check_blob_plot)
    plotter.plot_icmaps_and_blobs(check_blob_plot, bg_img=bg_img)
import unittest
import avltree_eaf3d as bst
import eaf3D
import numpy as np
from operator import attrgetter
from stack import Stack
class AVLTreeTestsFromStarter(unittest.TestCase):
    """Tests for the AVL tree starting from a hand-built sentinel tree.

    ``setUp`` builds a two-node tree whose root and right child hold
    sentinel points at +/-10E10 (simulating infinity), then inserts the
    four lowest-z points from the example data set ``example/run01``.
    """

    def setUp(self):
        # Set initial points for sentinels (to simulate infinity)
        big_pos_value = 10E10
        big_neg_value = -1 * big_pos_value
        self.p1 = np.array([big_neg_value, big_pos_value, big_neg_value])
        p2 = np.array([big_pos_value, big_neg_value, big_neg_value])
        # Initialize avltree with the two sentinel nodes wired by hand.
        point0 = eaf3D.ApproxPoint(None, 1000, self.p1)
        point1 = eaf3D.ApproxPoint(None, 1001, p2)
        node0 = bst.AVLNode(point0, 1)
        node1 = bst.AVLNode(point1, 0)
        self.t = bst.AVLTree()
        self.t.root = node0
        node0.right = node1
        self.t.count = 2
        # Import more data points
        fname = 'example/run01'
        exset = eaf3D.import_approximate_set(fname)
        x, m = eaf3D.multiset_sum([exset])
        # Q is X sorted in ascending order of the z coordinate
        self.qstack = Stack()
        xintoq = sorted(x.values(), key=attrgetter('z'))
        for i in range(len(xintoq)):
            self.qstack.push(xintoq[i])
        # Add new data points to tree
        for i in range(4):
            p = self.qstack.pop()
            self.t.insert(p)

    def test_if_init_made_correctly(self):
        """Verify the balances of the fixture tree built in setUp."""
        self.assertEqual(self.t.root.balance, 0)
        self.assertEqual(self.t.root.left.balance, 1)
        self.assertEqual(self.t.root.right.balance, -1)
        self.assertEqual(self.t.root.left.right.balance, 0)
        self.assertEqual(self.t.root.right.left.balance, 0)

    def test_insert_found(self):
        """Inserting an already-present point must leave the tree unchanged."""
        expected = str(self.t)
        self.t.insert(eaf3D.ApproxPoint(None, 1000, self.p1))
        self.assertEqual(str(self.t), expected)

    def test_insert_case2(self):
        """After an adjust-only insert, every node on the search stack is balanced."""
        # Test insert function for adjust only
        tree = bst.AVLTree()
        tree.set_newroot(self.qstack.pop())
        for i in range(12):
            point = self.qstack.pop()
            tree.insert(point)
        # Insert nonvalid point to prove test
        point1 = eaf3D.ApproxPoint(None, 1002, np.array([2, 0, 0]))
        tree.insert(point1)
        point2 = eaf3D.ApproxPoint(None, 1003, np.array([3.5, 0, 0]))
        tree.insert(point2)
        (pivot, theStack, parent, found) = tree.search(point1)
        theStack.pop()
        while not theStack.isEmpty():
            node = theStack.pop()
            self.assertEqual(node.balance, 0)

    def test_insert_case1(self):
        """Inserting into a balanced tree tips each ancestor's balance to 1."""
        # Test for inserting into balanced tree
        # Get balanced tree
        tree = bst.AVLTree()
        tree.set_newroot(self.qstack.pop())
        for i in range(12):
            point = self.qstack.pop()
            tree.insert(point)
        # Insert nonvalid point to balance tree
        point1 = eaf3D.ApproxPoint(None, 1002, np.array([2, 0, 0]))
        tree.insert(point1)
        point2 = eaf3D.ApproxPoint(None, 1003, np.array([3.5, 0, 0]))
        tree.insert(point2)
        # Point to test
        point3 = eaf3D.ApproxPoint(None, 1004, np.array([8, 0, 0]))
        tree.insert(point3)
        # Check that all nodes along stack have balance = 1
        self.assertEqual(tree.count, 8)
        (pivot, theStack, parent, found) = tree.search(point3)
        theStack.pop()
        while not theStack.isEmpty():
            node = theStack.pop()
            self.assertEqual(node.balance, 1)

    def test_insert_case3_subcaseA(self):
        """Rotation at the pivot restores balance 0 at pivot and root."""
        point = eaf3D.ApproxPoint(None, 1002, np.array([4.5, 0, 0]))
        (pivot, theStack, parent, found) = self.t.search(point)
        self.t.insert(point)
        self.assertEqual(pivot.balance, 0)
        self.assertEqual(self.t.root.balance, 0)

    def test_insert_case3_subcaseB(self):
        """Double rotation promotes the inserted point to root.right."""
        for i in range(4):
            self.qstack.pop()
        point = self.qstack.pop()
        self.t.insert(point)
        self.assertEqual(self.t.root.right.balance, 0)
        self.assertEqual(self.t.root.right.point, point)
        self.assertEqual(self.t.root.right.right.balance, 0)
        self.assertEqual(self.t.root.right.left.balance, 0)

    def test_adjustBalances_negative(self):
        """adjustBalances_add drives the pivot's balance to -2 before rotation."""
        for i in range(4):
            self.qstack.pop()
        point = self.qstack.pop()
        (pivot, theStack, parent, found) = self.t.search(point)
        newNode = bst.AVLNode(point)
        self.t.adjustBalances_add(theStack, pivot, newNode)
        self.assertEqual(pivot.balance, -2)

    def test_floor_x(self):
        """floor_x returns the stored point with the largest x <= query x."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 23.4623828, 6059.2348600000005]))
        p2 = eaf3D.ApproxPoint(1, 1003, np.array([4, 14.07345342, 5990.93696]))
        p3 = eaf3D.ApproxPoint(1, 1004, np.array([5, 10.90633272, 5965.522494]))
        p4 = eaf3D.ApproxPoint(1, 1005, np.array([7, 10.73267638, 5868.0173159999995]))
        p5 = eaf3D.ApproxPoint(0, 1006, np.array([1, 28, 7000]))
        q1 = self.t.floor_x(p1)
        q2 = self.t.floor_x(p2)
        q3 = self.t.floor_x(p3)
        q4 = self.t.floor_x(p4)
        q5 = self.t.floor_x(p5)
        self.assertEqual(q1.x, 3)
        self.assertEqual(q2.x, 4)
        self.assertEqual(q3.x, 5)
        self.assertEqual(q4.x, 5)
        # Below every stored x, the left sentinel is returned.
        self.assertEqual(q5.x, -10E10)

    def test_higher_x(self):
        """higher_x returns the stored point with the smallest x > query x."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 23.4623828, 6059.2348600000005]))
        p2 = eaf3D.ApproxPoint(1, 1003, np.array([4, 14.07345342, 5990.93696]))
        p3 = eaf3D.ApproxPoint(1, 1004, np.array([5, 10.90633272, 5965.522494]))
        p4 = eaf3D.ApproxPoint(1, 1005, np.array([7, 10.73267638, 5868.0173159999995]))
        p5 = eaf3D.ApproxPoint(0, 1006, np.array([1, 28, 7000]))
        q1 = self.t.higher_x(p1)
        q2 = self.t.higher_x(p2)
        q3 = self.t.higher_x(p3)
        q4 = self.t.higher_x(p4)
        q5 = self.t.higher_x(p5)
        self.assertEqual(q1.x, 4)
        self.assertEqual(q2.x, 5)
        # Above every stored x, the right sentinel is returned.
        self.assertEqual(q3.x, 10E10)
        self.assertEqual(q4.x, 10E10)
        self.assertEqual(q5.x, 3)

    def test_lower_y(self):
        """lower_y returns the stored point with the largest y < query y."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 23.4623828, 6059.2348600000005]))
        p2 = eaf3D.ApproxPoint(1, 1003, np.array([4, 14.07345342, 5990.93696]))
        p3 = eaf3D.ApproxPoint(1, 1004, np.array([5, 10.90633272, 5965.522494]))
        p4 = eaf3D.ApproxPoint(1, 1005, np.array([7, 10.73267638, 5868.0173159999995]))
        p5 = eaf3D.ApproxPoint(0, 1006, np.array([9, 6.5, 7000]))
        q1 = self.t.lower_y(p1)
        q2 = self.t.lower_y(p2)
        q3 = self.t.lower_y(p3)
        q4 = self.t.lower_y(p4)
        q5 = self.t.lower_y(p5)
        self.assertAlmostEqual(q1.y, 20.21, places=2)
        self.assertAlmostEqual(q2.y, 12.67, places=2)
        self.assertAlmostEqual(q3.y, 10.42, places=2)
        self.assertAlmostEqual(q4.y, 10.42, places=2)
        self.assertEqual(q5.y, -10E10)

    def test_getRightMost(self):
        """getRightMost finds the maximum-x node of a subtree."""
        rightmost, stack, pivot = self.t.getRightMost(self.t.root)
        self.assertEqual(rightmost.point.x, 10E10)
        rightmost, stack, pivot = self.t.getRightMost(self.t.root.left)
        self.assertEqual(rightmost.point.x, 3)

    def test_list_nodes_domxy(self):
        """list_nodes_domxy returns the nodes dominated in x and y by the query."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 19.0, 6059.]))
        # Renamed from `list` to avoid shadowing the builtin.
        matches = self.t.list_nodes_domxy(p1)
        self.assertListEqual(matches, [self.t.root.left.right])

    def test_height(self):
        """The fixture tree has height 3."""
        height = self.t.height(self.t.root)
        self.assertEqual(height, 3)

    def test_print_astree(self):
        """Smoke test: printing the tree must not raise."""
        self.t.print_astree()
class RemoveNodeTests(unittest.TestCase):
    """Exercises AVLTree.remove_node across the leaf, one-child, and
    two-child cases, with and without rebalancing rotations.

    ``setUp`` fills the tree with the full example data set plus three
    extra points, so every removal test starts from the same shape.
    ``check_node_balances`` re-derives each node's balance and compares
    it to the stored value.
    """

    def setUp(self):
        self.t = bst.AVLTree()
        # Import more data points
        fname = 'example/run01'
        exset = eaf3D.import_approximate_set(fname)
        x, m = eaf3D.multiset_sum([exset])
        # Q is X sorted in ascending order of the z coordinate
        self.qstack = Stack()
        xintoq = sorted(x.values(), key=attrgetter('z'))
        for i in range(len(xintoq)):
            self.qstack.push(xintoq[i])
        self.t.set_newroot(self.qstack.pop())
        # Add data points to tree
        while not self.qstack.isEmpty():
            p = self.qstack.pop()
            self.t.insert(p)
        p1 = eaf3D.ApproxPoint(None, 1001, np.array([12, 6.6, 0]))
        self.t.insert(p1)
        p2 = eaf3D.ApproxPoint(None, 1002, np.array([8, 9.7, 0]))
        self.t.insert(p2)
        # NOTE(review): p3 reuses id 1002 (same as p2) — confirm intended.
        p3 = eaf3D.ApproxPoint(None, 1002, np.array([10, 7.5, 0]))
        self.t.insert(p3)

    def test_remove_node_case1_norotation(self):
        # This tests if it correctly removes a node without children from
        # the tree
        node, theStack, pivot = self.t.getRightMost(self.t.root)
        self.t.remove_node(node.left)
        self.assertEqual(self.t.root.right.balance, 0)
        self.assertEqual(self.t.root.balance, 0)

    def test_remove_node_case1_wrotation(self):
        # This tests if it correctly removes a node without children and
        # performs the necessary rotation.
        self.t.remove_node(self.t.root.left.left)
        self.assertEqual(self.t.count, 7)
        self.assertNotEqual(self.t.root.balance, 2)
        self.check_node_balances(self.t)

    def test_remove_node_case2_norotation(self):
        # This tests if AVLTree correctly removes a node and connects its
        # one child to its parent node.
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root)
        # Get node for comparison
        child = rightmost.left
        # Remove node
        self.t.remove_node(rightmost)
        self.check_node_balances(self.t)
        newright, rightStack, pivot = self.t.getRightMost(self.t.root)
        self.assertEqual(newright, child)

    def test_remove_node_case3_longer_subtree(self):
        # This tests if AVLTree correctly removes a node, connects its
        # one child to its parent node, and performs the necessary rotation
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root.right.left)
        self.t.remove_node(self.t.root.right)
        # There would have been a left rotation at root.right
        self.assertEqual(self.t.root.right.left.point, rightmost.point)
        self.check_node_balances(self.t)

    def test_remove_node_case3_smaller_subtree(self):
        # This tests if AVLTree correctly removes a node, connects its
        # one child to its parent node, and performs the necessary rotation
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root.left.left)
        self.t.remove_node(self.t.root.left)
        self.assertEqual(self.t.root.left.point, rightmost.point)
        self.check_node_balances(self.t)

    def test_remove_node_case3_root(self):
        # This tests if AVLTree correctly removes the root node with
        # children
        # Try with root and long stack
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root.left)
        self.t.remove_node(self.t.root)
        self.assertEqual(self.t.root.point, rightmost.point)
        self.check_node_balances(self.t)
        # Try with no stack (get down to two nodes)
        for i in range(5):
            self.t.remove_node(self.t.root)
        # Make sure no errors pop up in removal
        self.check_node_balances(self.t)

    def test_remove_node_case3_rootright(self):
        # This tests for a specific problem that popped where the right
        # node was removed and the balance at the root was wrong
        # Get tree to position that caused error, checking for new errors
        self.t.remove_node(self.t.root)
        self.check_node_balances(self.t)
        self.t.remove_node(self.t.root.right.right)
        self.check_node_balances(self.t)
        # The tree should now be balance=0 at root with right with two kids
        # After removing right, the root should still be balanced
        self.t.remove_node(self.t.root.right)
        self.check_node_balances(self.t)

    def test_remove_node_case3_left_is_rightmost(self):
        # Set up tree to test case 3 when the left node is the rightmost
        tree = bst.AVLTree()
        point0 = eaf3D.ApproxPoint(1, 1, np.array([4, 12.67, 0]))
        tree.set_newroot(point0)
        # Set initial points for sentinels (to simulate infinity)
        big_pos_value = 10E10
        big_neg_value = -1 * big_pos_value
        p1 = np.array([big_neg_value, big_pos_value, big_neg_value])
        p2 = np.array([big_pos_value, big_neg_value, big_neg_value])
        # Initialize avltree
        point1 = eaf3D.ApproxPoint(None, 1000, p1)
        av1 = bst.AVLNode(point1, balance=1)
        tree.root.left = av1
        point2 = eaf3D.ApproxPoint(None, 1001, p2)
        av2 = bst.AVLNode(point2, balance=-1)
        tree.root.right = av2
        point3 = eaf3D.ApproxPoint(1, 2, np.array([3, 20.21, 0]))
        av3 = bst.AVLNode(point3)
        tree.root.left.right = av3
        point4 = eaf3D.ApproxPoint(1, 3, np.array([5, 10.42, 0]))
        av4 = bst.AVLNode(point4)
        tree.root.right.left = av4
        tree.count = 5
        tree.remove_node(tree.root)
        tree.remove_node(tree.root)
        self.assertEqual(tree.root, av4)
        self.check_node_balances(tree)

    def check_node_balances(self, tree):
        # This module performs a check of all the balances in the tree,
        # walking the tree row by row from the root downward.
        leaves = [tree.root]
        while any(leaves):
            for f in range(len(leaves)):
                if leaves[f]:
                    correct_balance = tree.recalculate_balance(leaves[f])
                    self.assertEqual(leaves[f].balance, correct_balance)
            leaves = tree.next_tree_row(leaves)
# Run the full suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
import avltree_eaf3d as bst
import eaf3D
import numpy as np
from operator import attrgetter
from stack import Stack
class AVLTreeTestsFromStarter(unittest.TestCase):
    """Tests for the AVL tree starting from a hand-built sentinel tree.

    ``setUp`` builds a two-node tree whose root and right child hold
    sentinel points at +/-10E10 (simulating infinity), then inserts the
    four lowest-z points from the example data set ``example/run01``.
    """

    def setUp(self):
        # Set initial points for sentinels (to simulate infinity)
        big_pos_value = 10E10
        big_neg_value = -1 * big_pos_value
        self.p1 = np.array([big_neg_value, big_pos_value, big_neg_value])
        p2 = np.array([big_pos_value, big_neg_value, big_neg_value])
        # Initialize avltree with the two sentinel nodes wired by hand.
        point0 = eaf3D.ApproxPoint(None, 1000, self.p1)
        point1 = eaf3D.ApproxPoint(None, 1001, p2)
        node0 = bst.AVLNode(point0, 1)
        node1 = bst.AVLNode(point1, 0)
        self.t = bst.AVLTree()
        self.t.root = node0
        node0.right = node1
        self.t.count = 2
        # Import more data points
        fname = 'example/run01'
        exset = eaf3D.import_approximate_set(fname)
        x, m = eaf3D.multiset_sum([exset])
        # Q is X sorted in ascending order of the z coordinate
        self.qstack = Stack()
        xintoq = sorted(x.values(), key=attrgetter('z'))
        for i in range(len(xintoq)):
            self.qstack.push(xintoq[i])
        # Add new data points to tree
        for i in range(4):
            p = self.qstack.pop()
            self.t.insert(p)

    def test_if_init_made_correctly(self):
        """Verify the balances of the fixture tree built in setUp."""
        self.assertEqual(self.t.root.balance, 0)
        self.assertEqual(self.t.root.left.balance, 1)
        self.assertEqual(self.t.root.right.balance, -1)
        self.assertEqual(self.t.root.left.right.balance, 0)
        self.assertEqual(self.t.root.right.left.balance, 0)

    def test_insert_found(self):
        """Inserting an already-present point must leave the tree unchanged."""
        expected = str(self.t)
        self.t.insert(eaf3D.ApproxPoint(None, 1000, self.p1))
        self.assertEqual(str(self.t), expected)

    def test_insert_case2(self):
        """After an adjust-only insert, every node on the search stack is balanced."""
        # Test insert function for adjust only
        tree = bst.AVLTree()
        tree.set_newroot(self.qstack.pop())
        for i in range(12):
            point = self.qstack.pop()
            tree.insert(point)
        # Insert nonvalid point to prove test
        point1 = eaf3D.ApproxPoint(None, 1002, np.array([2, 0, 0]))
        tree.insert(point1)
        point2 = eaf3D.ApproxPoint(None, 1003, np.array([3.5, 0, 0]))
        tree.insert(point2)
        (pivot, theStack, parent, found) = tree.search(point1)
        theStack.pop()
        while not theStack.isEmpty():
            node = theStack.pop()
            self.assertEqual(node.balance, 0)

    def test_insert_case1(self):
        """Inserting into a balanced tree tips each ancestor's balance to 1."""
        # Test for inserting into balanced tree
        # Get balanced tree
        tree = bst.AVLTree()
        tree.set_newroot(self.qstack.pop())
        for i in range(12):
            point = self.qstack.pop()
            tree.insert(point)
        # Insert nonvalid point to balance tree
        point1 = eaf3D.ApproxPoint(None, 1002, np.array([2, 0, 0]))
        tree.insert(point1)
        point2 = eaf3D.ApproxPoint(None, 1003, np.array([3.5, 0, 0]))
        tree.insert(point2)
        # Point to test
        point3 = eaf3D.ApproxPoint(None, 1004, np.array([8, 0, 0]))
        tree.insert(point3)
        # Check that all nodes along stack have balance = 1
        self.assertEqual(tree.count, 8)
        (pivot, theStack, parent, found) = tree.search(point3)
        theStack.pop()
        while not theStack.isEmpty():
            node = theStack.pop()
            self.assertEqual(node.balance, 1)

    def test_insert_case3_subcaseA(self):
        """Rotation at the pivot restores balance 0 at pivot and root."""
        point = eaf3D.ApproxPoint(None, 1002, np.array([4.5, 0, 0]))
        (pivot, theStack, parent, found) = self.t.search(point)
        self.t.insert(point)
        self.assertEqual(pivot.balance, 0)
        self.assertEqual(self.t.root.balance, 0)

    def test_insert_case3_subcaseB(self):
        """Double rotation promotes the inserted point to root.right."""
        for i in range(4):
            self.qstack.pop()
        point = self.qstack.pop()
        self.t.insert(point)
        self.assertEqual(self.t.root.right.balance, 0)
        self.assertEqual(self.t.root.right.point, point)
        self.assertEqual(self.t.root.right.right.balance, 0)
        self.assertEqual(self.t.root.right.left.balance, 0)

    def test_adjustBalances_negative(self):
        """adjustBalances_add drives the pivot's balance to -2 before rotation."""
        for i in range(4):
            self.qstack.pop()
        point = self.qstack.pop()
        (pivot, theStack, parent, found) = self.t.search(point)
        newNode = bst.AVLNode(point)
        self.t.adjustBalances_add(theStack, pivot, newNode)
        self.assertEqual(pivot.balance, -2)

    def test_floor_x(self):
        """floor_x returns the stored point with the largest x <= query x."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 23.4623828, 6059.2348600000005]))
        p2 = eaf3D.ApproxPoint(1, 1003, np.array([4, 14.07345342, 5990.93696]))
        p3 = eaf3D.ApproxPoint(1, 1004, np.array([5, 10.90633272, 5965.522494]))
        p4 = eaf3D.ApproxPoint(1, 1005, np.array([7, 10.73267638, 5868.0173159999995]))
        p5 = eaf3D.ApproxPoint(0, 1006, np.array([1, 28, 7000]))
        q1 = self.t.floor_x(p1)
        q2 = self.t.floor_x(p2)
        q3 = self.t.floor_x(p3)
        q4 = self.t.floor_x(p4)
        q5 = self.t.floor_x(p5)
        self.assertEqual(q1.x, 3)
        self.assertEqual(q2.x, 4)
        self.assertEqual(q3.x, 5)
        self.assertEqual(q4.x, 5)
        # Below every stored x, the left sentinel is returned.
        self.assertEqual(q5.x, -10E10)

    def test_higher_x(self):
        """higher_x returns the stored point with the smallest x > query x."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 23.4623828, 6059.2348600000005]))
        p2 = eaf3D.ApproxPoint(1, 1003, np.array([4, 14.07345342, 5990.93696]))
        p3 = eaf3D.ApproxPoint(1, 1004, np.array([5, 10.90633272, 5965.522494]))
        p4 = eaf3D.ApproxPoint(1, 1005, np.array([7, 10.73267638, 5868.0173159999995]))
        p5 = eaf3D.ApproxPoint(0, 1006, np.array([1, 28, 7000]))
        q1 = self.t.higher_x(p1)
        q2 = self.t.higher_x(p2)
        q3 = self.t.higher_x(p3)
        q4 = self.t.higher_x(p4)
        q5 = self.t.higher_x(p5)
        self.assertEqual(q1.x, 4)
        self.assertEqual(q2.x, 5)
        # Above every stored x, the right sentinel is returned.
        self.assertEqual(q3.x, 10E10)
        self.assertEqual(q4.x, 10E10)
        self.assertEqual(q5.x, 3)

    def test_lower_y(self):
        """lower_y returns the stored point with the largest y < query y."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 23.4623828, 6059.2348600000005]))
        p2 = eaf3D.ApproxPoint(1, 1003, np.array([4, 14.07345342, 5990.93696]))
        p3 = eaf3D.ApproxPoint(1, 1004, np.array([5, 10.90633272, 5965.522494]))
        p4 = eaf3D.ApproxPoint(1, 1005, np.array([7, 10.73267638, 5868.0173159999995]))
        p5 = eaf3D.ApproxPoint(0, 1006, np.array([9, 6.5, 7000]))
        q1 = self.t.lower_y(p1)
        q2 = self.t.lower_y(p2)
        q3 = self.t.lower_y(p3)
        q4 = self.t.lower_y(p4)
        q5 = self.t.lower_y(p5)
        self.assertAlmostEqual(q1.y, 20.21, places=2)
        self.assertAlmostEqual(q2.y, 12.67, places=2)
        self.assertAlmostEqual(q3.y, 10.42, places=2)
        self.assertAlmostEqual(q4.y, 10.42, places=2)
        self.assertEqual(q5.y, -10E10)

    def test_getRightMost(self):
        """getRightMost finds the maximum-x node of a subtree."""
        rightmost, stack, pivot = self.t.getRightMost(self.t.root)
        self.assertEqual(rightmost.point.x, 10E10)
        rightmost, stack, pivot = self.t.getRightMost(self.t.root.left)
        self.assertEqual(rightmost.point.x, 3)

    def test_list_nodes_domxy(self):
        """list_nodes_domxy returns the nodes dominated in x and y by the query."""
        p1 = eaf3D.ApproxPoint(1, 1002, np.array([3, 19.0, 6059.]))
        # Renamed from `list` to avoid shadowing the builtin.
        matches = self.t.list_nodes_domxy(p1)
        self.assertListEqual(matches, [self.t.root.left.right])

    def test_height(self):
        """The fixture tree has height 3."""
        height = self.t.height(self.t.root)
        self.assertEqual(height, 3)

    def test_print_astree(self):
        """Smoke test: printing the tree must not raise."""
        self.t.print_astree()
class RemoveNodeTests(unittest.TestCase):
    """Exercises AVLTree.remove_node across the leaf, one-child, and
    two-child cases, with and without rebalancing rotations.

    ``setUp`` fills the tree with the full example data set plus three
    extra points, so every removal test starts from the same shape.
    ``check_node_balances`` re-derives each node's balance and compares
    it to the stored value.
    """

    def setUp(self):
        self.t = bst.AVLTree()
        # Import more data points
        fname = 'example/run01'
        exset = eaf3D.import_approximate_set(fname)
        x, m = eaf3D.multiset_sum([exset])
        # Q is X sorted in ascending order of the z coordinate
        self.qstack = Stack()
        xintoq = sorted(x.values(), key=attrgetter('z'))
        for i in range(len(xintoq)):
            self.qstack.push(xintoq[i])
        self.t.set_newroot(self.qstack.pop())
        # Add data points to tree
        while not self.qstack.isEmpty():
            p = self.qstack.pop()
            self.t.insert(p)
        p1 = eaf3D.ApproxPoint(None, 1001, np.array([12, 6.6, 0]))
        self.t.insert(p1)
        p2 = eaf3D.ApproxPoint(None, 1002, np.array([8, 9.7, 0]))
        self.t.insert(p2)
        # NOTE(review): p3 reuses id 1002 (same as p2) — confirm intended.
        p3 = eaf3D.ApproxPoint(None, 1002, np.array([10, 7.5, 0]))
        self.t.insert(p3)

    def test_remove_node_case1_norotation(self):
        # This tests if it correctly removes a node without children from
        # the tree
        node, theStack, pivot = self.t.getRightMost(self.t.root)
        self.t.remove_node(node.left)
        self.assertEqual(self.t.root.right.balance, 0)
        self.assertEqual(self.t.root.balance, 0)

    def test_remove_node_case1_wrotation(self):
        # This tests if it correctly removes a node without children and
        # performs the necessary rotation.
        self.t.remove_node(self.t.root.left.left)
        self.assertEqual(self.t.count, 7)
        self.assertNotEqual(self.t.root.balance, 2)
        self.check_node_balances(self.t)

    def test_remove_node_case2_norotation(self):
        # This tests if AVLTree correctly removes a node and connects its
        # one child to its parent node.
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root)
        # Get node for comparison
        child = rightmost.left
        # Remove node
        self.t.remove_node(rightmost)
        self.check_node_balances(self.t)
        newright, rightStack, pivot = self.t.getRightMost(self.t.root)
        self.assertEqual(newright, child)

    def test_remove_node_case3_longer_subtree(self):
        # This tests if AVLTree correctly removes a node, connects its
        # one child to its parent node, and performs the necessary rotation
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root.right.left)
        self.t.remove_node(self.t.root.right)
        # There would have been a left rotation at root.right
        self.assertEqual(self.t.root.right.left.point, rightmost.point)
        self.check_node_balances(self.t)

    def test_remove_node_case3_smaller_subtree(self):
        # This tests if AVLTree correctly removes a node, connects its
        # one child to its parent node, and performs the necessary rotation
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root.left.left)
        self.t.remove_node(self.t.root.left)
        self.assertEqual(self.t.root.left.point, rightmost.point)
        self.check_node_balances(self.t)

    def test_remove_node_case3_root(self):
        # This tests if AVLTree correctly removes the root node with
        # children
        # Try with root and long stack
        rightmost, rightStack, pivot = self.t.getRightMost(self.t.root.left)
        self.t.remove_node(self.t.root)
        self.assertEqual(self.t.root.point, rightmost.point)
        self.check_node_balances(self.t)
        # Try with no stack (get down to two nodes)
        for i in range(5):
            self.t.remove_node(self.t.root)
        # Make sure no errors pop up in removal
        self.check_node_balances(self.t)

    def test_remove_node_case3_rootright(self):
        # This tests for a specific problem that popped where the right
        # node was removed and the balance at the root was wrong
        # Get tree to position that caused error, checking for new errors
        self.t.remove_node(self.t.root)
        self.check_node_balances(self.t)
        self.t.remove_node(self.t.root.right.right)
        self.check_node_balances(self.t)
        # The tree should now be balance=0 at root with right with two kids
        # After removing right, the root should still be balanced
        self.t.remove_node(self.t.root.right)
        self.check_node_balances(self.t)

    def test_remove_node_case3_left_is_rightmost(self):
        # Set up tree to test case 3 when the left node is the rightmost
        tree = bst.AVLTree()
        point0 = eaf3D.ApproxPoint(1, 1, np.array([4, 12.67, 0]))
        tree.set_newroot(point0)
        # Set initial points for sentinels (to simulate infinity)
        big_pos_value = 10E10
        big_neg_value = -1 * big_pos_value
        p1 = np.array([big_neg_value, big_pos_value, big_neg_value])
        p2 = np.array([big_pos_value, big_neg_value, big_neg_value])
        # Initialize avltree
        point1 = eaf3D.ApproxPoint(None, 1000, p1)
        av1 = bst.AVLNode(point1, balance=1)
        tree.root.left = av1
        point2 = eaf3D.ApproxPoint(None, 1001, p2)
        av2 = bst.AVLNode(point2, balance=-1)
        tree.root.right = av2
        point3 = eaf3D.ApproxPoint(1, 2, np.array([3, 20.21, 0]))
        av3 = bst.AVLNode(point3)
        tree.root.left.right = av3
        point4 = eaf3D.ApproxPoint(1, 3, np.array([5, 10.42, 0]))
        av4 = bst.AVLNode(point4)
        tree.root.right.left = av4
        tree.count = 5
        tree.remove_node(tree.root)
        tree.remove_node(tree.root)
        self.assertEqual(tree.root, av4)
        self.check_node_balances(tree)

    def check_node_balances(self, tree):
        # This module performs a check of all the balances in the tree,
        # walking the tree row by row from the root downward.
        leaves = [tree.root]
        while any(leaves):
            for f in range(len(leaves)):
                if leaves[f]:
                    correct_balance = tree.recalculate_balance(leaves[f])
                    self.assertEqual(leaves[f].balance, correct_balance)
            leaves = tree.next_tree_row(leaves)
# Run the full suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
import requests, json, textwrap, time, os, glob, random, hashlib
from random import randint
from PIL import Image, ImageDraw, ImageFont, ImageStat, ImageFilter, ImageEnhance
import nltk
from resizeimage import resizeimage
from tinydb import TinyDB, Query
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from pynput.keyboard import Key, Controller
import autoit
def brightness(im):
    """Return the mean value of the first band of *im* (a PIL image)."""
    # im = Image.open(im_file).convert('L')
    return ImageStat.Stat(im).mean[0]
# Persistent store of already-posted quote/image hashes.
db = TinyDB('./db.json')
# Load credentials and API keys from the local auth file.
with open('myauth.json') as json_file:
    data = json.load(json_file)
    firebase = data['firebase']
    username = data['username']
    password = data['password']
    unsplashApiKey = data['unsplash']['apiKey']
    firebaseEmail = data['firebase-email']
    # NOTE(review): key looks like a redaction placeholder — confirm.
    firebasePassword = data['<PASSWORD>']
    ritekitClientId = data['ritekitClientId']
    ritekitClientSecret = data['ritekitClientSecret']
# Tokenizer and POS-tagger models used later to pick nouns from the quote.
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
# Target Instagram portrait dimensions and text defaults.
image_size_x = 1080
image_size_y = 1350
blur = 0
color = 'rgb(0, 0, 0)'

# Fetch a quote that has not been posted before (tracked by the md5 of
# the quote text in TinyDB).  Give up after 9 consecutive failures.
done = 0
while done >= 0 and done < 9:
    try:
        URL = "https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=jsonp&jsonp=?"
        r = requests.get(url=URL)
        while not r:
            r = requests.get(url=URL)
        data = r.content
        # Strip the JSONP wrapper ("?(" ... ")") to get plain JSON.
        data = data[2:]
        data = data[:-1]
        data = json.loads(data)
        text = data['quoteText']
        author = data['quoteAuthor']
        print(text)
        print(author)
        # Hash the quote text for dedupe.  md5 requires bytes on
        # Python 3, so encode first (the original passed a str, which
        # raised TypeError and was silently swallowed below).
        md5hash = hashlib.md5(text.encode('utf-8'))
        print(md5hash.hexdigest())
        # Refetch until we get a quote we have not used before.
        while db.search(Query().textMd5 == md5hash.hexdigest()):
            r = requests.get(url=URL)
            while not r:
                r = requests.get(url=URL)
            data = r.content
            data = data[2:]
            data = data[:-1]
            data = json.loads(data)
            text = data['quoteText']
            author = data['quoteAuthor']
            print(text)
            print(author)
            # get text hash
            md5hash = hashlib.md5(text.encode('utf-8'))
        db.insert({'textMd5': md5hash.hexdigest()})
        done = 10
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still work;
        # count the failure and retry.
        done += 1
#image = requests.get("https://picsum.photos/" + str(image_size_x) + "/" + str(image_size_y) + "/?blur=" + str(blur)).content-----
# Pick nouns from the quote and use the first one that yields an
# Unsplash image; otherwise fall back to a random image.
is_noun = lambda pos: pos[:2] == 'NN'
tokenized = nltk.word_tokenize(text)
nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)]
if len(nouns) != 0:
    for noun in nouns:
        URL = "https://source.unsplash.com/1080x1350/?" + noun
        r = requests.get(url = URL)
        if r.status_code == 200:
            break
if len(nouns) == 0 or r.status_code != 200:
    print ("< Using Default >")
    URL = "https://source.unsplash.com/random/1080x1350"
# Download, resize to the target portrait frame, and save.
image = requests.get(URL).content
with open('latest.png', 'wb') as handler:
    handler.write(image)
with open('latest.png', 'r+b') as f:
    with Image.open(f) as image:
        cover = resizeimage.resize_cover(image, [1080, 1350])
        cover.save('latest.png', "PNG")
# get image hash (dedupe against previously posted images)
md5hash = hashlib.md5(Image.open('latest.png').tobytes())
print(md5hash.hexdigest())
while db.search(Query().imageMd5 == md5hash.hexdigest()):
    image = requests.get(URL).content
    with open('latest.png', 'wb') as handler:
        handler.write(image)
    with open('latest.png', 'r+b') as f:
        with Image.open(f) as image:
            cover = resizeimage.resize_cover(image, [1080, 1350])
            cover.save('latest.png', "PNG")
    md5hash = hashlib.md5(Image.open('latest.png').tobytes())
db.insert({'imageMd5': md5hash.hexdigest()})
# Convert to grayscale+alpha, randomize contrast/blur, and pick the
# black or white template/text color based on the center brightness.
background = cover.convert('LA')
background = ImageEnhance.Contrast(background).enhance(random.uniform(0.7, 1.0))
background = background.filter(ImageFilter.GaussianBlur(radius = randint(0, 5)))
# Sample the central region where the quote text will be drawn.
crop_rectangle = (200, 200, image_size_x - 200, image_size_y - 400)
cropped_im = background.crop(crop_rectangle)
template = "template_black.png"
if int(brightness(cropped_im)) < 120:
    # Dark background: switch to white text and the white template.
    color = 'rgb(255, 255, 255)'
    background = background.point(lambda p: p * 0.9) #darken
    template = "template_white.png"
foreground = Image.open(template)
background.paste(foreground, (0, 0), foreground)
img = background
draw = ImageDraw.Draw(img)
# Random font from the local fonts folder.
font_name = random.choice(glob.glob("./fonts/*.ttf"))
print ("font: " + font_name)
font = ImageFont.truetype(font_name, size=65)
# Draw the quote, wrapped and horizontally centered, line by line.
para = textwrap.wrap(text, width=25)
current_h, pad = 250, 10
for line in para:
    w, h = draw.textsize(line, font=font)
    draw.text(((image_size_x - w) / 2, current_h), line, font=font, fill=color)
    current_h += h + pad
if author:
    # Author name in a larger font, below the quote.
    font = ImageFont.truetype(font_name, size=75)
    current_h += h + pad
    para = textwrap.wrap(author, width=15)
    for line in para:
        w, h = draw.textsize(line, font=font)
        draw.text(((image_size_x - w) / 2, current_h), line + ".", font=font, fill=color)
        current_h += h + pad
img.save("ready.png", "PNG")
if os.path.exists("latest.png"):
    os.remove("latest.png")
photo_path = "ready.png"
# Dead code: two earlier caption-building experiments kept as unused
# string literals (RiteKit auto-hashtags, then a fixed promo caption).
'''
numberOfHashtags = 5
URL = "https://api.ritekit.com/v1/stats/auto-hashtag?post="
for word in text.split():
URL += word
URL += "%20"
URL += "&maxHashtags=" + str(numberOfHashtags) + "&hashtagPosition=auto&client_id=" + ritekitClientId
r = requests.get(url = URL)
while not r:
r = requests.get(url = URL)
data = r.content
data = json.loads(data)
caption = data['post']
'''
'''
caption = text + "\n.\n" + "Follow us for your daily motivation : @novus.quotes\n.\n #tag that person who needs to see this\n @novus.quotes\n @novus.quotes\n @novus.quotes\n.\n" + "#quotes #inspirationalquotes #inspirational #quotesForYou #motivateyourmind #motivatemyself #quotesToInspire #motivational #quotesToRemember #successquotes #lovequotes #success #quotesoftheday #funquotes #motivatinalquotes #quotesdaily #minimal #Novusquotes #novus #positivevibes"
caption+="\n "
for word in author.split():
caption += "#" + word + " "
caption+="\n"
nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)]
if len(nouns) != 0:
for noun in nouns:
caption+= "#" + noun + " "
caption+="\n #"
for word in author.split():
caption += word
print (caption)
'''
# Absolute Windows path of the composed image, for the file dialog.
image_path = os.path.dirname(os.path.abspath(__file__)) + "\\ready.png"
print(image_path)
# Drive mobile Instagram via Chrome's device emulation.
mobile_emulation = { "deviceName": "Pixel 2" }
opts = webdriver.ChromeOptions()
opts.add_experimental_option("mobileEmulation", mobile_emulation)
#opts.add_argument("headless")
driver = webdriver.Chrome(executable_path=r"./chromedriver",options=opts)
# Open the local "desc" file and copy its contents to the clipboard
# (select-all then copy) for later pasting into the caption field.
local_url = "file:///" + os.getcwd() + "/desc"
print(local_url)
driver.get(local_url)
keyboard = Controller()
keyboard.press(Key.ctrl)
keyboard.press('a')
keyboard.release('a')
keyboard.release(Key.ctrl)
keyboard.press(Key.ctrl)
keyboard.press('c')
keyboard.release('c')
keyboard.release(Key.ctrl)
main_url = "https://www.instagram.com"
driver.get(main_url)
sleep(4)
def login():
    """Log into Instagram with the credentials loaded from myauth.json.

    Reads the module-level ``driver``, ``username`` and ``password``.
    """
    login_button = driver.find_element_by_xpath("//button[contains(text(),'Log In')]")
    login_button.click()
    sleep(randint(3,4))
    username_input = driver.find_element_by_xpath("//input[@name='username']")
    username_input.send_keys(username)
    password_input = driver.find_element_by_xpath("//input[@name='password']")
    password_input.send_keys(password)
    password_input.submit()
login()
sleep(randint(3,4))
def close_reactivated():
    """Dismiss the 'Not Now' reactivation prompt if it appears (best effort)."""
    try:
        sleep(randint(2,3))
        not_now_btn = driver.find_element_by_xpath("//a[contains(text(),'Not Now')]")
        not_now_btn.click()
    except Exception:
        # Dialog absent — fine.  Narrowed from a bare except so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
close_reactivated()
def close_notification():
    """Dismiss the notifications prompt if it appears (best effort)."""
    try:
        sleep(randint(2,3))
        close_noti_btn = driver.find_element_by_xpath("//button[contains(text(),'Not Now')]")
        close_noti_btn.click()
        sleep(randint(2,3))
    except Exception:
        # Dialog absent — fine.  Narrowed from a bare except so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
close_notification()
def close_add_to_home():
    """Dismiss the 'Add to Home screen' prompt if it appears (best effort)."""
    try:
        sleep(randint(3,4))
        close_addHome_btn = driver.find_element_by_xpath("//button[contains(text(),'Cancel')]")
        close_addHome_btn.click()
        sleep(randint(1,2))
    except Exception:
        # Guarded like the sibling close_* helpers: previously a missing
        # dialog raised and aborted the whole run.
        pass
close_add_to_home()
sleep(randint(3,4))
close_notification()
# Start a new post and feed the image path to the native Windows
# "Open" file dialog via AutoIt.
new_post_btn = driver.find_element_by_xpath("//div[@role='menuitem']").click()
sleep(randint(3,4))
autoit.win_active("Open")
sleep(randint(3,4))
autoit.control_send("Open","Edit1",image_path)
sleep(randint(3,4))
autoit.control_send("Open","Edit1","{ENTER}")
sleep(randint(3,4))
# NOTE(review): class name 'pHnkA' is an obfuscated Instagram CSS class
# and will break when the site is redeployed — confirm still valid.
driver.find_element_by_xpath("//button[@class='pHnkA']").click()
sleep(randint(3,4))
next_btn = driver.find_element_by_xpath("//button[contains(text(),'Next')]").click()
sleep(randint(3,4))
caption_field = driver.find_element_by_xpath("//textarea[@aria-label='Write a caption…']")
# Type the quote, then paste the clipboard contents copied from the
# local "desc" file earlier.
caption = text
caption_field.send_keys(caption)
keyboard.press(Key.ctrl)
keyboard.press('v')
keyboard.release('v')
keyboard.release(Key.ctrl)
# Append hashtags built from the author name and the quote's nouns.
caption = ""
if author:
    for word in author.split():
        caption += "#" + word + " "
    caption+="\n #"
    for word in author.split():
        caption += word
nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)]
if len(nouns) != 0:
    for noun in nouns:
        caption+= "#" + noun + " "
print(caption)
caption_field.send_keys(caption)
sleep(1)
# Publish and shut down.
share_btn = driver.find_element_by_xpath("//button[contains(text(),'Share')]").click()
sleep(10)
driver.close()
sleep(4)
from random import randint
from PIL import Image, ImageDraw, ImageFont, ImageStat, ImageFilter, ImageEnhance
import nltk
from resizeimage import resizeimage
from tinydb import TinyDB, Query
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from pynput.keyboard import Key, Controller
import autoit
def brightness(im):
    """Return the mean value of the first band of *im* (a PIL image)."""
    # im = Image.open(im_file).convert('L')
    return ImageStat.Stat(im).mean[0]
# Post-history database: stores MD5 hashes of quotes and images already used,
# so the bot never posts the same content twice.
db = TinyDB('./db.json')
# Load account credentials and third-party API keys from a local auth file.
with open('myauth.json') as json_file:
data = json.load(json_file)
firebase = data['firebase']
username = data['username']
password = data['password']
unsplashApiKey = data['unsplash']['apiKey']
firebaseEmail = data['firebase-email']
# NOTE(review): '<PASSWORD>' looks like a secret-scrubber placeholder; the
# real key was presumably 'firebase-password' -- restore before running.
firebasePassword = data['<PASSWORD>']
ritekitClientId = data['ritekitClientId']
ritekitClientSecret = data['ritekitClientSecret']
# NLTK models: 'punkt' tokenizes the quote, the tagger finds nouns for hashtags.
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
# Instagram portrait post dimensions (4:5 aspect ratio).
image_size_x = 1080
image_size_y = 1350
blur = 0
color = 'rgb(0, 0, 0)'   # caption text color; switched to white on dark images later
done = 0                 # retry counter for the quote-fetch loop below
# Fetch a random quote from forismatic (retrying up to 9 failures) and
# de-duplicate it against the TinyDB history by MD5 of the quote text.
# Sets: text, author, md5hash; inserts the new hash into db on success.
while done >= 0 and done < 9:
    try:
        URL = "https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=jsonp&jsonp=?"
        r = requests.get(url = URL)
        while not r:  # a Response is falsy on non-2xx status codes
            r = requests.get(url = URL)
        data = r.content
        data = data[2:]   # strip the jsonp "?(" prefix
        data = data[:-1]  # strip the trailing ")"
        data = json.loads(data)
        text = data['quoteText']
        author = data['quoteAuthor']
        print(text)
        print(author)
        # BUG FIX: md5 requires a bytes-like object.  The original hashed the
        # str directly, which raises TypeError on Python 3 (and fails on
        # non-ASCII quotes on Python 2); the bare except below silently
        # swallowed that, burning all nine retries.
        md5hash = hashlib.md5(text.encode('utf-8'))
        print(md5hash.hexdigest())
        while db.search(Query().textMd5 == md5hash.hexdigest()):
            # This quote was posted before -- fetch a fresh one.
            r = requests.get(url = URL)
            while not r:
                r = requests.get(url = URL)
            data = r.content
            data = data[2:]
            data = data[:-1]
            data = json.loads(data)
            text = data['quoteText']
            author = data['quoteAuthor']
            print(text)
            print(author)
            md5hash = hashlib.md5(text.encode('utf-8'))
        db.insert({'textMd5': md5hash.hexdigest()})
        done = 10   # success sentinel: exits the retry loop
    except:
        # NOTE(review): bare except is a deliberate best-effort retry on any
        # failure (network, JSON shape, API change); kept, but counted.
        done += 1
        pass
#image = requests.get("https://picsum.photos/" + str(image_size_x) + "/" + str(image_size_y) + "/?blur=" + str(blur)).content-----
# Pick an Unsplash background photo keyed on a noun from the quote;
# fall back to a fully random photo if no noun yields a 200 response.
is_noun = lambda pos: pos[:2] == 'NN'
tokenized = nltk.word_tokenize(text)
nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)]
if len(nouns) != 0:
for noun in nouns:
URL = "https://source.unsplash.com/1080x1350/?" + noun
r = requests.get(url = URL)
if r.status_code == 200:
break
if len(nouns) == 0 or r.status_code != 200:
print ("< Using Default >")
URL = "https://source.unsplash.com/random/1080x1350"
image = requests.get(URL).content
with open('latest.png', 'wb') as handler:
handler.write(image)
# Re-open and center-crop/resize to exactly 1080x1350.
with open('latest.png', 'r+b') as f:
with Image.open(f) as image:
cover = resizeimage.resize_cover(image, [1080, 1350])
cover.save('latest.png', "PNG")
# get image hash -- used to avoid reposting a background we already used
md5hash = hashlib.md5(Image.open('latest.png').tobytes())
print(md5hash.hexdigest())
# Keep re-fetching until the image hash is unseen in the history db.
while db.search(Query().imageMd5 == md5hash.hexdigest()):
image = requests.get(URL).content
with open('latest.png', 'wb') as handler:
handler.write(image)
with open('latest.png', 'r+b') as f:
with Image.open(f) as image:
cover = resizeimage.resize_cover(image, [1080, 1350])
cover.save('latest.png', "PNG")
md5hash = hashlib.md5(Image.open('latest.png').tobytes())
db.insert({'imageMd5': md5hash.hexdigest()})
# Compose the final post: greyscale background, random contrast/blur,
# template overlay, then the quote and author drawn centered.
background = cover.convert('LA')
background = ImageEnhance.Contrast(background).enhance(random.uniform(0.7, 1.0))
background = background.filter(ImageFilter.GaussianBlur(radius = randint(0, 5)))
# Sample a central region to judge overall brightness (ignores edges).
crop_rectangle = (200, 200, image_size_x - 200, image_size_y - 400)
cropped_im = background.crop(crop_rectangle)
template = "template_black.png"
if int(brightness(cropped_im)) < 120:
# Dark photo: use white text, darken further, and the white template.
color = 'rgb(255, 255, 255)'
background = background.point(lambda p: p * 0.9) #darken
template = "template_white.png"
foreground = Image.open(template)
background.paste(foreground, (0, 0), foreground)
img = background
draw = ImageDraw.Draw(img)
# Random font from the local fonts directory for visual variety.
font_name = random.choice(glob.glob("./fonts/*.ttf"))
print ("font: " + font_name)
font = ImageFont.truetype(font_name, size=65)
para = textwrap.wrap(text, width=25)
current_h, pad = 250, 10
# Draw the quote line by line, horizontally centered.
for line in para:
w, h = draw.textsize(line, font=font)
draw.text(((image_size_x - w) / 2, current_h), line, font=font, fill=color)
current_h += h + pad
if author:
# Author name in a larger size, one blank line below the quote.
font = ImageFont.truetype(font_name, size=75)
current_h += h + pad
para = textwrap.wrap(author, width=15)
for line in para:
w, h = draw.textsize(line, font=font)
draw.text(((image_size_x - w) / 2, current_h), line + ".", font=font, fill=color)
current_h += h + pad
img.save("ready.png", "PNG")
# The raw download is no longer needed once the composed image is saved.
if os.path.exists("latest.png"):
os.remove("latest.png")
photo_path = "ready.png"
'''
numberOfHashtags = 5
URL = "https://api.ritekit.com/v1/stats/auto-hashtag?post="
for word in text.split():
URL += word
URL += "%20"
URL += "&maxHashtags=" + str(numberOfHashtags) + "&hashtagPosition=auto&client_id=" + ritekitClientId
r = requests.get(url = URL)
while not r:
r = requests.get(url = URL)
data = r.content
data = json.loads(data)
caption = data['post']
'''
'''
caption = text + "\n.\n" + "Follow us for your daily motivation : @novus.quotes\n.\n #tag that person who needs to see this\n @novus.quotes\n @novus.quotes\n @novus.quotes\n.\n" + "#quotes #inspirationalquotes #inspirational #quotesForYou #motivateyourmind #motivatemyself #quotesToInspire #motivational #quotesToRemember #successquotes #lovequotes #success #quotesoftheday #funquotes #motivatinalquotes #quotesdaily #minimal #Novusquotes #novus #positivevibes"
caption+="\n "
for word in author.split():
caption += "#" + word + " "
caption+="\n"
nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)]
if len(nouns) != 0:
for noun in nouns:
caption+= "#" + noun + " "
caption+="\n #"
for word in author.split():
caption += word
print (caption)
'''
# Absolute path to the composed image, for autoit's file-open dialog.
# NOTE(review): backslash join makes this Windows-only -- consistent with
# the autoit usage below, but worth confirming.
image_path = os.path.dirname(os.path.abspath(__file__)) + "\\ready.png"
print(image_path)
# Emulate a mobile device so instagram.com exposes the mobile upload UI.
mobile_emulation = { "deviceName": "Pixel 2" }
opts = webdriver.ChromeOptions()
opts.add_experimental_option("mobileEmulation", mobile_emulation)
#opts.add_argument("headless")
driver = webdriver.Chrome(executable_path=r"./chromedriver",options=opts)
# Open the local 'desc' file and copy its contents to the clipboard
# (pasted into the caption later with Ctrl+V).
local_url = "file:///" + os.getcwd() + "/desc"
print(local_url)
driver.get(local_url)
keyboard = Controller()
keyboard.press(Key.ctrl)
keyboard.press('a')
keyboard.release('a')
keyboard.release(Key.ctrl)
keyboard.press(Key.ctrl)
keyboard.press('c')
keyboard.release('c')
keyboard.release(Key.ctrl)
main_url = "https://www.instagram.com"
driver.get(main_url)
sleep(4)
def login():
    """Log in to Instagram through the mobile-web login form.

    Uses the module-level `driver`, `username` and `password` globals.
    """
    driver.find_element_by_xpath("//button[contains(text(),'Log In')]").click()
    sleep(randint(3, 4))
    user_field = driver.find_element_by_xpath("//input[@name='username']")
    user_field.send_keys(username)
    pass_field = driver.find_element_by_xpath("//input[@name='password']")
    pass_field.send_keys(password)
    pass_field.submit()
login()
sleep(randint(3,4))
def close_reactivated():
    """Best-effort dismissal of the 'account reactivated' prompt.

    Silently does nothing if the prompt is absent.
    """
    try:
        sleep(randint(2, 3))
        driver.find_element_by_xpath("//a[contains(text(),'Not Now')]").click()
    except:
        pass
close_reactivated()
def close_notification():
    """Best-effort dismissal of the 'Turn on Notifications' dialog.

    Silently does nothing if the dialog is absent.
    """
    try:
        sleep(randint(2, 3))
        dismiss_button = driver.find_element_by_xpath("//button[contains(text(),'Not Now')]")
        dismiss_button.click()
        sleep(randint(2, 3))
    except:
        pass
close_notification()
def close_add_to_home():
# Dismiss the "Add to Home screen" banner shown under mobile emulation.
# NOTE(review): unlike close_notification there is no try/except here, so
# this raises if the banner is absent -- confirm that is intended.
sleep(randint(3,4))
close_addHome_btn = driver.find_element_by_xpath("//button[contains(text(),'Cancel')]")
close_addHome_btn.click()
sleep(randint(1,2))
close_add_to_home()
sleep(randint(3,4))
close_notification()
# Open the new-post menu; the native file dialog is driven via autoit.
new_post_btn = driver.find_element_by_xpath("//div[@role='menuitem']").click()
sleep(randint(3,4))
autoit.win_active("Open")
sleep(randint(3,4))
# Type the image path into the Windows "Open" dialog and confirm.
autoit.control_send("Open","Edit1",image_path)
sleep(randint(3,4))
autoit.control_send("Open","Edit1","{ENTER}")
sleep(randint(3,4))
# NOTE(review): 'pHnkA' is an obfuscated Instagram class name; these
# selectors break whenever Instagram redeploys its frontend.
driver.find_element_by_xpath("//button[@class='pHnkA']").click()
sleep(randint(3,4))
next_btn = driver.find_element_by_xpath("//button[contains(text(),'Next')]").click()
sleep(randint(3,4))
caption_field = driver.find_element_by_xpath("//textarea[@aria-label='Write a caption…']")
# Caption part 1: the quote itself, then paste the clipboard (the 'desc'
# boilerplate copied earlier) with Ctrl+V.
caption = text
caption_field.send_keys(caption)
keyboard.press(Key.ctrl)
keyboard.press('v')
keyboard.release('v')
keyboard.release(Key.ctrl)
# Caption part 2: hashtags from the author's name and the quote's nouns.
caption = ""
if author:
for word in author.split():
caption += "#" + word + " "
caption+="\n #"
for word in author.split():
caption += word
nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)]
if len(nouns) != 0:
for noun in nouns:
caption+= "#" + noun + " "
print(caption)
caption_field.send_keys(caption)
sleep(1)
share_btn = driver.find_element_by_xpath("//button[contains(text(),'Share')]").click()
sleep(10)
driver.close()
sleep(4) | 0.282691 | 0.120594 |
import torch
import torchvision
from torch import nn
import logging
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import time
import os
import copy
import logging
import sys
sys.path.append('../')
from Model.fcdn import FCDenseNet as UNet
from Data.get_super_synth_loader import get_dataloader
from Model.loss import CrossEntropyLoss2d_unet as CrossEntropyLoss2d
from Train_unet_module import train_model
from Options.Udense_options import *
# Select the experiment configuration and build the FC-DenseNet model.
config = udense_v1
print("----Config name: %s-----------batch_size: %d----------"%(config.name,config.batch_size))
model = UNet(in_channels=3, down_blocks=config.down_blocks,\
up_blocks=config.up_blocks, bottleneck_layers=config.bottleneck_layers,\
growth_rate=config.growth_rate,\
out_chans_first_conv=config.out_chans_first_conv, n_classes=config.n_classes)
device_ids=config.device_ids
# NOTE(review): with more than one device id this builds a string like
# 'cuda:0,1', which torch.device rejects -- verify device_ids is length 1
# (DataParallel below handles the multi-GPU spread itself).
device = torch.device('cuda:{}'.format(','.join([str(i) for i in device_ids])) \
if torch.cuda.device_count()>0 else torch.device('cpu'))
model_ft = nn.DataParallel(model, device_ids, dim=0)
model_ft.to(device)
criterion = CrossEntropyLoss2d()
# Observe that all parameters are being optimized
# NOTE(review): the SGD branch uses model_ft.parameters() while RMSprop uses
# model.parameters(); DataParallel shares the wrapped module's parameters so
# the sets are identical, but the inconsistency is worth cleaning up.
if config.optim == 'sgd':
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
else:
optimizer_ft = optim.RMSprop(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
# Step-decay schedule: lr is multiplied by 0.1 at each configured milestone.
exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=config.milestones, gamma=0.1, )
dataloaders = {'train':get_dataloader(batch_size=config.batch_size,\
root_dir = config.train_path,mask_dir=config.mask_dir,\
num_workers=config.num_workers),\
'val':get_dataloader(batch_size= config.batch_size,\
root_dir = config.test_path,mask_dir=config.mask_dir,\
num_workers=config.num_workers)}
model_ft = train_model(model_ft, dataloaders, criterion, optimizer_ft, exp_lr_scheduler,\
num_epochs=config.num_epochs,save_epoch=config.save_epoch,\
display_size=config.display_size,save_path= config.save_path) | Train/Train_fcdn.py | import torch
import torchvision
from torch import nn
import logging
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import time
import os
import copy
import logging
import sys
sys.path.append('../')
from Model.fcdn import FCDenseNet as UNet
from Data.get_super_synth_loader import get_dataloader
from Model.loss import CrossEntropyLoss2d_unet as CrossEntropyLoss2d
from Train_unet_module import train_model
from Options.Udense_options import *
config = udense_v1
print("----Config name: %s-----------batch_size: %d----------"%(config.name,config.batch_size))
model = UNet(in_channels=3, down_blocks=config.down_blocks,\
up_blocks=config.up_blocks, bottleneck_layers=config.bottleneck_layers,\
growth_rate=config.growth_rate,\
out_chans_first_conv=config.out_chans_first_conv, n_classes=config.n_classes)
device_ids=config.device_ids
device = torch.device('cuda:{}'.format(','.join([str(i) for i in device_ids])) \
if torch.cuda.device_count()>0 else torch.device('cpu'))
model_ft = nn.DataParallel(model, device_ids, dim=0)
model_ft.to(device)
criterion = CrossEntropyLoss2d()
# Observe that all parameters are being optimized
if config.optim == 'sgd':
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
else:
optimizer_ft = optim.RMSprop(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=config.milestones, gamma=0.1, )
dataloaders = {'train':get_dataloader(batch_size=config.batch_size,\
root_dir = config.train_path,mask_dir=config.mask_dir,\
num_workers=config.num_workers),\
'val':get_dataloader(batch_size= config.batch_size,\
root_dir = config.test_path,mask_dir=config.mask_dir,\
num_workers=config.num_workers)}
model_ft = train_model(model_ft, dataloaders, criterion, optimizer_ft, exp_lr_scheduler,\
num_epochs=config.num_epochs,save_epoch=config.save_epoch,\
display_size=config.display_size,save_path= config.save_path) | 0.583203 | 0.183411 |
from __future__ import print_function
import argparse
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from models import vgg, resnet, densenet
import numpy as np
import os
import sys
from tqdm import tqdm
from utils import *
import glob
if __name__ == '__main__':
    # Evaluate every checkpoint matching --cp-path on the CIFAR-10 test set
    # and report the most accurate one.
    parser = argparse.ArgumentParser(description='Cifar10 Evaluation')
    parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for cps')
    parser.add_argument('--data-path', type=str, default='./data/', metavar='Path', help='Path to data')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N', help='input batch size for testing (default: 100)')
    parser.add_argument('--model', choices=['vgg', 'resnet', 'densenet'], default='resnet')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
    parser.add_argument('--workers', type=int, default=4, metavar='N', help='Data load workers (default: 4)')
    args = parser.parse_args()
    args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False

    # CIFAR-10 channel statistics (mean/std in 0-255 scale, rescaled to 0-1).
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize([x / 255 for x in [125.3, 123.0, 113.9]], [x / 255 for x in [63.0, 62.1, 66.7]])])
    # BUG FIX: honor --data-path (the original hard-coded './data' here).
    testset = datasets.CIFAR10(root=args.data_path, train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    cp_list = glob.glob(args.cp_path + '*.pt')

    best_model, best_acc = None, -float('inf')
    # BUG FIX: `device` was only assigned inside `if args.cuda`, so CPU-only
    # runs crashed with NameError at x.to(device); default to the CPU device.
    device = torch.device('cpu')

    for cp in cp_list:
        # Load to CPU first; moved to GPU below only if CUDA is enabled.
        ckpt = torch.load(cp, map_location=lambda storage, loc: storage)
        softmax = get_sm_from_cp(ckpt)
        if args.model == 'vgg':
            model = vgg.VGG('VGG16', sm_type=softmax)
        elif args.model == 'resnet':
            model = resnet.ResNet18(sm_type=softmax)
        elif args.model == 'densenet':
            model = densenet.densenet_cifar(sm_type=softmax)
        try:
            model.load_state_dict(ckpt['model_state'], strict=True)
        except RuntimeError as err:
            print("Runtime Error: {0}".format(err))
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise
        if args.cuda:
            device = get_freer_gpu()
            model = model.cuda(device)
        model.eval()

        correct = 0
        with torch.no_grad():
            iterator = tqdm(test_loader, total=len(test_loader))
            for batch in iterator:
                x, y = batch
                x = x.to(device)
                y = y.to(device)
                # Classify from L2-normalized embeddings via the output head.
                embeddings = model.forward(x)
                embeddings_norm = F.normalize(embeddings, p=2, dim=1)
                out = model.out_proj(embeddings_norm, y)
                pred = F.softmax(out, dim=1).max(1)[1].long()
                correct += pred.squeeze().eq(y.squeeze()).detach().sum().item()

        acc = 100. * correct / len(testset)
        model_id = cp.split('/')[-1]
        print('\nAccuracy of model {}: {}'.format(model_id, acc))
        if acc > best_acc:
            best_model, best_acc = model_id, acc

    print('Best model and corresponding ACC: {} - {}'.format(best_model, best_acc))
import argparse
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from models import vgg, resnet, densenet
import numpy as np
import os
import sys
from tqdm import tqdm
from utils import *
import glob
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Cifar10 Evaluation')
parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for cps')
parser.add_argument('--data-path', type=str, default='./data/', metavar='Path', help='Path to data')
parser.add_argument('--batch-size', type=int, default=100, metavar='N', help='input batch size for testing (default: 100)')
parser.add_argument('--model', choices=['vgg', 'resnet', 'densenet'], default='resnet')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--workers', type=int, default=4, metavar='N', help='Data load workers (default: 4)')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize([x / 255 for x in [125.3, 123.0, 113.9]], [x / 255 for x in [63.0, 62.1, 66.7]])])
testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
cp_list = glob.glob(args.cp_path+'*.pt')
best_model, best_acc = None, -float('inf')
for cp in cp_list:
ckpt = torch.load(cp, map_location = lambda storage, loc: storage)
softmax = get_sm_from_cp(ckpt)
if args.model == 'vgg':
model = vgg.VGG('VGG16', sm_type=softmax)
elif args.model == 'resnet':
model = resnet.ResNet18(sm_type=softmax)
elif args.model == 'densenet':
model = densenet.densenet_cifar(sm_type=softmax)
try:
model.load_state_dict(ckpt['model_state'], strict=True)
except RuntimeError as err:
print("Runtime Error: {0}".format(err))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
if args.cuda:
device = get_freer_gpu()
model = model.cuda(device)
model.eval()
correct = 0
with torch.no_grad():
iterator = tqdm(test_loader, total=len(test_loader))
for batch in iterator:
x, y = batch
x = x.to(device)
y = y.to(device)
embeddings = model.forward(x)
embeddings_norm = F.normalize(embeddings, p=2, dim=1)
out = model.out_proj(embeddings_norm, y)
pred = F.softmax(out, dim=1).max(1)[1].long()
correct += pred.squeeze().eq(y.squeeze()).detach().sum().item()
acc = 100.*correct/len(testset)
model_id = cp.split('/')[-1]
print('\nAccuracy of model {}: {}'.format(model_id, acc))
if acc>best_acc:
best_model, best_acc = model_id, acc
print('Best model and corresponding ACC: {} - {}'.format(best_model, best_acc)) | 0.397354 | 0.136868 |
import os
import sys
from copy import deepcopy
from glob import glob
MODNAME = '_builtin'
def _copy(obj, **kws):
"""copy an object"""
return deepcopy(obj)
def _parent(name, _larch=None, **kw):
"print out parent group name of an object"
# Prints (rather than returns) the symbol found in the larch interpreter's
# symbol table.  NOTE(review): raises AttributeError if _larch is None.
print(_larch.symtable._lookup(name, create=False))
def _ls(directory='.', _larch=None, **kws):
"""return a list of files in the current directory"""
directory.strip()
if len(directory) == 0:
arg = '.'
if os.path.isdir(directory):
ret = os.listdir(directory)
else:
ret = glob(directory)
if sys.platform == 'win32':
for i in range(len(ret)):
ret[i] = ret[i].replace('\\','/')
return ret
def _cwd(**kws):
"return current working directory"
ret = os.getcwd()
if sys.platform == 'win32':
ret = ret.replace('\\','/')
return ret
def _cd(name, **kws):
"""change directory to specified directory"""
name = name.strip()
if name:
os.chdir(name)
ret = os.getcwd()
if sys.platform == 'win32':
ret = ret.replace('\\','/')
return ret
def show_more(text, filename=None, writer=None,
              pagelength=30, prefix='', _larch=None, **kws):
    """Display *text* one page at a time, in the style of Unix ``more``.

    *text* may be a string (split on newlines) or a list of lines.  Every
    *pagelength* lines the user is prompted to continue; 'q' quits.  Output
    goes to *writer* (default sys.stdout), each line prefixed by *prefix*.
    """
    # Python 2/3 compatibility: the original used the Py2-only names
    # `unicode` and `raw_input` unconditionally.
    try:
        string_types = (str, unicode)
        read_line = raw_input
    except NameError:
        string_types = (str,)
        read_line = input
    txt = text[:]
    if isinstance(txt, string_types):
        txt = txt.split('\n')
    if len(txt) < 1:
        return
    prompt = '== hit return for more, q to quit'
    ps = "%s (%%.2f%%%%) == " % prompt
    if filename:
        ps = "%s (%%.2f%%%% of %s) == " % (prompt, filename)
    if writer is None:
        writer = sys.stdout
    for i, line in enumerate(txt, start=1):
        # BUG FIX: the original wrote newline-terminated lines through
        # _larch.writer even when an explicit writer was supplied (and
        # crashed with AttributeError when _larch was None); always honor
        # the `writer` argument.
        if line.endswith('\n'):
            writer.write("%s%s" % (prefix, line))
        else:
            writer.write("%s%s\n" % (prefix, line))
        if i % pagelength == 0:
            try:
                answer = read_line(ps % (100. * i / len(txt)))
                if answer in ('q', 'Q'):
                    return
            except KeyboardInterrupt:
                writer.write("\n")
                return
def _more(fname, pagelength=32, _larch=None, **kws):
    """list file contents:
       > more('file.txt')
    by default, the file is shown 32 lines at a time.
    You can specify the number of lines to show at a time
    with the pagelength option:
       > more('file.txt', pagelength=10)

    Errors (missing file, not a file, unreadable) are reported to the larch
    writer if available, else stdout, and the function returns quietly.
    """
    output = sys.stdout.write
    if _larch is not None:
        output = _larch.writer.write
    if not os.path.exists(fname):
        output("File '%s' not found.\n" % fname)
        return
    elif not os.path.isfile(fname):
        output("'%s' not a file.\n" % fname)
        return
    try:
        # BUG FIX: the original opened the file without ever closing it;
        # use a context manager so the handle is released promptly.
        with open(fname, 'r') as fh:
            text = fh.readlines()
    except IOError:
        output("cannot open file: %s\n" % fname)
        return
    show_more(text, filename=fname, _larch=_larch,
              pagelength=pagelength, **kws)
def registerLarchPlugin():
    """Register this module's shell utilities under the '_builtin' group."""
    exports = {
        'copy': _copy,
        'more': _more,
        'parent': _parent,
        'ls': _ls,
        'cd': _cd,
        'cwd': _cwd,
    }
    return ('_builtin', exports)
import sys
from copy import deepcopy
from glob import glob
MODNAME = '_builtin'
def _copy(obj, **kws):
"""copy an object"""
return deepcopy(obj)
def _parent(name, _larch=None, **kw):
"print out parent group name of an object"
print(_larch.symtable._lookup(name, create=False))
def _ls(directory='.', _larch=None, **kws):
"""return a list of files in the current directory"""
directory.strip()
if len(directory) == 0:
arg = '.'
if os.path.isdir(directory):
ret = os.listdir(directory)
else:
ret = glob(directory)
if sys.platform == 'win32':
for i in range(len(ret)):
ret[i] = ret[i].replace('\\','/')
return ret
def _cwd(**kws):
"return current working directory"
ret = os.getcwd()
if sys.platform == 'win32':
ret = ret.replace('\\','/')
return ret
def _cd(name, **kws):
"""change directory to specified directory"""
name = name.strip()
if name:
os.chdir(name)
ret = os.getcwd()
if sys.platform == 'win32':
ret = ret.replace('\\','/')
return ret
def show_more(text, filename=None, writer=None,
pagelength=30, prefix='', _larch=None, **kws):
"""show lines of text in the style of more """
txt = text[:]
if isinstance(txt, (str, unicode)):
txt = txt.split('\n')
if len(txt) <1:
return
prompt = '== hit return for more, q to quit'
ps = "%s (%%.2f%%%%) == " % prompt
if filename:
ps = "%s (%%.2f%%%% of %s) == " % (prompt, filename)
if writer is None:
writer = sys.stdout
i = 0
for i in range(len(txt)):
if txt[i].endswith('\n'):
_larch.writer.write("%s%s" % (prefix, txt[i]))
else:
writer.write("%s%s\n" % (prefix, txt[i]))
i = i + 1
if i % pagelength == 0:
try:
x = raw_input(ps % (100.*i/len(txt)))
if x in ('q','Q'): return
except KeyboardInterrupt:
writer.write("\n")
return
def _more(fname, pagelength=32, _larch=None, **kws):
"""list file contents:
> more('file.txt')
by default, the file is shown 32 lines at a time.
You can specify the number of lines to show at a time
with the pagelength option:
> more('file.txt', pagelength=10)
"""
output = sys.stdout.write
if _larch is not None:
output = _larch.writer.write
if not os.path.exists(fname):
output("File '%s' not found.\n" % fname)
return
elif not os.path.isfile(fname):
output("'%s' not a file.\n" % fname)
return
try:
text = open(fname, 'r').readlines()
except IOError:
output("cannot open file: %s\n" % fname)
return
show_more(text, filename=fname, _larch=_larch,
pagelength=pagelength, **kws)
def registerLarchPlugin():
return ('_builtin', {'copy': _copy, 'more': _more,
'parent': _parent, 'ls': _ls,
'cd': _cd, 'cwd': _cwd }) | 0.160102 | 0.063628 |
import datetime
import ddt
from ggrc.models import all_models
from ggrc.utils import errors
from integration.ggrc import api_helper
from integration.ggrc.models import factories
from integration.ggrc import TestCase
class TestProgram(TestCase):
"""Program test cases."""
def setUp(self):
# Fresh API client plus one Program with a mapped Audit per test.
self.api = api_helper.Api()
with factories.single_commit():
self.program = factories.ProgramFactory()
self.audit_id = factories.AuditFactory(program=self.program).id
def test_put_empty_audits(self):
"""Audit doesn't get deleted when empty audits field is put."""
response = self.api.put(self.program, data={"audits": []})
self.assert200(response)
audit = self.refresh_object(all_models.Audit, id_=self.audit_id)
self.assertIsNotNone(audit)
def test_delete_with_audits(self):
"""Test deletion of a program with a mapped audit"""
# Deleting a program that still has audits must be rejected with 409.
response = self.api.delete(self.program)
self.assertEqual(response.status_code, 409)
self.assertEqual(
response.json,
{
"message": errors.MAPPED_AUDITS,
"code": 409,
}
)
def test_delete_without_audits(self):
"""Test deletion of a program succeeds once its audit is deleted."""
response = self.api.delete(self.program.audits[0])
self.assert200(response)
response = self.api.delete(self.program)
self.assert200(response)
def test_create_wrong_recipients(self):
"""Test creation of a program with a wrong recipients"""
# 'recipients' roles below are not valid for Program, so POST must 400.
data = [{
'program': {
'status': 'Draft',
'kind': 'Directive',
'send_by_default': True,
'managers': ['<EMAIL>'],
'recipients': 'Admin,Primary Contacts,Secondary Contacts',
'title': 'Program_Test',
'review': {
'status': 'Unreviewed',
},
'access_control_list': [],
'slug': 'program_test'
}
}]
response = self.api.post(all_models.Program, data=data)
self.assert400(response)
self.assertIn(
u"Value should be either empty or comma separated list of",
response.json[0][1]
)
@ddt.ddt
class TestProgramVersionHistory(TestCase):
"""Test Version History for Program"""
def setUp(self):
# One fully-populated Program with a mandatory text CAD/CAV, used as the
# baseline that each test tries to "restore" attributes onto.
super(TestProgramVersionHistory, self).setUp()
self.api = api_helper.Api()
with factories.single_commit():
self.program = factories.ProgramFactory(
title="Test Program",
description="Program Description",
slug="PROGRAM-2346",
start_date=datetime.date(2019, 6, 1),
end_date=datetime.date(2019, 6, 2),
updated_at=datetime.date(2019, 6, 2),
folder="Program Folder",
notes="Program Notes",
status="Draft",
)
self.cad = factories.CustomAttributeDefinitionFactory(
title="test cad",
definition_type="program",
definition_id=self.program.id,
attribute_type="Text",
mandatory=True,
)
self.cav = factories.CustomAttributeValueFactory(
custom_attribute=self.cad,
attributable=self.program,
attribute_value="Text",
)
# Each tuple is (attribute, value to PUT, whether the API should apply it).
@ddt.data(
("title", "Prev Program Title", True),
("description", "Prev Program Decription", True),
("slug", "PREV-PROGRAM-SLUG", False),
("folder", "Prev Program Folder", False),
("status", "Active", True),
("notes", "Prev Program Notes", True),
("start_date", datetime.date(2019, 5, 1), True),
("end_date", datetime.date(2019, 5, 2), False),
("updated_at", datetime.date(2019, 5, 2), False),
)
@ddt.unpack
def test_restore_attr_from_history(self, attr_name,
attr_value, restored):
"""Test only allowed fields can be restored from Version History"""
response = self.api.put(self.program, data={attr_name: attr_value})
self.assert200(response)
self.program = self.refresh_object(
all_models.Program,
id_=self.program.id,
)
# The PUT itself always succeeds; `restored` says whether the field
# actually changed (read-only fields are silently ignored).
self.assertEqual(
getattr(self.program, attr_name, None) == attr_value,
restored,
)
def test_restore_cav_from_history(self):
"""Test CAV can be restored from Version History"""
prev_cav_value = "Prev Text"
prev_cav = self.cav.log_json()
prev_cav.update({"attribute_value": prev_cav_value})
response = self.api.put(
self.program,
data={
"custom_attribute_definitions": [self.cad.log_json()],
"custom_attribute_values": [prev_cav]
},
)
self.assert200(response)
self.program = self.refresh_object(
all_models.Program,
id_=self.program.id,
)
self.assertEqual(
self.program.custom_attribute_values[0].attribute_value,
prev_cav_value
)
class TestMegaProgram(TestCase):
"""Mega Program test cases"""
def setUp(self):
"""Setup tests"""
self.api = api_helper.Api()
def test_is_mega_attr(self):
"""Test is_mega attribute of program"""
# A program is "mega" iff it has at least one child program
# (relationship source=parent, destination=child).
with factories.single_commit():
program_child = factories.ProgramFactory()
program_parent = factories.ProgramFactory()
factories.RelationshipFactory(source=program_parent,
destination=program_child)
program_child_id = program_child.id
program_parent_id = program_parent.id
response = self.api.get(all_models.Program, program_child_id)
self.assertEqual(response.json["program"]["is_mega"], False)
response = self.api.get(all_models.Program, program_parent_id)
self.assertEqual(response.json["program"]["is_mega"], True)
def test_program_relatives(self):
"""Test program children and parents
        +--C<--+
        |      |
        v      |
 A<-----B<-----E<----F
               |
               |
        D<-----+
"""
# Builds the DAG above (arrows point parent -> child) and checks that
# get_all_relatives_ids walks it transitively in both directions.
with factories.single_commit():
program_a = factories.ProgramFactory()
program_b = factories.ProgramFactory()
program_c = factories.ProgramFactory()
program_d = factories.ProgramFactory()
program_e = factories.ProgramFactory()
program_f = factories.ProgramFactory()
factories.RelationshipFactory(source=program_b,
destination=program_a)
factories.RelationshipFactory(source=program_c,
destination=program_b)
factories.RelationshipFactory(source=program_e,
destination=program_d)
factories.RelationshipFactory(source=program_e,
destination=program_b)
factories.RelationshipFactory(source=program_e,
destination=program_c)
factories.RelationshipFactory(source=program_f,
destination=program_e)
parents_b = all_models.Program.get_all_relatives_ids(program_b.id,
"parents")
children_b = all_models.Program.get_all_relatives_ids(program_b.id,
"children")
parents_e = all_models.Program.get_all_relatives_ids(program_e.id,
"parents")
children_e = all_models.Program.get_all_relatives_ids(program_e.id,
"children")
self.assertEqual(parents_b, {program_c.id, program_e.id, program_f.id})
self.assertEqual(children_b, {program_a.id, })
self.assertEqual(parents_e, {program_f.id, })
self.assertEqual(children_e, {program_c.id, program_b.id,
program_d.id, program_a.id})
def test_program_cycle_relatives(self):
"""Test programs cycle children and parents
 +-->C--+
 |      |
 |      v
 A<-----B
"""
# Cyclic graph: traversal must terminate and report every other node in
# the cycle as both parent and child.
with factories.single_commit():
program_a = factories.ProgramFactory()
program_b = factories.ProgramFactory()
program_c = factories.ProgramFactory()
factories.RelationshipFactory(source=program_b,
destination=program_a)
factories.RelationshipFactory(source=program_c,
destination=program_b)
factories.RelationshipFactory(source=program_a,
destination=program_c)
parents_b = all_models.Program.get_all_relatives_ids(program_b.id,
"parents")
children_b = all_models.Program.get_all_relatives_ids(program_b.id,
"children")
self.assertEqual(parents_b, {program_a.id, program_c.id})
self.assertEqual(children_b, {program_a.id, program_c.id})
import ddt
from ggrc.models import all_models
from ggrc.utils import errors
from integration.ggrc import api_helper
from integration.ggrc.models import factories
from integration.ggrc import TestCase
class TestProgram(TestCase):
"""Program test cases."""
def setUp(self):
self.api = api_helper.Api()
with factories.single_commit():
self.program = factories.ProgramFactory()
self.audit_id = factories.AuditFactory(program=self.program).id
def test_put_empty_audits(self):
"""Audit doesn't get deleted when empty audits field is put."""
response = self.api.put(self.program, data={"audits": []})
self.assert200(response)
audit = self.refresh_object(all_models.Audit, id_=self.audit_id)
self.assertIsNotNone(audit)
def test_delete_with_audits(self):
"""Test deletion of a program with a mapped audit"""
response = self.api.delete(self.program)
self.assertEqual(response.status_code, 409)
self.assertEqual(
response.json,
{
"message": errors.MAPPED_AUDITS,
"code": 409,
}
)
def test_delete_without_audits(self):
"""Test deletion of a program with a mapped audit"""
response = self.api.delete(self.program.audits[0])
self.assert200(response)
response = self.api.delete(self.program)
self.assert200(response)
def test_create_wrong_recipients(self):
"""Test creation of a program with a wrong recipients"""
data = [{
'program': {
'status': 'Draft',
'kind': 'Directive',
'send_by_default': True,
'managers': ['<EMAIL>'],
'recipients': 'Admin,Primary Contacts,Secondary Contacts',
'title': 'Program_Test',
'review': {
'status': 'Unreviewed',
},
'access_control_list': [],
'slug': 'program_test'
}
}]
response = self.api.post(all_models.Program, data=data)
self.assert400(response)
self.assertIn(
u"Value should be either empty or comma separated list of",
response.json[0][1]
)
@ddt.ddt
class TestProgramVersionHistory(TestCase):
"""Test Version History for Program"""
def setUp(self):
super(TestProgramVersionHistory, self).setUp()
self.api = api_helper.Api()
with factories.single_commit():
self.program = factories.ProgramFactory(
title="Test Program",
description="Program Description",
slug="PROGRAM-2346",
start_date=datetime.date(2019, 6, 1),
end_date=datetime.date(2019, 6, 2),
updated_at=datetime.date(2019, 6, 2),
folder="Program Folder",
notes="Program Notes",
status="Draft",
)
self.cad = factories.CustomAttributeDefinitionFactory(
title="test cad",
definition_type="program",
definition_id=self.program.id,
attribute_type="Text",
mandatory=True,
)
self.cav = factories.CustomAttributeValueFactory(
custom_attribute=self.cad,
attributable=self.program,
attribute_value="Text",
)
@ddt.data(
("title", "Prev Program Title", True),
("description", "Prev Program Decription", True),
("slug", "PREV-PROGRAM-SLUG", False),
("folder", "Prev Program Folder", False),
("status", "Active", True),
("notes", "Prev Program Notes", True),
("start_date", datetime.date(2019, 5, 1), True),
("end_date", datetime.date(2019, 5, 2), False),
("updated_at", datetime.date(2019, 5, 2), False),
)
@ddt.unpack
def test_restore_attr_from_history(self, attr_name,
attr_value, restored):
"""Test only allowed fields can be restored from Version History"""
response = self.api.put(self.program, data={attr_name: attr_value})
self.assert200(response)
self.program = self.refresh_object(
all_models.Program,
id_=self.program.id,
)
self.assertEqual(
getattr(self.program, attr_name, None) == attr_value,
restored,
)
def test_restore_cav_from_history(self):
"""Test CAV can be restored from Version History"""
prev_cav_value = "Prev Text"
prev_cav = self.cav.log_json()
prev_cav.update({"attribute_value": prev_cav_value})
response = self.api.put(
self.program,
data={
"custom_attribute_definitions": [self.cad.log_json()],
"custom_attribute_values": [prev_cav]
},
)
self.assert200(response)
self.program = self.refresh_object(
all_models.Program,
id_=self.program.id,
)
self.assertEqual(
self.program.custom_attribute_values[0].attribute_value,
prev_cav_value
)
class TestMegaProgram(TestCase):
"""Mega Program test cases"""
def setUp(self):
"""Setup tests"""
self.api = api_helper.Api()
def test_is_mega_attr(self):
"""Test is_mega attribute of program"""
with factories.single_commit():
program_child = factories.ProgramFactory()
program_parent = factories.ProgramFactory()
factories.RelationshipFactory(source=program_parent,
destination=program_child)
program_child_id = program_child.id
program_parent_id = program_parent.id
response = self.api.get(all_models.Program, program_child_id)
self.assertEqual(response.json["program"]["is_mega"], False)
response = self.api.get(all_models.Program, program_parent_id)
self.assertEqual(response.json["program"]["is_mega"], True)
def test_program_relatives(self):
"""Test program children and parents
+--C<--+
| |
v |
A<-----B<-----E<----F
|
|
D<-----+
"""
with factories.single_commit():
program_a = factories.ProgramFactory()
program_b = factories.ProgramFactory()
program_c = factories.ProgramFactory()
program_d = factories.ProgramFactory()
program_e = factories.ProgramFactory()
program_f = factories.ProgramFactory()
factories.RelationshipFactory(source=program_b,
destination=program_a)
factories.RelationshipFactory(source=program_c,
destination=program_b)
factories.RelationshipFactory(source=program_e,
destination=program_d)
factories.RelationshipFactory(source=program_e,
destination=program_b)
factories.RelationshipFactory(source=program_e,
destination=program_c)
factories.RelationshipFactory(source=program_f,
destination=program_e)
parents_b = all_models.Program.get_all_relatives_ids(program_b.id,
"parents")
children_b = all_models.Program.get_all_relatives_ids(program_b.id,
"children")
parents_e = all_models.Program.get_all_relatives_ids(program_e.id,
"parents")
children_e = all_models.Program.get_all_relatives_ids(program_e.id,
"children")
self.assertEqual(parents_b, {program_c.id, program_e.id, program_f.id})
self.assertEqual(children_b, {program_a.id, })
self.assertEqual(parents_e, {program_f.id, })
self.assertEqual(children_e, {program_c.id, program_b.id,
program_d.id, program_a.id})
def test_program_cycle_relatives(self):
"""Test programs cycle children and parents
+-->C--+
| |
| v
A<-----B
"""
with factories.single_commit():
program_a = factories.ProgramFactory()
program_b = factories.ProgramFactory()
program_c = factories.ProgramFactory()
factories.RelationshipFactory(source=program_b,
destination=program_a)
factories.RelationshipFactory(source=program_c,
destination=program_b)
factories.RelationshipFactory(source=program_a,
destination=program_c)
parents_b = all_models.Program.get_all_relatives_ids(program_b.id,
"parents")
children_b = all_models.Program.get_all_relatives_ids(program_b.id,
"children")
self.assertEqual(parents_b, {program_a.id, program_c.id})
self.assertEqual(children_b, {program_a.id, program_c.id}) | 0.659734 | 0.347177 |
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from dfirtrack_main.models import Entry, System, Systemstatus
class EntryModelTestCase(TestCase):
""" entry model tests """
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(username='testuser_entry', password='<PASSWORD>')
# create object
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
# create object
system_1 = System.objects.create(
system_name='system_1',
systemstatus = systemstatus_1,
system_modify_time = timezone.now(),
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
# create object
Entry.objects.create(
system = system_1,
entry_time = timezone.now(),
entry_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
entry_created_by_user_id = test_user,
entry_modified_by_user_id = test_user,
)
def test_entry_string(self):
""" test string representation """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get object id
entry_id = entry_1.entry_id
# compare
self.assertEqual(str(entry_1), str(entry_id) + ' | ' + str(entry_1.system) + ' | ' + 'da39a3ee5e6b4b0d3255bfef95601890afd80709')
def test_entry_id_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_id').verbose_name
# compare
self.assertEqual(field_label, 'entry id')
def test_system_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('system').verbose_name
# compare
self.assertEqual(field_label, 'system')
def test_entry_case_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('case').verbose_name
# compare
self.assertEqual(field_label, 'case')
def test_entry_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_time').verbose_name
# compare
self.assertEqual(field_label, 'entry time')
def test_entry_sha1_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_sha1').verbose_name
# compare
self.assertEqual(field_label, 'entry sha1')
def test_entry_date_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_date').verbose_name
# compare
self.assertEqual(field_label, 'entry date')
def test_entry_utc_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_utc').verbose_name
# compare
self.assertEqual(field_label, 'entry utc')
def test_entry_system_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_system').verbose_name
# compare
self.assertEqual(field_label, 'entry system')
def test_entry_type_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_type').verbose_name
# compare
self.assertEqual(field_label, 'entry type')
def test_entry_content_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_content').verbose_name
# compare
self.assertEqual(field_label, 'entry content')
def test_entry_note_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_note').verbose_name
# compare
self.assertEqual(field_label, 'entry note')
def test_entry_create_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_create_time').verbose_name
# compare
self.assertEqual(field_label, 'entry create time')
def test_entry_modify_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_modify_time').verbose_name
# compare
self.assertEqual(field_label, 'entry modify time')
def test_entry_api_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_api_time').verbose_name
# compare
self.assertEqual(field_label, 'entry api time')
def test_entry_created_by_user_id_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_created_by_user_id').verbose_name
# compare
self.assertEqual(field_label, 'entry created by user id')
def test_entry_modified_by_user_id_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_modified_by_user_id').verbose_name
# compare
self.assertEqual(field_label, 'entry modified by user id')
def test_entry_sha1_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_sha1').max_length
# compare
self.assertEqual(max_length, 40)
def test_entry_date_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_date').max_length
# compare
self.assertEqual(max_length, 10)
def test_entry_utc_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_utc').max_length
# compare
self.assertEqual(max_length, 8)
def test_entry_system_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_system').max_length
# compare
self.assertEqual(max_length, 30)
def test_entry_type_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_type').max_length
# compare
self.assertEqual(max_length, 30) | Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/entry/test_entry_models.py | from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from dfirtrack_main.models import Entry, System, Systemstatus
class EntryModelTestCase(TestCase):
""" entry model tests """
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(username='testuser_entry', password='<PASSWORD>')
# create object
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
# create object
system_1 = System.objects.create(
system_name='system_1',
systemstatus = systemstatus_1,
system_modify_time = timezone.now(),
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
# create object
Entry.objects.create(
system = system_1,
entry_time = timezone.now(),
entry_sha1 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
entry_created_by_user_id = test_user,
entry_modified_by_user_id = test_user,
)
def test_entry_string(self):
""" test string representation """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get object id
entry_id = entry_1.entry_id
# compare
self.assertEqual(str(entry_1), str(entry_id) + ' | ' + str(entry_1.system) + ' | ' + 'da39a3ee5e6b4b0d3255bfef95601890afd80709')
def test_entry_id_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_id').verbose_name
# compare
self.assertEqual(field_label, 'entry id')
def test_system_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('system').verbose_name
# compare
self.assertEqual(field_label, 'system')
def test_entry_case_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('case').verbose_name
# compare
self.assertEqual(field_label, 'case')
def test_entry_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_time').verbose_name
# compare
self.assertEqual(field_label, 'entry time')
def test_entry_sha1_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_sha1').verbose_name
# compare
self.assertEqual(field_label, 'entry sha1')
def test_entry_date_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_date').verbose_name
# compare
self.assertEqual(field_label, 'entry date')
def test_entry_utc_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_utc').verbose_name
# compare
self.assertEqual(field_label, 'entry utc')
def test_entry_system_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_system').verbose_name
# compare
self.assertEqual(field_label, 'entry system')
def test_entry_type_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_type').verbose_name
# compare
self.assertEqual(field_label, 'entry type')
def test_entry_content_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_content').verbose_name
# compare
self.assertEqual(field_label, 'entry content')
def test_entry_note_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_note').verbose_name
# compare
self.assertEqual(field_label, 'entry note')
def test_entry_create_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_create_time').verbose_name
# compare
self.assertEqual(field_label, 'entry create time')
def test_entry_modify_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_modify_time').verbose_name
# compare
self.assertEqual(field_label, 'entry modify time')
def test_entry_api_time_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_api_time').verbose_name
# compare
self.assertEqual(field_label, 'entry api time')
def test_entry_created_by_user_id_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_created_by_user_id').verbose_name
# compare
self.assertEqual(field_label, 'entry created by user id')
def test_entry_modified_by_user_id_attribute_label(self):
""" test attribute label """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get label
field_label = entry_1._meta.get_field('entry_modified_by_user_id').verbose_name
# compare
self.assertEqual(field_label, 'entry modified by user id')
def test_entry_sha1_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_sha1').max_length
# compare
self.assertEqual(max_length, 40)
def test_entry_date_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_date').max_length
# compare
self.assertEqual(max_length, 10)
def test_entry_utc_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_utc').max_length
# compare
self.assertEqual(max_length, 8)
def test_entry_system_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_system').max_length
# compare
self.assertEqual(max_length, 30)
def test_entry_type_length(self):
""" test for max length """
# get object
entry_1 = Entry.objects.get(entry_sha1='da39a3ee5e6b4b0d3255bfef95601890afd80709')
# get max length
max_length = entry_1._meta.get_field('entry_type').max_length
# compare
self.assertEqual(max_length, 30) | 0.577495 | 0.195959 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['RunBook']
class RunBook(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
job_schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RunBookJobScheduleArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
log_progress: Optional[pulumi.Input[bool]] = None,
log_verbose: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
publish_content_link: Optional[pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
runbook_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Automation Runbook.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic")
example_run_book = azure.automation.RunBook("exampleRunBook",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
automation_account_name=example_account.name,
log_verbose=True,
log_progress=True,
description="This is an example runbook",
runbook_type="PowerShellWorkflow",
publish_content_link=azure.automation.RunBookPublishContentLinkArgs(
uri="https://raw.githubusercontent.com/Azure/azure-quickstart-templates/c4935ffb69246a6058eb24f54640f53f69d3ac9f/101-automation-runbook-getvms/Runbooks/Get-AzureVMTutorial.ps1",
))
```
## Import
Automation Runbooks can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:automation/runBook:RunBook Get-AzureVMTutorial /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Automation/automationAccounts/account1/runbooks/Get-AzureVMTutorial
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] content: The desired content of the runbook.
:param pulumi.Input[str] description: A description for this credential.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[bool] log_progress: Progress log option.
:param pulumi.Input[bool] log_verbose: Verbose log option.
:param pulumi.Input[str] name: Specifies the name of the Runbook. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']] publish_content_link: The published runbook content link.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] runbook_type: The type of the runbook - can be either `Graph`, `GraphPowerShell`, `GraphPowerShellWorkflow`, `PowerShellWorkflow`, `PowerShell` or `Script`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if automation_account_name is None:
raise TypeError("Missing required property 'automation_account_name'")
__props__['automation_account_name'] = automation_account_name
__props__['content'] = content
__props__['description'] = description
__props__['job_schedules'] = job_schedules
__props__['location'] = location
if log_progress is None:
raise TypeError("Missing required property 'log_progress'")
__props__['log_progress'] = log_progress
if log_verbose is None:
raise TypeError("Missing required property 'log_verbose'")
__props__['log_verbose'] = log_verbose
__props__['name'] = name
__props__['publish_content_link'] = publish_content_link
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if runbook_type is None:
raise TypeError("Missing required property 'runbook_type'")
__props__['runbook_type'] = runbook_type
__props__['tags'] = tags
super(RunBook, __self__).__init__(
'azure:automation/runBook:RunBook',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
job_schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RunBookJobScheduleArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
log_progress: Optional[pulumi.Input[bool]] = None,
log_verbose: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
publish_content_link: Optional[pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
runbook_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'RunBook':
"""
Get an existing RunBook resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] content: The desired content of the runbook.
:param pulumi.Input[str] description: A description for this credential.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[bool] log_progress: Progress log option.
:param pulumi.Input[bool] log_verbose: Verbose log option.
:param pulumi.Input[str] name: Specifies the name of the Runbook. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']] publish_content_link: The published runbook content link.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] runbook_type: The type of the runbook - can be either `Graph`, `GraphPowerShell`, `GraphPowerShellWorkflow`, `PowerShellWorkflow`, `PowerShell` or `Script`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["automation_account_name"] = automation_account_name
__props__["content"] = content
__props__["description"] = description
__props__["job_schedules"] = job_schedules
__props__["location"] = location
__props__["log_progress"] = log_progress
__props__["log_verbose"] = log_verbose
__props__["name"] = name
__props__["publish_content_link"] = publish_content_link
__props__["resource_group_name"] = resource_group_name
__props__["runbook_type"] = runbook_type
__props__["tags"] = tags
return RunBook(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="automationAccountName")
def automation_account_name(self) -> pulumi.Output[str]:
"""
The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "automation_account_name")
@property
@pulumi.getter
def content(self) -> pulumi.Output[str]:
"""
The desired content of the runbook.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this credential.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="jobSchedules")
def job_schedules(self) -> pulumi.Output[Sequence['outputs.RunBookJobSchedule']]:
return pulumi.get(self, "job_schedules")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="logProgress")
def log_progress(self) -> pulumi.Output[bool]:
"""
Progress log option.
"""
return pulumi.get(self, "log_progress")
@property
@pulumi.getter(name="logVerbose")
def log_verbose(self) -> pulumi.Output[bool]:
"""
Verbose log option.
"""
return pulumi.get(self, "log_verbose")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Runbook. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publishContentLink")
def publish_content_link(self) -> pulumi.Output[Optional['outputs.RunBookPublishContentLink']]:
"""
The published runbook content link.
"""
return pulumi.get(self, "publish_content_link")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="runbookType")
def runbook_type(self) -> pulumi.Output[str]:
"""
The type of the runbook - can be either `Graph`, `GraphPowerShell`, `GraphPowerShellWorkflow`, `PowerShellWorkflow`, `PowerShell` or `Script`.
"""
return pulumi.get(self, "runbook_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | sdk/python/pulumi_azure/automation/run_book.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['RunBook']
class RunBook(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
job_schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RunBookJobScheduleArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
log_progress: Optional[pulumi.Input[bool]] = None,
log_verbose: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
publish_content_link: Optional[pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
runbook_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Automation Runbook.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic")
example_run_book = azure.automation.RunBook("exampleRunBook",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
automation_account_name=example_account.name,
log_verbose=True,
log_progress=True,
description="This is an example runbook",
runbook_type="PowerShellWorkflow",
publish_content_link=azure.automation.RunBookPublishContentLinkArgs(
uri="https://raw.githubusercontent.com/Azure/azure-quickstart-templates/c4935ffb69246a6058eb24f54640f53f69d3ac9f/101-automation-runbook-getvms/Runbooks/Get-AzureVMTutorial.ps1",
))
```
## Import
Automation Runbooks can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:automation/runBook:RunBook Get-AzureVMTutorial /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Automation/automationAccounts/account1/runbooks/Get-AzureVMTutorial
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] content: The desired content of the runbook.
:param pulumi.Input[str] description: A description for this credential.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[bool] log_progress: Progress log option.
:param pulumi.Input[bool] log_verbose: Verbose log option.
:param pulumi.Input[str] name: Specifies the name of the Runbook. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']] publish_content_link: The published runbook content link.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] runbook_type: The type of the runbook - can be either `Graph`, `GraphPowerShell`, `GraphPowerShellWorkflow`, `PowerShellWorkflow`, `PowerShell` or `Script`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if automation_account_name is None:
raise TypeError("Missing required property 'automation_account_name'")
__props__['automation_account_name'] = automation_account_name
__props__['content'] = content
__props__['description'] = description
__props__['job_schedules'] = job_schedules
__props__['location'] = location
if log_progress is None:
raise TypeError("Missing required property 'log_progress'")
__props__['log_progress'] = log_progress
if log_verbose is None:
raise TypeError("Missing required property 'log_verbose'")
__props__['log_verbose'] = log_verbose
__props__['name'] = name
__props__['publish_content_link'] = publish_content_link
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if runbook_type is None:
raise TypeError("Missing required property 'runbook_type'")
__props__['runbook_type'] = runbook_type
__props__['tags'] = tags
super(RunBook, __self__).__init__(
'azure:automation/runBook:RunBook',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
job_schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RunBookJobScheduleArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
log_progress: Optional[pulumi.Input[bool]] = None,
log_verbose: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
publish_content_link: Optional[pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
runbook_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'RunBook':
"""
Get an existing RunBook resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] content: The desired content of the runbook.
:param pulumi.Input[str] description: A description for this credential.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[bool] log_progress: Progress log option.
:param pulumi.Input[bool] log_verbose: Verbose log option.
:param pulumi.Input[str] name: Specifies the name of the Runbook. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['RunBookPublishContentLinkArgs']] publish_content_link: The published runbook content link.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] runbook_type: The type of the runbook - can be either `Graph`, `GraphPowerShell`, `GraphPowerShellWorkflow`, `PowerShellWorkflow`, `PowerShell` or `Script`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["automation_account_name"] = automation_account_name
__props__["content"] = content
__props__["description"] = description
__props__["job_schedules"] = job_schedules
__props__["location"] = location
__props__["log_progress"] = log_progress
__props__["log_verbose"] = log_verbose
__props__["name"] = name
__props__["publish_content_link"] = publish_content_link
__props__["resource_group_name"] = resource_group_name
__props__["runbook_type"] = runbook_type
__props__["tags"] = tags
return RunBook(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="automationAccountName")
def automation_account_name(self) -> pulumi.Output[str]:
"""
The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "automation_account_name")
@property
@pulumi.getter
def content(self) -> pulumi.Output[str]:
"""
The desired content of the runbook.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this credential.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="jobSchedules")
def job_schedules(self) -> pulumi.Output[Sequence['outputs.RunBookJobSchedule']]:
return pulumi.get(self, "job_schedules")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="logProgress")
def log_progress(self) -> pulumi.Output[bool]:
"""
Progress log option.
"""
return pulumi.get(self, "log_progress")
@property
@pulumi.getter(name="logVerbose")
def log_verbose(self) -> pulumi.Output[bool]:
"""
Verbose log option.
"""
return pulumi.get(self, "log_verbose")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Runbook. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publishContentLink")
def publish_content_link(self) -> pulumi.Output[Optional['outputs.RunBookPublishContentLink']]:
"""
The published runbook content link.
"""
return pulumi.get(self, "publish_content_link")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="runbookType")
def runbook_type(self) -> pulumi.Output[str]:
"""
The type of the runbook - can be either `Graph`, `GraphPowerShell`, `GraphPowerShellWorkflow`, `PowerShellWorkflow`, `PowerShell` or `Script`.
"""
return pulumi.get(self, "runbook_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | 0.817319 | 0.22051 |
import pytest
import sys
sys.path.append('../')
from server.parsing.utils import create_chat_df, create_students_df
import os
# TODO: need to load the file as FileStorage type and then start testing
@pytest.fixture
def folders():
CHAT_FILES_FOLDER = "./files_to_test/chat_files"
STUDENT_EXCEL_FILES_FOLDER = "./files_to_test/students_list_excel"
return {"chat_folder": CHAT_FILES_FOLDER, "student_list_folder": STUDENT_EXCEL_FILES_FOLDER}
student_list_files_data = [
("example_csv.csv", True),
("example_csv_2.csv", True),
("example_csv_3.csv", True),
("example_csv_4.csv", True),
("example_excel.xlsx", True),
("example_excel_start_in_random_row.xlsx", True),
("example_mashov_file_edited_and_saved_97.xls", True),
("example_mashov_file_edited_and_saved_97_with_filled_data.xls", True),
("רשימה מקורית.xls", True),
]
student_list_files_data_problems = [
"example_mashov_file_empty.xls",
"example_excel_too_much_records.xlsx"
]
class TestChatAndStudent:
chat_files_data_regular = [
("chat_file_valid.txt", False)
]
@pytest.mark.parametrize(("file_name", "expected_output"), chat_files_data_regular)
def test_create_chat_df_validation_regular(self, folders, file_name, expected_output):
with open(os.path.join(folders["chat_folder"], file_name), "r", encoding="utf-8") as f:
chat_df = create_chat_df(f.readlines())
assert chat_df.empty == expected_output
chat_files_data_empty = [
("chat_file_empty.txt", True),
("chat_file_not_structured.txt", True),
("chat_file_not_structured_partially.txt", True),
]
@pytest.mark.parametrize(("file_name", "expected_output"), chat_files_data_empty)
def test_create_chat_df_validation_empty(self, folders, file_name, expected_output):
with open(os.path.join(folders["chat_folder"], file_name), "r", encoding="utf-8") as f:
with pytest.raises(ValueError):
assert create_chat_df(f.readlines())
@pytest.mark.parametrize(("file_name", "expected_output"), student_list_files_data)
def test_create_students_df_validation(self, folders, file_name, expected_output):
df_students = create_students_df(file_name, os.path.join(folders["student_list_folder"], file_name))
assert not df_students.empty == expected_output
@pytest.mark.parametrize("file_name", student_list_files_data_problems)
def test_create_students_df_validation_problem(self, folders, file_name):
with pytest.raises(ValueError):
assert create_students_df(file_name, os.path.join(folders["student_list_folder"], file_name)) | server/tests/test_parsing_utils.py | import pytest
import sys
sys.path.append('../')
from server.parsing.utils import create_chat_df, create_students_df
import os
# TODO: need to load the file as FileStorage type and then start testing
@pytest.fixture
def folders():
CHAT_FILES_FOLDER = "./files_to_test/chat_files"
STUDENT_EXCEL_FILES_FOLDER = "./files_to_test/students_list_excel"
return {"chat_folder": CHAT_FILES_FOLDER, "student_list_folder": STUDENT_EXCEL_FILES_FOLDER}
student_list_files_data = [
("example_csv.csv", True),
("example_csv_2.csv", True),
("example_csv_3.csv", True),
("example_csv_4.csv", True),
("example_excel.xlsx", True),
("example_excel_start_in_random_row.xlsx", True),
("example_mashov_file_edited_and_saved_97.xls", True),
("example_mashov_file_edited_and_saved_97_with_filled_data.xls", True),
("רשימה מקורית.xls", True),
]
student_list_files_data_problems = [
"example_mashov_file_empty.xls",
"example_excel_too_much_records.xlsx"
]
class TestChatAndStudent:
chat_files_data_regular = [
("chat_file_valid.txt", False)
]
@pytest.mark.parametrize(("file_name", "expected_output"), chat_files_data_regular)
def test_create_chat_df_validation_regular(self, folders, file_name, expected_output):
with open(os.path.join(folders["chat_folder"], file_name), "r", encoding="utf-8") as f:
chat_df = create_chat_df(f.readlines())
assert chat_df.empty == expected_output
chat_files_data_empty = [
("chat_file_empty.txt", True),
("chat_file_not_structured.txt", True),
("chat_file_not_structured_partially.txt", True),
]
@pytest.mark.parametrize(("file_name", "expected_output"), chat_files_data_empty)
def test_create_chat_df_validation_empty(self, folders, file_name, expected_output):
with open(os.path.join(folders["chat_folder"], file_name), "r", encoding="utf-8") as f:
with pytest.raises(ValueError):
assert create_chat_df(f.readlines())
@pytest.mark.parametrize(("file_name", "expected_output"), student_list_files_data)
def test_create_students_df_validation(self, folders, file_name, expected_output):
df_students = create_students_df(file_name, os.path.join(folders["student_list_folder"], file_name))
assert not df_students.empty == expected_output
@pytest.mark.parametrize("file_name", student_list_files_data_problems)
def test_create_students_df_validation_problem(self, folders, file_name):
with pytest.raises(ValueError):
assert create_students_df(file_name, os.path.join(folders["student_list_folder"], file_name)) | 0.209551 | 0.316871 |
import os
import argparse
import logging
from utils.sentence_scorer import SentenceScorer
from utils.tokenizer import Tokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO, datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)
class Preprocessor:
def __init__(self, dataset, mode, device="cuda"):
self.dataset = dataset
self.mode = mode
self.tokenizer = Tokenizer()
if self.mode != "full":
self.sentence_scorer = SentenceScorer(device=device, reduce_mode="gmean")
def extract_incremental(self, splits, output_path):
"""
Extracts incremental examples from the D2T dataset.
"""
for split in splits:
logger.info(f"Processing {split} split")
entry_list = self.dataset.data[split]
lengths, idxs = self.dataset.sort_by_lengths(entry_list)
entries_out = []
prev_n = -1
idxs_pos = 1
for entry in entry_list:
triples = entry.triples
n = len(triples)
if n > prev_n:
# next size
logger.info(f"Parsing {n}-tuples")
beg = idxs[idxs_pos] if idxs_pos < len(idxs) else None
end = idxs[idxs_pos+1] if idxs_pos+1 < len(idxs) else None
prev_n = n
idxs_pos += 1
# corrupted items
if self.dataset.name == "e2e" and n < 2:
continue
# extract all incremental examples for the current entry
entries_out += self._extract(entry_list, entry, n, lengths, beg, end)
self.process(output_path, split, entries_out)
def _extract(self, entry_list, entry, n, lengths, beg, end):
"""
Extracts incremental examples for a single entry
"""
if n+1 not in lengths:
return []
entry_out_all = []
triples = entry.triples
for entry_p1 in entry_list[beg:end]:
triples_p1 = entry_p1.triples
assert len(triples) + 1 == len(triples_p1), \
f"l1: {len(triples)}, l2: {len(triples_p1)}"
if not self._is_incremental(triples, triples_p1):
continue
triple = [x for x in triples_p1 if x not in triples]
text_list = [lex["target_txt"] for lex in entry.lexs]
ref_list = [lex_p1["target_txt"] for lex_p1 in entry_p1.lexs]
entry_out = {
"text_list" : text_list,
"data" : triple,
"ref_list" : ref_list,
}
entry_out_all.append(entry_out)
return entry_out_all
def _is_incremental(self, triples, triples_p1):
"""
Checks if `triples_p1` (length n+1) contains all the triples from `triples` (length n)
"""
return all(x in triples_p1 for x in triples)
def process(self, out_dir, split, entryset_out):
"""
Processes and outputs training data for the sentence fusion model
"""
out_path = os.path.join(out_dir, self.mode)
os.makedirs(out_path, exist_ok=True)
f_in = open(os.path.join(out_path, f"{split}.in"), "w")
f_ref = open(os.path.join(out_path, f"{split}.ref"), "w")
entries_processed = 0
samples_processed = 0
log_step = 10
log_next_percentage = log_step
n = len(entryset_out)
for entry in entryset_out:
entries_processed += 1
if self.dataset.is_d2t:
pairs = self._get_lex_pairs(entry)
else:
pairs = [entry]
for inp, ref in pairs:
f_in.write(inp + "\n")
f_ref.write(ref + "\n")
samples_processed += 1
if int(100*entries_processed / n) == log_next_percentage:
logger.info(f"{entries_processed} entries processed, {samples_processed} samples extracted")
log_next_percentage += log_step
def save_orig_references(self, output_path):
ref_dir = os.path.join(output_path, "ref")
os.makedirs(ref_dir, exist_ok=True)
logger.info(f"Saving references to {ref_dir}")
for split in ["dev", "test"]:
f_ref = open(os.path.join(ref_dir, f"{split}.ref"), "w")
for entry in self.dataset.data[split]:
lexs = "\n".join([l['target_txt'] for l in entry.lexs])
f_ref.write(lexs + "\n\n")
def _fill_template(self, template, triple):
"""
Fills a template with the data from the triple
"""
template = template.replace("<subject>", triple.subj) \
.replace("<predicate>", triple.pred) \
.replace("<object>", triple.obj)
return template
def _get_lex_pairs(self, entry):
"""
Combines lexicalizations based on the selected mode
"""
inp_sents = []
assert len(entry["data"]) == 1
triple = entry["data"][0]
text_list = [self.tokenizer.tokenize(el) for el in entry["text_list"]]
ref_list = [self.tokenizer.tokenize(el) for el in entry["ref_list"]]
templates = self.dataset.get_templates(triple)
templates = [self._fill_template(template, triple) for template in templates]
templates = [self.tokenizer.tokenize(template) for template in templates]
pairs = []
if self.mode == "best":
text = self.sentence_scorer.select_best(text_list)
template = self.sentence_scorer.select_best(templates)
ref = self.sentence_scorer.select_best(ref_list)
inp = " ".join([text, template])
pairs.append((inp, ref))
elif self.mode == "best_tgt":
ref = self.sentence_scorer.select_best(ref_list)
for text in text_list:
for template in templates:
inp = " ".join([text, template])
pairs.append((inp, ref))
elif self.mode == "full":
for text in text_list:
for template in templates:
inp = " ".join([text, template])
for ref in ref_list:
pairs.append((inp, ref))
else:
raise ValueError("Unknown mode (available: 'best', 'best_tgt', 'full')")
return pairs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True,
help="Dataset class.")
parser.add_argument("--input", type=str, required=True,
help="Path to the dataset")
parser.add_argument("--mode", type=str, required=True,
help="Preprocess mode ('best', 'best_tgt', 'full')")
parser.add_argument("--lms_device", default="cpu", type=str, required=True,
help="Device for the sentence scorer ('cpu' / 'cuda').")
parser.add_argument("--force_generate_templates", action='store_true',
help="Re-generate the templates which can be generated automatically.")
parser.add_argument('--splits', type=str, nargs='+', default=["train", "dev", "test"],
help='Dataset splits (e.g. train dev test)')
args = parser.parse_args()
lms_device = 'cuda' if args.lms_device == 'gpu' else 'cpu'
# Load dataset class by name
try:
dataset_mod = __import__("datasets", fromlist=[args.dataset])
dataset_cls = getattr(dataset_mod, args.dataset)
dataset = dataset_cls()
except AttributeError as err:
logger.error(f"Unknown dataset: '{args.dataset}'. Please create a class '{args.dataset}' in 'datasets.py'.")
raise err
# Load data
logger.info(f"Loading dataset {args.dataset}")
try:
dataset.load_from_dir(path=args.input, splits=args.splits)
except FileNotFoundError as err:
logger.error(f"Dataset not found in {args.input}")
raise err
# Create output directory
try:
out_dirname = os.path.join("data", dataset.name)
os.makedirs(out_dirname, exist_ok=True)
except OSError as err:
logger.error(f"Output directory {out_dirname} can not be created")
raise err
# WebNLG / E2E / ...
if dataset.is_d2t:
# Load or extract templates
dataset.load_templates(out_dirname, args.force_generate_templates)
# Extract incremental examples
preprocessor = Preprocessor(dataset=dataset, mode=args.mode, device=lms_device)
logger.info(f"Processing the {args.dataset} dataset (mode={args.mode})")
preprocessor.extract_incremental(splits=args.splits, output_path=out_dirname)
# Extract references for later evaluation
if "dev" in args.splits and "test" in args.splits:
preprocessor.save_orig_references(output_path=out_dirname)
# DiscoFuse: dataset can be sent directly to output
else:
for split in args.splits:
preprocessor = Preprocessor(dataset=dataset, mode=args.mode, device=lms_device)
preprocessor.process(out_dirname, split, dataset.data[split])
logger.info(f"Preprocessing finished.") | preprocess.py |
import os
import argparse
import logging
from utils.sentence_scorer import SentenceScorer
from utils.tokenizer import Tokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO, datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)
class Preprocessor:
def __init__(self, dataset, mode, device="cuda"):
self.dataset = dataset
self.mode = mode
self.tokenizer = Tokenizer()
if self.mode != "full":
self.sentence_scorer = SentenceScorer(device=device, reduce_mode="gmean")
def extract_incremental(self, splits, output_path):
"""
Extracts incremental examples from the D2T dataset.
"""
for split in splits:
logger.info(f"Processing {split} split")
entry_list = self.dataset.data[split]
lengths, idxs = self.dataset.sort_by_lengths(entry_list)
entries_out = []
prev_n = -1
idxs_pos = 1
for entry in entry_list:
triples = entry.triples
n = len(triples)
if n > prev_n:
# next size
logger.info(f"Parsing {n}-tuples")
beg = idxs[idxs_pos] if idxs_pos < len(idxs) else None
end = idxs[idxs_pos+1] if idxs_pos+1 < len(idxs) else None
prev_n = n
idxs_pos += 1
# corrupted items
if self.dataset.name == "e2e" and n < 2:
continue
# extract all incremental examples for the current entry
entries_out += self._extract(entry_list, entry, n, lengths, beg, end)
self.process(output_path, split, entries_out)
def _extract(self, entry_list, entry, n, lengths, beg, end):
"""
Extracts incremental examples for a single entry
"""
if n+1 not in lengths:
return []
entry_out_all = []
triples = entry.triples
for entry_p1 in entry_list[beg:end]:
triples_p1 = entry_p1.triples
assert len(triples) + 1 == len(triples_p1), \
f"l1: {len(triples)}, l2: {len(triples_p1)}"
if not self._is_incremental(triples, triples_p1):
continue
triple = [x for x in triples_p1 if x not in triples]
text_list = [lex["target_txt"] for lex in entry.lexs]
ref_list = [lex_p1["target_txt"] for lex_p1 in entry_p1.lexs]
entry_out = {
"text_list" : text_list,
"data" : triple,
"ref_list" : ref_list,
}
entry_out_all.append(entry_out)
return entry_out_all
def _is_incremental(self, triples, triples_p1):
"""
Checks if `triples_p1` (length n+1) contains all the triples from `triples` (length n)
"""
return all(x in triples_p1 for x in triples)
def process(self, out_dir, split, entryset_out):
"""
Processes and outputs training data for the sentence fusion model
"""
out_path = os.path.join(out_dir, self.mode)
os.makedirs(out_path, exist_ok=True)
f_in = open(os.path.join(out_path, f"{split}.in"), "w")
f_ref = open(os.path.join(out_path, f"{split}.ref"), "w")
entries_processed = 0
samples_processed = 0
log_step = 10
log_next_percentage = log_step
n = len(entryset_out)
for entry in entryset_out:
entries_processed += 1
if self.dataset.is_d2t:
pairs = self._get_lex_pairs(entry)
else:
pairs = [entry]
for inp, ref in pairs:
f_in.write(inp + "\n")
f_ref.write(ref + "\n")
samples_processed += 1
if int(100*entries_processed / n) == log_next_percentage:
logger.info(f"{entries_processed} entries processed, {samples_processed} samples extracted")
log_next_percentage += log_step
def save_orig_references(self, output_path):
ref_dir = os.path.join(output_path, "ref")
os.makedirs(ref_dir, exist_ok=True)
logger.info(f"Saving references to {ref_dir}")
for split in ["dev", "test"]:
f_ref = open(os.path.join(ref_dir, f"{split}.ref"), "w")
for entry in self.dataset.data[split]:
lexs = "\n".join([l['target_txt'] for l in entry.lexs])
f_ref.write(lexs + "\n\n")
def _fill_template(self, template, triple):
"""
Fills a template with the data from the triple
"""
template = template.replace("<subject>", triple.subj) \
.replace("<predicate>", triple.pred) \
.replace("<object>", triple.obj)
return template
def _get_lex_pairs(self, entry):
"""
Combines lexicalizations based on the selected mode
"""
inp_sents = []
assert len(entry["data"]) == 1
triple = entry["data"][0]
text_list = [self.tokenizer.tokenize(el) for el in entry["text_list"]]
ref_list = [self.tokenizer.tokenize(el) for el in entry["ref_list"]]
templates = self.dataset.get_templates(triple)
templates = [self._fill_template(template, triple) for template in templates]
templates = [self.tokenizer.tokenize(template) for template in templates]
pairs = []
if self.mode == "best":
text = self.sentence_scorer.select_best(text_list)
template = self.sentence_scorer.select_best(templates)
ref = self.sentence_scorer.select_best(ref_list)
inp = " ".join([text, template])
pairs.append((inp, ref))
elif self.mode == "best_tgt":
ref = self.sentence_scorer.select_best(ref_list)
for text in text_list:
for template in templates:
inp = " ".join([text, template])
pairs.append((inp, ref))
elif self.mode == "full":
for text in text_list:
for template in templates:
inp = " ".join([text, template])
for ref in ref_list:
pairs.append((inp, ref))
else:
raise ValueError("Unknown mode (available: 'best', 'best_tgt', 'full')")
return pairs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, required=True,
help="Dataset class.")
parser.add_argument("--input", type=str, required=True,
help="Path to the dataset")
parser.add_argument("--mode", type=str, required=True,
help="Preprocess mode ('best', 'best_tgt', 'full')")
parser.add_argument("--lms_device", default="cpu", type=str, required=True,
help="Device for the sentence scorer ('cpu' / 'cuda').")
parser.add_argument("--force_generate_templates", action='store_true',
help="Re-generate the templates which can be generated automatically.")
parser.add_argument('--splits', type=str, nargs='+', default=["train", "dev", "test"],
help='Dataset splits (e.g. train dev test)')
args = parser.parse_args()
lms_device = 'cuda' if args.lms_device == 'gpu' else 'cpu'
# Load dataset class by name
try:
dataset_mod = __import__("datasets", fromlist=[args.dataset])
dataset_cls = getattr(dataset_mod, args.dataset)
dataset = dataset_cls()
except AttributeError as err:
logger.error(f"Unknown dataset: '{args.dataset}'. Please create a class '{args.dataset}' in 'datasets.py'.")
raise err
# Load data
logger.info(f"Loading dataset {args.dataset}")
try:
dataset.load_from_dir(path=args.input, splits=args.splits)
except FileNotFoundError as err:
logger.error(f"Dataset not found in {args.input}")
raise err
# Create output directory
try:
out_dirname = os.path.join("data", dataset.name)
os.makedirs(out_dirname, exist_ok=True)
except OSError as err:
logger.error(f"Output directory {out_dirname} can not be created")
raise err
# WebNLG / E2E / ...
if dataset.is_d2t:
# Load or extract templates
dataset.load_templates(out_dirname, args.force_generate_templates)
# Extract incremental examples
preprocessor = Preprocessor(dataset=dataset, mode=args.mode, device=lms_device)
logger.info(f"Processing the {args.dataset} dataset (mode={args.mode})")
preprocessor.extract_incremental(splits=args.splits, output_path=out_dirname)
# Extract references for later evaluation
if "dev" in args.splits and "test" in args.splits:
preprocessor.save_orig_references(output_path=out_dirname)
# DiscoFuse: dataset can be sent directly to output
else:
for split in args.splits:
preprocessor = Preprocessor(dataset=dataset, mode=args.mode, device=lms_device)
preprocessor.process(out_dirname, split, dataset.data[split])
logger.info(f"Preprocessing finished.") | 0.465387 | 0.26106 |
import os
import sys
import sqlalchemy
import datetime
# Repository root (parent of this file's directory); added to sys.path so the
# sibling `common` package is importable when this module runs as a script.
_S2DB_ROOT = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
sys.path.append(_S2DB_ROOT)
from common.config import config
from common.log import get_logger
logger = get_logger(__name__)
# Single module-wide engine/connection shared by every helper below.
# NOTE(review): the connection is opened at import time and never closed;
# long-lived processes presumably rely on this — confirm pooling behavior.
engine = sqlalchemy.create_engine(config["db"]["uri"], echo=False)
connection = engine.connect()
# Schema version this code expects; compared against the `versions` table.
DB_VERSION = 0
def db_insert(query, params=None):
    """Execute a write statement (INSERT/UPDATE/DELETE/DDL) on the shared connection.

    :param query: SQL string using named bind parameters (e.g. ``:name``).
    :param params: optional dict of bind-parameter values.
    """
    # Avoid the mutable-default-argument anti-pattern; use a fresh dict per call.
    if params is None:
        params = {}
    connection.execute(sqlalchemy.sql.text(query), params)
def db_select(query, params=None):
    """Execute a read statement and return all rows as a list of dicts.

    :param query: SQL string using named bind parameters (e.g. ``:name``).
    :param params: optional dict of bind-parameter values.
    :return: list of ``{column: value}`` dicts (empty when no rows match).
    """
    # Avoid the mutable-default-argument anti-pattern; use a fresh dict per call.
    if params is None:
        params = {}
    cursor = connection.execute(sqlalchemy.sql.text(query), params)
    return [dict(row.items()) for row in cursor.fetchall()]
def db_insertfromfile(file):
    """Execute every ``;``-separated statement from a SQL file under ``<root>/db/``.

    All statements run inside a single transaction, so a failing script
    (e.g. a migration) rolls back atomically.

    :param file: path relative to the ``db/`` directory (e.g. ``create.sql``).
    """
    # Close the file handle deterministically instead of leaking it.
    with open(os.path.join(_S2DB_ROOT, "db", file), "r") as fh:
        querys = fh.read()
    with engine.begin() as transaction:
        # psycopg2 does not accept multiple statements per execute(), so split
        # naively on ';' (OK as long as no string literal contains one).
        for query in querys.split(";"):
            query = query.strip()
            if query:
                # Bug fix: the original built the text() clause but then
                # executed the raw string; execute the compiled clause instead.
                transaction.execute(sqlalchemy.sql.text(query))
def db_get_version():
    """Return the stored schema version, or ``None`` when no version row exists."""
    # Make sure the bookkeeping table exists before querying it.
    db_insert("create table if not exists versions (version int not null unique)")
    rows = db_select("select * from versions order by version asc limit 1")
    if not rows:
        return None
    return rows[0]['version']
def db_init():
    """Create the schema, or migrate it one step at a time up to DB_VERSION.

    Raises if the configured DB is not PostgreSQL, if the stored version is
    newer than this code expects, or if creation/migration did not land.
    """
    if not config["db"]["uri"].startswith("postgresql"):
        raise Exception("Only postgresql DB supported")
    version = db_get_version()
    if version is None:
        # No versions row at all: bootstrap a fresh schema.
        db_insertfromfile("create.sql")
    elif version != DB_VERSION:
        if version > DB_VERSION:
            raise Exception("DB version from the future. DB downgrade not supported.")
        # Apply each migration script in order until the target version.
        for v in range(version, DB_VERSION):
            logger.info("Migrating from " + str(v) + " to " + str(v + 1))
            print("Migrate: " + str(v))
            db_insertfromfile("migrations/" + str(v + 1) + ".sql")
    # Re-read to verify that creation/migration actually took effect.
    version = db_get_version()
    if version is None or version != DB_VERSION:
        raise Exception("DB migration or creation failed")
def db_sequencer_task_add(hash, path, parent=None, cls=None, family=None, name=None, time=None):
    """Queue a sequencer task for the given sample.

    Bug fix: the former default ``time=int(datetime.datetime.utcnow().timestamp())``
    was evaluated once at import, stamping every task with the process start
    time. ``None`` now means "current UTC time, computed per call"; callers
    passing an explicit timestamp are unaffected.
    """
    if time is None:
        time = int(datetime.datetime.utcnow().timestamp())
    db_insert("insert into sequencer_tasks (hash, path, parent, class, family, name, time_added) values (:hash, :path, :parent, :class, :family, :name, :time)",
              {"hash": hash, "path": path, "parent": parent, "class": cls, "family": family, "name": name, "time": time})
def db_sequencer_task_get_and_set_start(worker, time=None):
    """Claim the oldest unassigned task for `worker` and return it.

    Returns a dict with path/parent/class/family/name, or None when no
    unclaimed task exists.

    Bug fix: the former default ``time=int(datetime.datetime.utcnow().timestamp())``
    was evaluated once at import time; ``None`` now means "current UTC time,
    computed per call". Explicit timestamps behave as before.
    """
    if time is None:
        time = int(datetime.datetime.utcnow().timestamp())
    db_insert("update sequencer_tasks set time_started=:time, worker=:worker where path = (select path from sequencer_tasks where worker is null order by time_added asc limit 1)",
              {"time": time, "worker": worker})
    rows = db_select("select path, parent, class, family, name from sequencer_tasks where worker=:worker limit 1",
                     {"worker": worker})
    if not rows:
        return None
    return rows[0]
def db_sequencer_task_remove(path):
    """Delete the sequencer task identified by its file path."""
    db_insert("delete from sequencer_tasks where path=:path", {"path": path})
def db_sequences_add(software, sequences):
    """Store extracted sequences and link each one to `software`.

    Duplicate sequence rows are ignored (`on conflict do nothing`); the
    software<->sequence link row is inserted for every entry.
    """
    seq_sql = "insert into sequences (type, hash, representation, signature) values (:type, :sequence, :representation, :signature) on conflict do nothing"
    link_sql = "insert into softwaressequences (software, sequence) values (:software, :sequence)"
    for seq in sequences:
        db_insert(seq_sql, {
            "type": seq["type"],
            "sequence": seq["sequence"],
            "representation": seq["representation"],
            "signature": seq["signature"],
        })
        db_insert(link_sql, {"software": software, "sequence": seq["sequence"]})
def db_software_add(hash, mime, parent=None, cls=None, family=None, name=None, first_seen=None):
    """Insert a software sample row.

    Bug fix: the former default ``first_seen=int(datetime.datetime.utcnow().timestamp())``
    was evaluated once at import, stamping every sample with the process start
    time. ``None`` now means "current UTC time, computed per call"; explicit
    values behave as before.
    """
    if first_seen is None:
        first_seen = int(datetime.datetime.utcnow().timestamp())
    db_insert("insert into softwares (hash, mime, parent, class, family, name, first_seen) values (:hash, :mime, :parent, :class, :family, :name, :first_seen)",
              {"hash": hash, "mime": mime, "parent": parent, "class": cls, "family": family, "name": name, "first_seen": first_seen})
def db_get_softwares(limit=10000, offset=0):
    """Return one page of rows from the softwares table."""
    return db_select(
        "select * from softwares limit :limit offset :offset",
        {"limit": limit, "offset": offset},
    )
def db_get_software(hash):
    """Return the softwares row for `hash`, or None if it is unknown."""
    rows = db_select("select * from softwares where hash=:hash", {"hash": hash})
    return rows[0] if rows else None
def db_get_softwaretree(hash):
    """Return `hash` joined with its parent row and all of its child rows."""
    tree_sql = "select m.hash,p.hash parent_hash,p.class parent_class,p.family parent_family,p.name parent_name,c.hash child_hash,c.class child_class,c.family child_family,c.name child_name from softwares m left join softwares p on m.parent = p.hash left join softwares c on m.hash = c.parent where m.hash=:hash"
    return db_select(tree_sql, {"hash": hash})
def db_software_exists(hash):
    """Return True when a softwares row with this hash exists."""
    rows = db_select("select * from softwares where hash=:hash", {"hash": hash})
    return len(rows) > 0
def db_get_softwaressequencesgrouped(hash):
    """Return other softwares sharing sequences with `hash`, grouped as JSON.

    The query aggregates matches hierarchically (class -> family -> type ->
    sequence) into jsonb, keeping only groups that map to a single class
    (``cardinality(class) <= 1``). Result shape is defined by the SQL below;
    requires PostgreSQL jsonb functions.
    """
    return db_select("""
    select * from (
    select class,jsonb_agg(data) as data from (
    select class,jsonb_build_object('family',family,'data',json_agg(data)) as data from (
    select class,family,jsonb_build_object('type',type,'data',jsonb_agg(data)) as data from (
    select array_agg(distinct class) as class,array_agg(distinct family) as family,b.type,jsonb_build_object('sequence',a.sequence,'representation',b.representation,'signature',b.signature,'softwares',jsonb_agg(data)) as data from (
    select c.class,c.family,a.sequence,jsonb_build_object('name',c.name,'hash',c.hash) as data
    from softwaressequences a
    join softwaressequences b on a.software = :hash and a.sequence = b.sequence
    join softwares c on c.hash != :hash and b.software = c.hash
    group by c.class,c.family,a.sequence,c.name,c.hash,data ) as a
    join sequences b on a.sequence = b.hash
    group by b.type,a.sequence,b.representation,b.signature) as a
    group by class,family,type ) as a
    group by class,family order by cardinality(family)) as a
    group by class order by cardinality(class)) as a where cardinality(class) <= 1
    """,
    {'hash':hash})
def db_get_softwaressequences(hash):
    """Return a flat list of sequences that `hash` shares with other softwares.

    Each row pairs the other software's class/family/name/hash with the shared
    sequence's type/representation/signature (ungrouped counterpart of
    db_get_softwaressequencesgrouped).
    """
    return db_select("""
    select class,family,type,representation,signature,name,c.hash from softwaressequences a join softwaressequences b on a.software = :hash and a.sequence = b.sequence join softwares c on c.hash != :hash and b.software = c.hash join sequences d on a.sequence = d.hash
    """,
    {'hash':hash})
def db_set_classfamilyname(hash, cls, family, name=None):
    """Update class/family — and, when given, name — for one software row."""
    if name:
        db_insert(
            "update softwares set class=:class, family=:family, name=:name where hash = :hash",
            {'class': cls, 'family': family, 'name': name, 'hash': hash},
        )
    else:
        db_insert(
            "update softwares set class=:class, family=:family where hash = :hash",
            {'class': cls, 'family': family, 'hash': hash},
        )
# TODO: find better place for this
# Import-time side effect: make sure the configured storage directory exists
# before any caller tries to write into it.
os.makedirs(config['storage'].get('path', raw=True), exist_ok=True)
db_init() | s2db/db/engine.py | import os
import sys
import sqlalchemy
import datetime
_S2DB_ROOT = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
sys.path.append(_S2DB_ROOT)
from common.config import config
from common.log import get_logger
logger = get_logger(__name__)
engine = sqlalchemy.create_engine(config["db"]["uri"], echo=False)
connection = engine.connect()
DB_VERSION = 0
def db_insert(query, params={}):
query = sqlalchemy.sql.text(query)
connection.execute(query,params)
def db_select(query, params={}):
query = sqlalchemy.sql.text(query)
cursor = connection.execute(query,params)
rows = cursor.fetchall()
return [(dict(row.items())) for row in rows]
def db_insertfromfile(file):
    """Run every ';'-separated statement from db/<file> in one transaction.

    psycopg2 cannot execute multiple statements per call, so the script is
    split naively on ';' and each non-empty piece is executed on its own.

    Bug fix: the original built ``q = sqlalchemy.sql.text(query)`` but then
    executed the raw ``query`` string (dead assignment; raw strings are also
    rejected by SQLAlchemy >= 1.4/2.0). The prepared clause is now executed.
    The file handle is also closed deterministically via a context manager.
    """
    with open(_S2DB_ROOT + "/db/" + file, "r") as fh:
        querys = fh.read()
    with engine.begin() as transaction:
        for query in querys.split(";"):
            query = query.strip()
            if query:
                transaction.execute(sqlalchemy.sql.text(query))
def db_get_version():
db_insert("create table if not exists versions (version int not null unique)")
versions = db_select("select * from versions order by version asc limit 1")
if versions:
return versions[0]['version']
return None
def db_init():
if not config["db"]["uri"].startswith("postgresql"):
raise Exception("Only postgresql DB supported")
version = db_get_version()
if version is None:
# DB does not exist
db_insertfromfile("create.sql")
elif version != DB_VERSION:
if version > DB_VERSION:
raise Exception("DB version from the future. DB downgrade not supported.")
for v in range(version, DB_VERSION):
logger.info("Migrating from "+str(v)+" to "+str(v+1))
print("Migrate: "+str(v))
db_insertfromfile("migrations/"+str(v+1)+".sql")
version = db_get_version()
if version is None or version != DB_VERSION:
raise Exception("DB migration or creation failed")
def db_sequencer_task_add(hash, path, parent=None, cls=None, family=None, name=None, time=None):
    """Queue a sequencer task for the given sample.

    Bug fix: the former default ``time=int(datetime.datetime.utcnow().timestamp())``
    was evaluated once at import, stamping every task with the process start
    time. ``None`` now means "current UTC time, computed per call"; callers
    passing an explicit timestamp are unaffected.
    """
    if time is None:
        time = int(datetime.datetime.utcnow().timestamp())
    db_insert("insert into sequencer_tasks (hash, path, parent, class, family, name, time_added) values (:hash, :path, :parent, :class, :family, :name, :time)",
              {"hash": hash, "path": path, "parent": parent, "class": cls, "family": family, "name": name, "time": time})
def db_sequencer_task_get_and_set_start(worker, time=None):
    """Claim the oldest unassigned task for `worker` and return it.

    Returns a dict with path/parent/class/family/name, or None when no
    unclaimed task exists.

    Bug fix: the former default ``time=int(datetime.datetime.utcnow().timestamp())``
    was evaluated once at import time; ``None`` now means "current UTC time,
    computed per call". Explicit timestamps behave as before.
    """
    if time is None:
        time = int(datetime.datetime.utcnow().timestamp())
    db_insert("update sequencer_tasks set time_started=:time, worker=:worker where path = (select path from sequencer_tasks where worker is null order by time_added asc limit 1)",
              {"time": time, "worker": worker})
    rows = db_select("select path, parent, class, family, name from sequencer_tasks where worker=:worker limit 1",
                     {"worker": worker})
    if not rows:
        return None
    return rows[0]
def db_sequencer_task_remove(path):
db_insert("delete from sequencer_tasks where path=:path", {"path":path})
def db_sequences_add(software, sequences):
query_sequences = "insert into sequences (type, hash, representation, signature) values (:type, :sequence, :representation, :signature) on conflict do nothing"
query_softwaressequences = "insert into softwaressequences (software, sequence) values (:software, :sequence)"
for sequence in sequences:
db_insert(query_sequences, {"type":sequence["type"], "sequence":sequence["sequence"], "representation":sequence["representation"], "signature":sequence["signature"]})
db_insert(query_softwaressequences, {"software":software, "sequence":sequence["sequence"]})
def db_software_add(hash, mime, parent=None, cls=None, family=None, name=None, first_seen=None):
    """Insert a software sample row.

    Bug fix: the former default ``first_seen=int(datetime.datetime.utcnow().timestamp())``
    was evaluated once at import, stamping every sample with the process start
    time. ``None`` now means "current UTC time, computed per call"; explicit
    values behave as before.
    """
    if first_seen is None:
        first_seen = int(datetime.datetime.utcnow().timestamp())
    db_insert("insert into softwares (hash, mime, parent, class, family, name, first_seen) values (:hash, :mime, :parent, :class, :family, :name, :first_seen)",
              {"hash": hash, "mime": mime, "parent": parent, "class": cls, "family": family, "name": name, "first_seen": first_seen})
def db_get_softwares(limit=10000,offset=0):
return db_select("select * from softwares limit :limit offset :offset",{"limit":limit,"offset":offset})
def db_get_software(hash):
softwares = db_select("select * from softwares where hash=:hash",{"hash":hash})
if softwares:
return softwares[0]
return None
def db_get_softwaretree(hash):
return db_select("select m.hash,p.hash parent_hash,p.class parent_class,p.family parent_family,p.name parent_name,c.hash child_hash,c.class child_class,c.family child_family,c.name child_name from softwares m left join softwares p on m.parent = p.hash left join softwares c on m.hash = c.parent where m.hash=:hash",
{"hash":hash})
def db_software_exists(hash):
return len(db_select("select * from softwares where hash=:hash",{"hash":hash})) > 0
def db_get_softwaressequencesgrouped(hash):
return db_select("""
select * from (
select class,jsonb_agg(data) as data from (
select class,jsonb_build_object('family',family,'data',json_agg(data)) as data from (
select class,family,jsonb_build_object('type',type,'data',jsonb_agg(data)) as data from (
select array_agg(distinct class) as class,array_agg(distinct family) as family,b.type,jsonb_build_object('sequence',a.sequence,'representation',b.representation,'signature',b.signature,'softwares',jsonb_agg(data)) as data from (
select c.class,c.family,a.sequence,jsonb_build_object('name',c.name,'hash',c.hash) as data
from softwaressequences a
join softwaressequences b on a.software = :hash and a.sequence = b.sequence
join softwares c on c.hash != :hash and b.software = c.hash
group by c.class,c.family,a.sequence,c.name,c.hash,data ) as a
join sequences b on a.sequence = b.hash
group by b.type,a.sequence,b.representation,b.signature) as a
group by class,family,type ) as a
group by class,family order by cardinality(family)) as a
group by class order by cardinality(class)) as a where cardinality(class) <= 1
""",
{'hash':hash})
def db_get_softwaressequences(hash):
return db_select("""
select class,family,type,representation,signature,name,c.hash from softwaressequences a join softwaressequences b on a.software = :hash and a.sequence = b.sequence join softwares c on c.hash != :hash and b.software = c.hash join sequences d on a.sequence = d.hash
""",
{'hash':hash})
def db_set_classfamilyname(hash, cls, family, name=None):
if name:
db_insert("update softwares set class=:class, family=:family, name=:name where hash = :hash",
{'class':cls, 'family':family, 'name':name, 'hash':hash})
else:
db_insert("update softwares set class=:class, family=:family where hash = :hash",
{'class':cls, 'family':family, 'hash':hash})
# TODO: find better place for this
os.makedirs(config['storage'].get('path', raw=True), exist_ok=True)
db_init() | 0.185283 | 0.126273 |
import os
import click
from train_and_eval import (
train_and_eval_arct,
train_and_eval_arc,
train_and_eval_piqa,
train_and_eval_csqa,
)
@click.group()
def cli():
    # Root command group; the per-dataset subcommands are attached under
    # the __main__ guard at the bottom of the file. Deliberately no
    # docstring: click would surface it as user-visible help text.
    pass
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=102,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 102)",
)
@click.option(
"--data_path",
default=os.path.join(os.getcwd(), "data"),
type=click.Path(),
help="Path to directory where data is located. (default: ./data/)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_arct(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    no_lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    data_path,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    # Thin CLI wrapper: forwards every option positionally to the ARCT
    # train/eval routine. Note the inversion — the --no_lr_schedule flag is
    # passed through as a positive "use LR schedule" boolean.
    train_and_eval_arct(
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        not no_lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        data_path,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=90,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 90)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_arc(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    no_lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    # Thin CLI wrapper: forwards every option positionally to the ARC
    # train/eval routine (no data_path option for this dataset). Note the
    # inversion — --no_lr_schedule becomes a positive "use LR schedule" flag.
    train_and_eval_arc(
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        not no_lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=115,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 115)",
)
@click.option(
"--data_path",
default=os.path.join(os.getcwd(), "data"),
type=click.Path(),
help="Path to directory where data is located. (default: ./data/)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_piqa(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    no_lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    data_path,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    # Thin CLI wrapper: forwards every option positionally to the PIQA
    # train/eval routine. Note the inversion — --no_lr_schedule becomes a
    # positive "use LR schedule" flag.
    train_and_eval_piqa(
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        not no_lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        data_path,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=87,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 87)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_csqa(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    no_lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    # Thin CLI wrapper: forwards every option positionally to the CSQA
    # train/eval routine (no data_path option for this dataset). Note the
    # inversion — --no_lr_schedule becomes a positive "use LR schedule" flag.
    train_and_eval_csqa(
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        not no_lr_schedule,
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
if __name__ == "__main__":
    # Register one subcommand per benchmark dataset, then dispatch to click.
    cli.add_command(gpt2_for_arct)
    cli.add_command(gpt2_for_arc)
    cli.add_command(gpt2_for_piqa)
    cli.add_command(gpt2_for_csqa)
cli() | GPT-2/main.py | import os
import click
from train_and_eval import (
train_and_eval_arct,
train_and_eval_arc,
train_and_eval_piqa,
train_and_eval_csqa,
)
@click.group()
def cli():
pass
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=102,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 102)",
)
@click.option(
"--data_path",
default=os.path.join(os.getcwd(), "data"),
type=click.Path(),
help="Path to directory where data is located. (default: ./data/)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_arct(
do_train,
do_test,
load_from_checkpoint,
epochs,
learning_rate,
weight_decay,
no_lr_schedule,
warmup_ratio,
gradient_accumulation_steps,
seed,
model_name,
batch_size,
max_seq_len,
data_path,
log_path,
checkpoint_path,
save_top_k,
use_early_stop,
early_stop_metric,
early_stop_patience,
early_stop_mode,
):
train_and_eval_arct(
do_train,
do_test,
load_from_checkpoint,
epochs,
learning_rate,
weight_decay,
not no_lr_schedule,
warmup_ratio,
gradient_accumulation_steps,
seed,
model_name,
batch_size,
max_seq_len,
data_path,
log_path,
checkpoint_path,
save_top_k,
use_early_stop,
early_stop_metric,
early_stop_patience,
early_stop_mode,
)
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=90,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 90)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_arc(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    no_lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """CLI entry point: fine-tune / evaluate GPT-2 on the ARC task.

    Thin forwarder around ``train_and_eval_arc``; the only transformation
    is inverting the ``--no_lr_schedule`` flag into a positive
    "use LR schedule" argument.
    """
    forwarded = (
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        not no_lr_schedule,  # CLI exposes the negation; trainer wants "use schedule"
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
    train_and_eval_arc(*forwarded)
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=115,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 115)",
)
@click.option(
"--data_path",
default=os.path.join(os.getcwd(), "data"),
type=click.Path(),
help="Path to directory where data is located. (default: ./data/)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_piqa(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    no_lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    data_path,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """CLI entry point: fine-tune / evaluate GPT-2 on the PIQA task.

    Thin forwarder around ``train_and_eval_piqa``; the only transformation
    is inverting the ``--no_lr_schedule`` flag into a positive
    "use LR schedule" argument.
    """
    forwarded = (
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        not no_lr_schedule,  # CLI exposes the negation; trainer wants "use schedule"
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        data_path,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
    train_and_eval_piqa(*forwarded)
@click.command()
@click.option("--do_train", is_flag=True, help="Train the model.")
@click.option("--do_test", is_flag=True, help="Test the model.")
@click.option(
"--load_from_checkpoint",
default=None,
type=click.Path(),
help="Whether to load the model from a given checkpoint. (default: None)",
)
@click.option(
"--epochs",
default=10,
type=int,
help="Epochs of training (default: 10)",
)
@click.option(
"--learning_rate",
default=1e-5,
type=float,
help="Learning rate (default: 1e-5)",
)
@click.option(
"--weight_decay",
default=0.1,
type=float,
help="Weight Decay for Adam opt (default: 0.1)",
)
@click.option(
"--no_lr_schedule",
is_flag=True,
help="Don't use LR Scheduling.",
)
@click.option(
"--warmup_ratio",
default=0.06,
type=float,
help="Warmup ratio for linear schedule (default: 0.06)",
)
@click.option(
"--gradient_accumulation_steps",
default=0,
type=int,
help="How many batches should the gradient accumulate for before doing an update. (default: 0)",
)
@click.option(
"--seed",
default=42,
type=int,
help="Random seed. (default: 42)",
)
@click.option(
"--model_name",
default="gpt2-base",
type=str,
help="Model name from huggingface gpt2 pre-trained models. (default: gpt2-base)",
)
@click.option(
"--batch_size", default=8, type=int, help="Training batch size. (default: 8)"
)
@click.option(
"--max_seq_len",
default=87,
type=int,
help="Maximum length for any example sequence (in subwords). (default: 87)",
)
@click.option(
"--log_path",
default=os.path.join(os.getcwd(), "logs"),
type=click.Path(),
help="Path to directory where tensorboard logs will be saved. (default: ./logs/)",
)
@click.option(
"--checkpoint_path",
default=os.path.join(os.getcwd(), "checkpoints"),
type=click.Path(),
help="Path to directory where model checkpoints will be saved. (default: ./checkpoints/)",
)
@click.option(
"--save_top_k",
default=-1,
type=int,
help=(
"Save top k model checkpoints, if -1 saves all, else saves the input number, "
"according to validation loss. (default: -1)"
),
)
@click.option(
"--use_early_stop",
is_flag=True,
help="Whether to use early stopping.",
)
@click.option(
"--early_stop_metric",
type=str,
default="Loss/Validation",
help="Metric to monitor for early stopping criteria. (default: Loss/Validation)",
)
@click.option(
"--early_stop_patience",
type=int,
default=3,
help=(
"Number of validation epochs with no improvement after which training will be "
"stopped. (default: 3)"
),
)
@click.option(
"--early_stop_mode",
type=click.Choice(["auto", "min", "max"]),
default="auto",
help=(
"Min mode, training will stop when the quantity monitored has stopped decreasing;"
"in max mode it will stop when the quantity monitored has stopped increasing;"
),
)
def gpt2_for_csqa(
    do_train,
    do_test,
    load_from_checkpoint,
    epochs,
    learning_rate,
    weight_decay,
    no_lr_schedule,
    warmup_ratio,
    gradient_accumulation_steps,
    seed,
    model_name,
    batch_size,
    max_seq_len,
    log_path,
    checkpoint_path,
    save_top_k,
    use_early_stop,
    early_stop_metric,
    early_stop_patience,
    early_stop_mode,
):
    """CLI entry point: fine-tune / evaluate GPT-2 on the CommonsenseQA task.

    Thin forwarder around ``train_and_eval_csqa``; the only transformation
    is inverting the ``--no_lr_schedule`` flag into a positive
    "use LR schedule" argument.
    """
    forwarded = (
        do_train,
        do_test,
        load_from_checkpoint,
        epochs,
        learning_rate,
        weight_decay,
        not no_lr_schedule,  # CLI exposes the negation; trainer wants "use schedule"
        warmup_ratio,
        gradient_accumulation_steps,
        seed,
        model_name,
        batch_size,
        max_seq_len,
        log_path,
        checkpoint_path,
        save_top_k,
        use_early_stop,
        early_stop_metric,
        early_stop_patience,
        early_stop_mode,
    )
    train_and_eval_csqa(*forwarded)
if __name__ == "__main__":
    # Register every task-specific entry point on the shared click group,
    # then dispatch to whichever subcommand was requested.
    # FIX: the final line had stray table residue ("| 0.50293 | ...") fused
    # onto it, which made this block a syntax error.
    cli.add_command(gpt2_for_arct)
    cli.add_command(gpt2_for_arc)
    cli.add_command(gpt2_for_piqa)
    cli.add_command(gpt2_for_csqa)
    cli()
# Copyright Toolkit Authors
"""Test metadata loader script with Pytest."""
import pytest
def test_metadata_model():
    """MetaDataModel: detect, load and re-save a metadata JSON file."""
    from pydtk.models import MetaDataModel

    target = 'test/records/json_model_test/json_test.json.json'
    # The model must recognise the file before we attempt to load it.
    assert MetaDataModel.is_loadable(target)

    model = MetaDataModel()
    model.load(target)
    model.save('/tmp/test.json')
def test_csv_model():
    """CameraTimestampCsvModel: load a CSV through its metadata, re-save."""
    from pydtk.models import MetaDataModel
    from pydtk.models.csv import CameraTimestampCsvModel

    metadata_file = 'test/records/csv_model_test/data/test.csv.json'
    csv_file = 'test/records/csv_model_test/data/test.csv'

    # Metadata is loaded first; the data model is constructed around it.
    meta = MetaDataModel()
    meta.load(metadata_file)

    model = CameraTimestampCsvModel(metadata=meta)
    model.load(csv_file)
    model.save('/tmp/test.csv')
def test_image_model():
    """GenericImageModel: load an image via metadata and export it."""
    import numpy as np
    from pydtk.models import MetaDataModel
    from pydtk.models.image import GenericImageModel

    meta = MetaDataModel()
    meta.load('test/records/image_model_test/sample.png.json')

    image = GenericImageModel(metadata=meta)
    image.load()  # no explicit path: presumably resolved from the metadata
    assert isinstance(image.to_ndarray(), np.ndarray)
    image.save('/tmp/test_image.png')
def test_annotation_model():
    """AnnotationCsvModel: load annotations via metadata, check ndarray."""
    import numpy as np
    from pydtk.models import MetaDataModel
    from pydtk.models.csv import AnnotationCsvModel

    meta = MetaDataModel()
    meta.load('test/records/annotation_model_test/annotation_test.csv.json')

    model = AnnotationCsvModel(metadata=meta)
    model.load()  # no explicit path: presumably resolved from the metadata
    assert isinstance(model.to_ndarray(), np.ndarray)
    model.save('/tmp/test_annotation.csv')
def test_forecast_model():
    """Run the ForecastCsvModel test.

    Loads a forecast CSV twice: first in full, then restricted to a
    timestamp window, asserting an ndarray conversion both times.
    """
    from datetime import datetime

    import numpy as np
    from pydtk.models import MetaDataModel
    from pydtk.models.csv import ForecastCsvModel

    meta_path = 'test/records/forecast_model_test/forecast_test.csv.json'

    # load metadata
    metadata = MetaDataModel()
    metadata.load(meta_path)

    # full load
    forecast_model = ForecastCsvModel(metadata=metadata)
    forecast_model.load()
    assert isinstance(forecast_model.to_ndarray(), np.ndarray)
    forecast_model.save('/tmp/test_forecast.csv')

    # windowed load (FIX: the format variable was misspelled "strp_foramt")
    strp_format = "%Y/%m/%d %H:%M:%S"
    forecast_model.load(
        start_timestamp=datetime.strptime("2020/11/03 00:30:00", strp_format).timestamp(),
        end_timestamp=datetime.strptime("2020/11/03 01:20:00", strp_format).timestamp(),
    )
    assert isinstance(forecast_model.to_ndarray(), np.ndarray)
    forecast_model.save("/tmp/test_forecast_query.csv")
def test_json_model():
    """GenericJsonModel: load JSON content through its metadata."""
    from pydtk.models import MetaDataModel
    from pydtk.models.json_model import GenericJsonModel

    meta = MetaDataModel()
    meta.load('test/records/json_model_test/json_test.json.json')

    model = GenericJsonModel(metadata=meta)
    model.load()  # no explicit path: presumably resolved from the metadata
    assert isinstance(model.data, dict)
    model.save('/tmp/test_json.json')
def test_movie_model():
    """GenericMovieModel: load a movie via metadata, check ndarray, re-save."""
    import numpy as np
    from pydtk.models import MetaDataModel
    from pydtk.models.movie import GenericMovieModel

    meta = MetaDataModel()
    meta.load('test/records/movie_model_test/sample.mp4.json')

    model = GenericMovieModel(metadata=meta)
    model.load()  # no explicit path: presumably resolved from the metadata
    assert isinstance(model.to_ndarray(), np.ndarray)
    model.save('/tmp/test_movie.mp4')
@pytest.mark.extra
@pytest.mark.pointcloud
def test_pointcloud_pcd_model():
    """PCDModel round-trip: ndarray -> .pcd file -> ndarray."""
    import numpy as np
    from pydtk.models.pointcloud.pcd import PCDModel

    pcd_path = 'test/assets/test_pointcloud.pcd'
    # Random cloud: x/y/z in [0, 100), intensity in [0, 1).
    cloud = np.random.random_sample((100, 4)) * np.array([100, 100, 100, 1])

    writer = PCDModel()
    writer.from_ndarray(cloud, columns=['x', 'y', 'z', 'intensity'])
    writer.save(pcd_path)

    reader = PCDModel()
    reader.load(pcd_path)

    # The round-trip must be lossless and preserve the column set.
    restored = reader.to_ndarray()
    assert np.sum((cloud - restored) ** 2) == 0.0
    assert all(c in reader._columns for c in writer._columns)
    assert all(c in writer._columns for c in reader._columns)
@pytest.mark.extra
@pytest.mark.ros
def test_std_msgs_rosbag_model():
    """Load a std_msgs topic from the sample rosbag via GenericRosbagModel."""
    from pydtk.models import MetaDataModel
    from pydtk.models.rosbag import GenericRosbagModel

    # Metadata drives model construction, so read it first.
    meta = MetaDataModel()
    meta.load('test/records/sample/data/records.bag.json')

    model = GenericRosbagModel(metadata=meta)
    model.load('test/records/sample/data/records.bag', contents='/vehicle/analog/speed_pulse')
@pytest.mark.extra
@pytest.mark.ros
def test_sensor_msgs_nav_sat_fix_rosbag_model():
    """Load the GNSS topic from the sample rosbag via GenericRosbagModel."""
    from pydtk.models import MetaDataModel
    from pydtk.models.rosbag import GenericRosbagModel

    # Metadata drives model construction, so read it first.
    meta = MetaDataModel()
    meta.load('test/records/sample/data/records.bag.json')

    model = GenericRosbagModel(metadata=meta)
    model.load('test/records/sample/data/records.bag', contents='/vehicle/gnss')
@pytest.mark.extra
@pytest.mark.ros
def test_geometry_msgs_accel_stamped_rosbag_model():
    """Load the acceleration topic from the sample rosbag."""
    from pydtk.models import MetaDataModel
    from pydtk.models.rosbag import GenericRosbagModel

    # Metadata drives model construction, so read it first.
    meta = MetaDataModel()
    meta.load('test/records/sample/data/records.bag.json')

    model = GenericRosbagModel(metadata=meta)
    model.load('test/records/sample/data/records.bag', contents='/vehicle/acceleration')
@pytest.mark.extra
@pytest.mark.ros
def test_sensor_msgs_pointcloud2_rosbag_model():
    """Load a PointCloud2 topic, selecting a specific set of fields."""
    from pydtk.models import MetaDataModel
    from pydtk.models.rosbag import SensorMsgsPointCloud2RosbagModel

    # Metadata drives model construction, so read it first.
    meta = MetaDataModel()
    meta.load('test/records/sample/data/records.bag.json')

    model = SensorMsgsPointCloud2RosbagModel(metadata=meta)
    model.configure(fields=('x', 'y', 'z', 'intensity'))
    model.load('test/records/sample/data/records.bag', contents='/points_concat_downsampled')
@pytest.mark.extra
@pytest.mark.ros
def test_autoware_can_msgs_can_packet_rosbag_model():
    """CAN-packet model: supply the assign list two different ways."""
    from pydtk.models import MetaDataModel
    from pydtk.models.autoware import AutowareCanMsgsCANPacketRosbagModel

    bag_meta = 'test/records/can_model_test/test.bag.json'
    bag_path = 'test/records/can_model_test/test.bag'

    meta = MetaDataModel()
    meta.load(bag_meta)

    def _verify(m):
        # Timestamps align with rows; columns align with the last axis.
        stamps = m.timestamps
        array = m.to_ndarray()
        assert len(stamps) == len(array)
        assert len(m.columns) == array.shape[-1]

    # 1) assign list supplied at construction time
    model = AutowareCanMsgsCANPacketRosbagModel(
        metadata=meta,
        path_to_assign_list='test/assets/can_assign_list.csv'
    )
    model.load(bag_path, contents='/vehicle/can_raw')
    _verify(model)

    # 2) assign list supplied via configure()
    model = AutowareCanMsgsCANPacketRosbagModel(metadata=meta)
    model.configure(path_to_assign_list='test/assets/can_assign_list.csv')
    model.load(bag_path, contents='/vehicle/can_raw')
    _verify(model)
@pytest.mark.extra
@pytest.mark.ros
@pytest.mark.zstd
def test_std_msgs_zstd_rosbag_model():
    """Load a topic from a zstd-compressed rosbag."""
    from pydtk.models import MetaDataModel
    from pydtk.models.zstd.rosbag import GenericZstdRosbagModel

    # Metadata drives model construction, so read it first.
    meta = MetaDataModel()
    meta.load('test/records/zstd_rosbag_model_test/data/records.bag.zst.json')

    model = GenericZstdRosbagModel(metadata=meta)
    model.load('test/records/zstd_rosbag_model_test/data/records.bag.zst', contents='/vehicle/analog/speed_pulse')
if __name__ == '__main__':
    # Manual runner: uncomment a test to execute it without pytest.
    # FIX: the final line had stray table residue ("| test/test_models.py |")
    # fused onto it, which made this block a syntax error.
    # test_metadata_model()
    # test_csv_model()
    # test_std_msgs_rosbag_model()
    # test_sensor_msgs_nav_sat_fix_rosbag_model()
    # test_geometry_msgs_accel_stamped_rosbag_model()
    # test_sensor_msgs_pointcloud2_rosbag_model()
    # test_autoware_can_msgs_can_packet_rosbag_model()
    test_pointcloud_pcd_model()
# Copyright Toolkit Authors
"""Test metadata loader script with Pytest."""
import pytest
def test_metadata_model():
"""Run the metadata loader test."""
path = 'test/records/json_model_test/json_test.json.json'
from pydtk.models import MetaDataModel
assert MetaDataModel.is_loadable(path)
# load
metadata = MetaDataModel()
metadata.load(path)
metadata.save('/tmp/test.json')
def test_csv_model():
"""Run the metadata and data loader test."""
meta_path = 'test/records/csv_model_test/data/test.csv.json'
path = 'test/records/csv_model_test/data/test.csv'
from pydtk.models import MetaDataModel
from pydtk.models.csv import CameraTimestampCsvModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
csv = CameraTimestampCsvModel(metadata=metadata)
csv.load(path)
csv.save('/tmp/test.csv')
def test_image_model():
"""Run the GenericImageModel test."""
meta_path = 'test/records/image_model_test/sample.png.json'
from pydtk.models import MetaDataModel
from pydtk.models.image import GenericImageModel
import numpy as np
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
model = GenericImageModel(metadata=metadata)
model.load()
assert isinstance(model.to_ndarray(), np.ndarray)
model.save('/tmp/test_image.png')
def test_annotation_model():
"""Run the AnnotationCsvModel test."""
meta_path = 'test/records/annotation_model_test/annotation_test.csv.json'
from pydtk.models import MetaDataModel
from pydtk.models.csv import AnnotationCsvModel
import numpy as np
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
annotation_model = AnnotationCsvModel(metadata=metadata)
annotation_model.load()
assert isinstance(annotation_model.to_ndarray(), np.ndarray)
annotation_model.save('/tmp/test_annotation.csv')
def test_forecast_model():
"""Run the ForecastCsvModel test."""
meta_path = 'test/records/forecast_model_test/forecast_test.csv.json'
from pydtk.models import MetaDataModel
from pydtk.models.csv import ForecastCsvModel
import numpy as np
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
forecast_model = ForecastCsvModel(metadata=metadata)
forecast_model.load()
assert isinstance(forecast_model.to_ndarray(), np.ndarray)
forecast_model.save('/tmp/test_forecast.csv')
from datetime import datetime
strp_foramt = "%Y/%m/%d %H:%M:%S"
forecast_model.load(
start_timestamp=datetime.strptime("2020/11/03 00:30:00", strp_foramt).timestamp(),
end_timestamp=datetime.strptime("2020/11/03 01:20:00", strp_foramt).timestamp(),
)
assert isinstance(forecast_model.to_ndarray(), np.ndarray)
forecast_model.save("/tmp/test_forecast_query.csv")
def test_json_model():
"""Run the GenericJsonModel test."""
meta_path = 'test/records/json_model_test/json_test.json.json'
from pydtk.models import MetaDataModel
from pydtk.models.json_model import GenericJsonModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
json_model = GenericJsonModel(metadata=metadata)
json_model.load()
assert isinstance(json_model.data, dict)
json_model.save('/tmp/test_json.json')
def test_movie_model():
"""Run the GenericMovieModel test."""
meta_path = 'test/records/movie_model_test/sample.mp4.json'
from pydtk.models import MetaDataModel
from pydtk.models.movie import GenericMovieModel
import numpy as np
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
model = GenericMovieModel(metadata=metadata)
model.load()
assert isinstance(model.to_ndarray(), np.ndarray)
model.save('/tmp/test_movie.mp4')
@pytest.mark.extra
@pytest.mark.pointcloud
def test_pointcloud_pcd_model():
"""Test pointcloud/PCDModel."""
path = 'test/assets/test_pointcloud.pcd'
import numpy as np
from pydtk.models.pointcloud.pcd import PCDModel
# Generate point-cloud
pointcloud = np.random.random_sample((100, 4)) * np.array([100, 100, 100, 1])
# Set
pcd = PCDModel()
pcd.from_ndarray(pointcloud, columns=['x', 'y', 'z', 'intensity'])
# Save
pcd.save(path)
# Load
new_pcd = PCDModel()
new_pcd.load(path)
# Assertion
new_pointcloud = new_pcd.to_ndarray()
diff = np.sum((pointcloud - new_pointcloud) ** 2)
assert diff == 0.0
assert all([c in new_pcd._columns for c in pcd._columns])
assert all([c in pcd._columns for c in new_pcd._columns])
@pytest.mark.extra
@pytest.mark.ros
def test_std_msgs_rosbag_model():
"""Run the metadata and data loader test."""
meta_path = 'test/records/sample/data/records.bag.json'
path = 'test/records/sample/data/records.bag'
from pydtk.models import MetaDataModel
from pydtk.models.rosbag import GenericRosbagModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
data = GenericRosbagModel(metadata=metadata)
data.load(path, contents='/vehicle/analog/speed_pulse')
@pytest.mark.extra
@pytest.mark.ros
def test_sensor_msgs_nav_sat_fix_rosbag_model():
"""Run the metadata and data loader test."""
meta_path = 'test/records/sample/data/records.bag.json'
path = 'test/records/sample/data/records.bag'
from pydtk.models import MetaDataModel
from pydtk.models.rosbag import GenericRosbagModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
data = GenericRosbagModel(metadata=metadata)
data.load(path, contents='/vehicle/gnss')
@pytest.mark.extra
@pytest.mark.ros
def test_geometry_msgs_accel_stamped_rosbag_model():
"""Run the metadata and data loader test."""
meta_path = 'test/records/sample/data/records.bag.json'
path = 'test/records/sample/data/records.bag'
from pydtk.models import MetaDataModel
from pydtk.models.rosbag import GenericRosbagModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
data = GenericRosbagModel(metadata=metadata)
data.load(path, contents='/vehicle/acceleration')
@pytest.mark.extra
@pytest.mark.ros
def test_sensor_msgs_pointcloud2_rosbag_model():
"""Run the metadata and data loader test."""
meta_path = 'test/records/sample/data/records.bag.json'
path = 'test/records/sample/data/records.bag'
from pydtk.models import MetaDataModel
from pydtk.models.rosbag import SensorMsgsPointCloud2RosbagModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
model = SensorMsgsPointCloud2RosbagModel(metadata=metadata)
model.configure(fields=('x', 'y', 'z', 'intensity'))
model.load(path, contents='/points_concat_downsampled')
@pytest.mark.extra
@pytest.mark.ros
def test_autoware_can_msgs_can_packet_rosbag_model():
"""Run the metadata and data loader test."""
meta_path = 'test/records/can_model_test/test.bag.json'
path = 'test/records/can_model_test/test.bag'
from pydtk.models import MetaDataModel
from pydtk.models.autoware import AutowareCanMsgsCANPacketRosbagModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
model = AutowareCanMsgsCANPacketRosbagModel(
metadata=metadata,
path_to_assign_list='test/assets/can_assign_list.csv'
)
model.load(path, contents='/vehicle/can_raw')
timestamps = model.timestamps
data = model.to_ndarray()
columns = model.columns
assert len(timestamps) == len(data)
assert len(columns) == data.shape[-1]
# load with configuration
model = AutowareCanMsgsCANPacketRosbagModel(metadata=metadata)
model.configure(path_to_assign_list='test/assets/can_assign_list.csv')
model.load(path, contents='/vehicle/can_raw')
# retrieve
timestamps = model.timestamps
data = model.to_ndarray()
columns = model.columns
assert len(timestamps) == len(data)
assert len(columns) == data.shape[-1]
@pytest.mark.extra
@pytest.mark.ros
@pytest.mark.zstd
def test_std_msgs_zstd_rosbag_model():
"""Run the metadata and data loader test."""
meta_path = 'test/records/zstd_rosbag_model_test/data/records.bag.zst.json'
path = 'test/records/zstd_rosbag_model_test/data/records.bag.zst'
from pydtk.models import MetaDataModel
from pydtk.models.zstd.rosbag import GenericZstdRosbagModel
# load metadata
metadata = MetaDataModel()
metadata.load(meta_path)
# load
data = GenericZstdRosbagModel(metadata=metadata)
data.load(path, contents='/vehicle/analog/speed_pulse')
if __name__ == '__main__':
# test_metadata_model()
# test_csv_model()
# test_std_msgs_rosbag_model()
# test_sensor_msgs_nav_sat_fix_rosbag_model()
# test_geometry_msgs_accel_stamped_rosbag_model()
# test_sensor_msgs_pointcloud2_rosbag_model()
# test_autoware_can_msgs_can_packet_rosbag_model()
test_pointcloud_pcd_model() | 0.781956 | 0.40539 |
import asyncio
import logging
import pigpio
from surrortg.inputs import Switch
from surrortg.inputs.input_filters import SpamFilter
from games.arcade_pinball.config import (
BUTTON_PRESS_TIME,
MAX_HOLD_TIME,
MAX_INPUTS_PER_INPUT,
PER_SECONDS,
)
class ArcadeMultiButton(Switch):
    """A Switch that drives one or more GPIO pins as one logical button.

    The pins appear to be wired active-low: on() drives every pin to 0
    (pressed) and off() drives them back to 1 (released) — confirm against
    the cabinet wiring. Presses are rate-limited through a SpamFilter, and
    a watchdog task fires the abuse callback if a press is held longer
    than MAX_HOLD_TIME without a release.
    """
    def __init__(
        self,
        pi,
        pins,
        name,
        abuse_function=None,
        button_press_time=BUTTON_PRESS_TIME,
    ):
        """Configure every pin in *pins* as a pigpio output.

        :param pi: connected ``pigpio.pi()`` instance (owned by the caller)
        :param pins: GPIO pin numbers driven together as one button
        :param name: label used in log messages
        :param abuse_function: optional async callback fired when the
            button is held past MAX_HOLD_TIME
        :param button_press_time: press duration used by single_press()
        """
        self.pi = pi
        self.pins = pins
        self.name = name
        self.button_press_time = button_press_time
        self.abuse_callback = abuse_function
        # Hold-watchdog task: created on press, cancelled on release.
        self.task = None
        self.spam_filter = SpamFilter(MAX_INPUTS_PER_INPUT, PER_SECONDS)
        for pin in self.pins:
            self.pi.set_mode(pin, pigpio.OUTPUT)
    async def on(self, seat=0):
        """Press: drive all pins low and start the hold watchdog."""
        if not self.spam_filter.too_much_spam():
            logging.debug(f"{self.name} on")
            for pin in self.pins:
                self.pi.write(pin, 0)
            self._reset_timer(True)
        else:
            # Rate limit exceeded: force the button released instead.
            logging.info(f"Too much spam for {self.name}")
            await self.off()
    async def off(self, seat=0):
        """Release: drive all pins high and cancel the hold watchdog."""
        logging.debug(f"{self.name} off")
        for pin in self.pins:
            self.pi.write(pin, 1)
        self._reset_timer(False)
    async def shutdown(self, seat=0):
        """Release the button if the pigpio connection is still alive."""
        # ArcadePinballGame handles stopping pigpio
        if self.pi.connected:
            await self.off()
    async def single_press(self):
        """Press, hold for button_press_time seconds, then release."""
        await self.on()
        await asyncio.sleep(self.button_press_time)
        await self.off()
    def _reset_timer(self, start_new):
        # Cancel any running watchdog; optionally start a fresh one.
        if self.task is not None and not self.task.cancelled():
            self.task.cancel()
        if start_new:
            self.task = asyncio.create_task(self._lock_controls())
    async def _lock_controls(self):
        # Fires only if a press lasts MAX_HOLD_TIME without a release
        # (a release cancels this task via _reset_timer).
        await asyncio.sleep(MAX_HOLD_TIME)
        logging.info("Locking controls due to abuse")
        if self.abuse_callback is not None:
            await self.abuse_callback()
class ArcadeButton(ArcadeMultiButton):
    """Single-pin convenience wrapper around ArcadeMultiButton."""
    def __init__(
        self,
        pi,
        pin,
        name,
        abuse_function=None,
        button_press_time=BUTTON_PRESS_TIME,
    ):
        """Same as ArcadeMultiButton.__init__, but takes one GPIO pin."""
        super().__init__(
            pi,
            [pin],
            name,
            abuse_function=abuse_function,
            button_press_time=button_press_time,
        )
if __name__ == "__main__":
    from games.arcade_pinball.config import (
        LEFT_FLIPPER_PIN,
        RIGHT_FLIPPER_PINS,
        MAGNET_BUTTON_PIN,
        START_BUTTON_PIN,
        SERVICE_BUTTON_PIN,
    )

    async def test_buttons():
        """Manually exercise every button: all on, wait, all off, repeat."""
        pi = pigpio.pi()
        if not pi.connected:
            raise RuntimeError("Could not connect to pigpio")
        left_flipper = ArcadeButton(pi, LEFT_FLIPPER_PIN, "left")
        right_flipper = ArcadeMultiButton(pi, RIGHT_FLIPPER_PINS, "right")
        magnet_button = ArcadeButton(pi, MAGNET_BUTTON_PIN, "magnet")
        start_button = ArcadeButton(pi, START_BUTTON_PIN, "start")
        service_menu_button = ArcadeButton(pi, SERVICE_BUTTON_PIN, "service")
        buttons = (
            left_flipper,
            right_flipper,
            magnet_button,
            start_button,
            service_menu_button,
        )
        try:
            while True:
                for button in buttons:
                    await button.on()
                # BUG FIX: asyncio.sleep(5) was previously called without
                # "await", so the coroutine was never executed and there
                # was no delay between on and off at all.
                await asyncio.sleep(5)
                for button in buttons:
                    await button.off()
                await asyncio.sleep(5)
        except KeyboardInterrupt:
            for button in buttons:
                await button.off()
        finally:
            # FIX: stop pigpio on any exit path, not only on Ctrl-C.
            pi.stop()

    # FIX: the final line had stray table residue fused onto it
    # ("| games/arcade_pinball/arcade_button.py | import asyncio").
    asyncio.run(test_buttons())
import logging
import pigpio
from surrortg.inputs import Switch
from surrortg.inputs.input_filters import SpamFilter
from games.arcade_pinball.config import (
BUTTON_PRESS_TIME,
MAX_HOLD_TIME,
MAX_INPUTS_PER_INPUT,
PER_SECONDS,
)
class ArcadeMultiButton(Switch):
def __init__(
self,
pi,
pins,
name,
abuse_function=None,
button_press_time=BUTTON_PRESS_TIME,
):
self.pi = pi
self.pins = pins
self.name = name
self.button_press_time = button_press_time
self.abuse_callback = abuse_function
self.task = None
self.spam_filter = SpamFilter(MAX_INPUTS_PER_INPUT, PER_SECONDS)
for pin in self.pins:
self.pi.set_mode(pin, pigpio.OUTPUT)
async def on(self, seat=0):
if not self.spam_filter.too_much_spam():
logging.debug(f"{self.name} on")
for pin in self.pins:
self.pi.write(pin, 0)
self._reset_timer(True)
else:
logging.info(f"Too much spam for {self.name}")
await self.off()
async def off(self, seat=0):
logging.debug(f"{self.name} off")
for pin in self.pins:
self.pi.write(pin, 1)
self._reset_timer(False)
async def shutdown(self, seat=0):
# ArcadePinballGame handles stopping pigpio
if self.pi.connected:
await self.off()
async def single_press(self):
await self.on()
await asyncio.sleep(self.button_press_time)
await self.off()
def _reset_timer(self, start_new):
if self.task is not None and not self.task.cancelled():
self.task.cancel()
if start_new:
self.task = asyncio.create_task(self._lock_controls())
async def _lock_controls(self):
await asyncio.sleep(MAX_HOLD_TIME)
logging.info("Locking controls due to abuse")
if self.abuse_callback is not None:
await self.abuse_callback()
class ArcadeButton(ArcadeMultiButton):
def __init__(
self,
pi,
pin,
name,
abuse_function=None,
button_press_time=BUTTON_PRESS_TIME,
):
super().__init__(
pi,
[pin],
name,
abuse_function=abuse_function,
button_press_time=button_press_time,
)
if __name__ == "__main__":
from games.arcade_pinball.config import (
LEFT_FLIPPER_PIN,
RIGHT_FLIPPER_PINS,
MAGNET_BUTTON_PIN,
START_BUTTON_PIN,
SERVICE_BUTTON_PIN,
)
async def test_buttons():
pi = pigpio.pi()
if not pi.connected:
raise RuntimeError("Could not connect to pigpio")
left_flipper = ArcadeButton(pi, LEFT_FLIPPER_PIN, "left")
right_flipper = ArcadeMultiButton(pi, RIGHT_FLIPPER_PINS, "right")
magnet_button = ArcadeButton(pi, MAGNET_BUTTON_PIN, "magnet")
start_button = ArcadeButton(pi, START_BUTTON_PIN, "start")
service_menu_button = ArcadeButton(pi, SERVICE_BUTTON_PIN, "service")
try:
while True:
await left_flipper.on()
await right_flipper.on()
await magnet_button.on()
await start_button.on()
await service_menu_button.on()
asyncio.sleep(5)
await left_flipper.off()
await right_flipper.off()
await magnet_button.off()
await start_button.off()
await service_menu_button.off()
asyncio.sleep(5)
except KeyboardInterrupt:
await left_flipper.off()
await right_flipper.off()
await magnet_button.off()
await start_button.off()
await service_menu_button.off()
pi.stop()
asyncio.run(test_buttons()) | 0.382603 | 0.108378 |
from __future__ import print_function, unicode_literals, division, absolute_import
import os, json, collections, concurrent.futures, traceback, sys, time, gc
from multiprocessing import cpu_count
import dateutil.parser
from .. import logger
from ..compat import basestring, THREAD_TIMEOUT_MAX
from ..exceptions import DXError
import numbers
import binascii
import random
def _force_quit(signum, frame):
    """Signal handler: terminate the process immediately, skipping cleanup.

    os._exit() bypasses atexit handlers (including concurrent.futures'
    thread-join handler), guaranteeing a prompt exit.
    """
    os._exit(os.EX_IOERR)
def get_futures_threadpool(max_workers):
    """Create a ThreadPoolExecutor sized to *max_workers* threads."""
    return concurrent.futures.ThreadPoolExecutor(max_workers)
def wait_for_a_future(futures, print_traceback=False):
    """
    Return the next future that completes. If a KeyboardInterrupt is
    received, then the entire process is exited immediately. See
    wait_for_all_futures for more notes.
    """
    while True:
        try:
            # A finite timeout lets KeyboardInterrupt be serviced between polls.
            return next(concurrent.futures.as_completed(futures, timeout=THREAD_TIMEOUT_MAX))
        except concurrent.futures.TimeoutError:
            # Nothing finished within the window; poll again.
            continue
        except KeyboardInterrupt:
            if print_traceback:
                traceback.print_stack()
            else:
                print('')
            os._exit(os.EX_IOERR)
def wait_for_all_futures(futures, print_traceback=False):
    """
    Block until every future in *futures* has finished.

    Polls with a 60-second timeout so that a KeyboardInterrupt can be
    delivered between waits. On Ctrl-C the process is terminated with
    os._exit() so that concurrent.futures' atexit handler never runs:
    it would otherwise block on joining worker threads, preventing a
    timely exit. (Note: os._exit() interacts badly with interactive
    shells such as ipython.)
    """
    try:
        # wait() returns (done, not_done); keep polling until nothing is left.
        while concurrent.futures.wait(futures, timeout=60).not_done:
            pass
    except KeyboardInterrupt:
        if print_traceback:
            traceback.print_stack()
        else:
            print('')
        os._exit(os.EX_IOERR)
def response_iterator(request_iterator, thread_pool, max_active_tasks=None, do_first_task_sequentially=True):
"""
:param request_iterator:
An iterator producing inputs for consumption by the worker pool.
:type request_iterator: iterator of callable, args, kwargs
:param thread_pool: thread pool to submit the requests to
:type thread_pool: concurrent.futures.thread.ThreadPoolExecutor
:param max_active_tasks:
The maximum number of tasks that may be either running or
waiting for consumption of their result. If not given, defaults
to the number of CPU cores on the machine.
:type max_active_tasks: int
:param do_first_task_sequentially:
If True, executes (and returns the result of) the first request
before submitting any other requests (the subsequent requests
are submitted with *max_active_tasks* parallelism).
:type do_first_task_sequentially: bool
Rate-limited asynchronous multithreaded task runner. Consumes tasks
from *request_iterator*. Yields their results in order, while
allowing up to *max_active_tasks* to run simultaneously. Unlike
concurrent.futures.Executor.map, prevents new tasks from starting
while there are *max_active_tasks* or more unconsumed results.
"""
tasks_in_progress = collections.deque()
if max_active_tasks is None:
max_active_tasks = cpu_count()
# The following two functions facilitate GC by not adding extra variables to the enclosing scope.
def submit_task(task_iterator, executor, futures_queue):
retval = next(task_iterator, None)
if retval is None:
return False
task_callable, task_args, task_kwargs = retval
task_future = executor.submit(task_callable, *task_args, **task_kwargs)
futures_queue.append(task_future)
return True
def next_result(tasks_in_progress):
future = tasks_in_progress.popleft()
try:
result = future.result(timeout=THREAD_TIMEOUT_MAX)
except KeyboardInterrupt:
print('')
os._exit(os.EX_IOERR)
return result
if do_first_task_sequentially:
task_callable, task_args, task_kwargs = next(request_iterator)
yield task_callable(*task_args, **task_kwargs)
for _i in range(max_active_tasks):
retval = submit_task(request_iterator, thread_pool, tasks_in_progress)
if not retval:
break
while len(tasks_in_progress) > 0:
result = next_result(tasks_in_progress)
submit_task(request_iterator, thread_pool, tasks_in_progress)
yield result
del result
def string_buffer_length(buf):
orig_pos = buf.tell()
buf.seek(0, os.SEEK_END)
buf_len = buf.tell()
buf.seek(orig_pos)
return buf_len
def normalize_time_input(t, future=False, default_unit='ms'):
"""
:param default_unit: units of the input time *t*; must be one of "s" or
"ms". This param is only respected if *t* looks like an int (e.g.
"12345", 12345).
:type default_unit: string
Converts inputs such as:
"2012-05-01"
"-5d"
1352863174
"1352863174"
to milliseconds since epoch. See http://labix.org/python-dateutil and :meth:`normalize_timedelta`.
"""
error_msg = 'Error: Expected an int timestamp, a date format (e.g. YYYY-MM-DD), or an int with a single-letter suffix (s=seconds, m=minutes, h=hours, d=days, w=weeks, M=months, y=years; e.g. "-10d" indicates 10 days ago); but got {t}'
if isinstance(t, basestring) and t.isdigit():
t = int(t)
if isinstance(t, basestring):
try:
t = normalize_timedelta(t)
except ValueError:
try:
t = int(time.mktime(dateutil.parser.parse(t).timetuple())*1000)
assert t > 0
except (ValueError, OverflowError, AssertionError):
raise ValueError(error_msg.format(t=t))
elif isinstance(t, numbers.Integral):
units_multipliers = {'ms': 1, 's': 1000}
if default_unit not in units_multipliers:
raise ValueError("Expected default_unit to be one of 's' or 'ms'")
t = t * units_multipliers[default_unit]
else:
raise ValueError(error_msg.format(t=t))
now = int(time.time()*1000)
if t < 0 or (future and t < now):
t += now
return t
def normalize_timedelta(timedelta):
"""
Given a string like "1w" or "-5d", convert it to an integer in milliseconds.
Integers without a suffix are interpreted as seconds.
Note: not related to the datetime timedelta class.
"""
try:
return int(timedelta) * 1000
except ValueError as e:
t, suffix = timedelta[:-1], timedelta[-1:]
suffix_multipliers = {'s': 1000, 'm': 1000*60, 'h': 1000*60*60, 'd': 1000*60*60*24, 'w': 1000*60*60*24*7,
'M': 1000*60*60*24*30, 'y': 1000*60*60*24*365}
if suffix not in suffix_multipliers:
raise ValueError()
return int(t) * suffix_multipliers[suffix]
# See http://stackoverflow.com/questions/4126348
class OrderedDefaultdict(collections.OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, self.items()
def group_array_by_field(array, field='group'):
groups = OrderedDefaultdict(list)
for item in array:
if field not in item and None not in groups:
groups[None] = []
groups[item.get(field)].append(item)
return groups
def merge(d, u):
"""
Recursively updates a dictionary.
Example: merge({"a": {"b": 1, "c": 2}}, {"a": {"b": 3}}) = {"a": {"b": 3, "c": 2}}
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = merge(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def _dict_raise_on_duplicates(ordered_pairs):
"""
Reject duplicate keys.
"""
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d
def json_load_raise_on_duplicates(*args, **kwargs):
"""
Like json.load(), but raises an error on duplicate keys.
"""
kwargs['object_pairs_hook'] = _dict_raise_on_duplicates
return json.load(*args, **kwargs)
def json_loads_raise_on_duplicates(*args, **kwargs):
"""
Like json.loads(), but raises an error on duplicate keys.
"""
kwargs['object_pairs_hook'] = _dict_raise_on_duplicates
return json.loads(*args, **kwargs)
def warn(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class Nonce:
'''
Generates a nonce by using the system's random number generator. If it fails
it uses python's random library to generate a random long integer.
The nonce is the random number concatenated with the time.
'''
def __init__(self):
try:
self.nonce = "%s%f" % (str(binascii.hexlify(os.urandom(32))), time.time())
except:
random.seed(time.time())
self.nonce = "%s%f" % (str(random.getrandbits(8*26)), time.time())
def __str__(self):
return self.nonce
@staticmethod
def update_nonce(input_params):
'''
Static method to return a copy of the input dictionary with an
additional unique nonce
:param input: an input dictionary that may be empty
:type input: dict
:returns an extended copy of the input with an additional nonce field
The input dictionary is updated with a nonce only if does not already
have a non empty nonce
'''
input_cp = input_params.copy()
if len(input_cp.get('nonce', '')) == 0:
input_cp['nonce'] = str(Nonce())
return input_cp
# Moved to the bottom due to circular imports
from .exec_utils import run, convert_handlers_to_dxlinks, parse_args_as_job_input, entry_point, DXJSONEncoder | src/python/dxpy/utils/__init__.py | from __future__ import print_function, unicode_literals, division, absolute_import
import os, json, collections, concurrent.futures, traceback, sys, time, gc
from multiprocessing import cpu_count
import dateutil.parser
from .. import logger
from ..compat import basestring, THREAD_TIMEOUT_MAX
from ..exceptions import DXError
import numbers
import binascii
import random
def _force_quit(signum, frame):
# traceback.print_stack(frame)
os._exit(os.EX_IOERR)
def get_futures_threadpool(max_workers):
return concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
def wait_for_a_future(futures, print_traceback=False):
"""
Return the next future that completes. If a KeyboardInterrupt is
received, then the entire process is exited immediately. See
wait_for_all_futures for more notes.
"""
while True:
try:
future = next(concurrent.futures.as_completed(futures, timeout=THREAD_TIMEOUT_MAX))
break
except concurrent.futures.TimeoutError:
pass
except KeyboardInterrupt:
if print_traceback:
traceback.print_stack()
else:
print('')
os._exit(os.EX_IOERR)
return future
def wait_for_all_futures(futures, print_traceback=False):
"""
Wait indefinitely for all futures in the input iterable to complete.
Use a timeout to enable interrupt handling.
Call os._exit() in case of KeyboardInterrupt. Otherwise, the atexit registered handler in concurrent.futures.thread
will run, and issue blocking join() on all worker threads, requiring us to listen to events in worker threads
in order to enable timely exit in response to Ctrl-C.
Note: This still doesn't handle situations where Ctrl-C is pressed elsewhere in the code and there are worker
threads with long-running tasks.
Note: os._exit() doesn't work well with interactive mode (e.g. ipython). This may help:
import __main__ as main; if hasattr(main, '__file__'): os._exit() else: os.exit()
"""
try:
while True:
waited_futures = concurrent.futures.wait(futures, timeout=60)
if len(waited_futures.not_done) == 0:
break
except KeyboardInterrupt:
if print_traceback:
traceback.print_stack()
else:
print('')
os._exit(os.EX_IOERR)
def response_iterator(request_iterator, thread_pool, max_active_tasks=None, do_first_task_sequentially=True):
"""
:param request_iterator:
An iterator producing inputs for consumption by the worker pool.
:type request_iterator: iterator of callable, args, kwargs
:param thread_pool: thread pool to submit the requests to
:type thread_pool: concurrent.futures.thread.ThreadPoolExecutor
:param max_active_tasks:
The maximum number of tasks that may be either running or
waiting for consumption of their result. If not given, defaults
to the number of CPU cores on the machine.
:type max_active_tasks: int
:param do_first_task_sequentially:
If True, executes (and returns the result of) the first request
before submitting any other requests (the subsequent requests
are submitted with *max_active_tasks* parallelism).
:type do_first_task_sequentially: bool
Rate-limited asynchronous multithreaded task runner. Consumes tasks
from *request_iterator*. Yields their results in order, while
allowing up to *max_active_tasks* to run simultaneously. Unlike
concurrent.futures.Executor.map, prevents new tasks from starting
while there are *max_active_tasks* or more unconsumed results.
"""
tasks_in_progress = collections.deque()
if max_active_tasks is None:
max_active_tasks = cpu_count()
# The following two functions facilitate GC by not adding extra variables to the enclosing scope.
def submit_task(task_iterator, executor, futures_queue):
retval = next(task_iterator, None)
if retval is None:
return False
task_callable, task_args, task_kwargs = retval
task_future = executor.submit(task_callable, *task_args, **task_kwargs)
futures_queue.append(task_future)
return True
def next_result(tasks_in_progress):
future = tasks_in_progress.popleft()
try:
result = future.result(timeout=THREAD_TIMEOUT_MAX)
except KeyboardInterrupt:
print('')
os._exit(os.EX_IOERR)
return result
if do_first_task_sequentially:
task_callable, task_args, task_kwargs = next(request_iterator)
yield task_callable(*task_args, **task_kwargs)
for _i in range(max_active_tasks):
retval = submit_task(request_iterator, thread_pool, tasks_in_progress)
if not retval:
break
while len(tasks_in_progress) > 0:
result = next_result(tasks_in_progress)
submit_task(request_iterator, thread_pool, tasks_in_progress)
yield result
del result
def string_buffer_length(buf):
orig_pos = buf.tell()
buf.seek(0, os.SEEK_END)
buf_len = buf.tell()
buf.seek(orig_pos)
return buf_len
def normalize_time_input(t, future=False, default_unit='ms'):
"""
:param default_unit: units of the input time *t*; must be one of "s" or
"ms". This param is only respected if *t* looks like an int (e.g.
"12345", 12345).
:type default_unit: string
Converts inputs such as:
"2012-05-01"
"-5d"
1352863174
"1352863174"
to milliseconds since epoch. See http://labix.org/python-dateutil and :meth:`normalize_timedelta`.
"""
error_msg = 'Error: Expected an int timestamp, a date format (e.g. YYYY-MM-DD), or an int with a single-letter suffix (s=seconds, m=minutes, h=hours, d=days, w=weeks, M=months, y=years; e.g. "-10d" indicates 10 days ago); but got {t}'
if isinstance(t, basestring) and t.isdigit():
t = int(t)
if isinstance(t, basestring):
try:
t = normalize_timedelta(t)
except ValueError:
try:
t = int(time.mktime(dateutil.parser.parse(t).timetuple())*1000)
assert t > 0
except (ValueError, OverflowError, AssertionError):
raise ValueError(error_msg.format(t=t))
elif isinstance(t, numbers.Integral):
units_multipliers = {'ms': 1, 's': 1000}
if default_unit not in units_multipliers:
raise ValueError("Expected default_unit to be one of 's' or 'ms'")
t = t * units_multipliers[default_unit]
else:
raise ValueError(error_msg.format(t=t))
now = int(time.time()*1000)
if t < 0 or (future and t < now):
t += now
return t
def normalize_timedelta(timedelta):
"""
Given a string like "1w" or "-5d", convert it to an integer in milliseconds.
Integers without a suffix are interpreted as seconds.
Note: not related to the datetime timedelta class.
"""
try:
return int(timedelta) * 1000
except ValueError as e:
t, suffix = timedelta[:-1], timedelta[-1:]
suffix_multipliers = {'s': 1000, 'm': 1000*60, 'h': 1000*60*60, 'd': 1000*60*60*24, 'w': 1000*60*60*24*7,
'M': 1000*60*60*24*30, 'y': 1000*60*60*24*365}
if suffix not in suffix_multipliers:
raise ValueError()
return int(t) * suffix_multipliers[suffix]
# See http://stackoverflow.com/questions/4126348
class OrderedDefaultdict(collections.OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, self.items()
def group_array_by_field(array, field='group'):
groups = OrderedDefaultdict(list)
for item in array:
if field not in item and None not in groups:
groups[None] = []
groups[item.get(field)].append(item)
return groups
def merge(d, u):
"""
Recursively updates a dictionary.
Example: merge({"a": {"b": 1, "c": 2}}, {"a": {"b": 3}}) = {"a": {"b": 3, "c": 2}}
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = merge(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def _dict_raise_on_duplicates(ordered_pairs):
"""
Reject duplicate keys.
"""
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d
def json_load_raise_on_duplicates(*args, **kwargs):
"""
Like json.load(), but raises an error on duplicate keys.
"""
kwargs['object_pairs_hook'] = _dict_raise_on_duplicates
return json.load(*args, **kwargs)
def json_loads_raise_on_duplicates(*args, **kwargs):
"""
Like json.loads(), but raises an error on duplicate keys.
"""
kwargs['object_pairs_hook'] = _dict_raise_on_duplicates
return json.loads(*args, **kwargs)
def warn(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class Nonce:
'''
Generates a nonce by using the system's random number generator. If it fails
it uses python's random library to generate a random long integer.
The nonce is the random number concatenated with the time.
'''
def __init__(self):
try:
self.nonce = "%s%f" % (str(binascii.hexlify(os.urandom(32))), time.time())
except:
random.seed(time.time())
self.nonce = "%s%f" % (str(random.getrandbits(8*26)), time.time())
def __str__(self):
return self.nonce
@staticmethod
def update_nonce(input_params):
'''
Static method to return a copy of the input dictionary with an
additional unique nonce
:param input: an input dictionary that may be empty
:type input: dict
:returns an extended copy of the input with an additional nonce field
The input dictionary is updated with a nonce only if does not already
have a non empty nonce
'''
input_cp = input_params.copy()
if len(input_cp.get('nonce', '')) == 0:
input_cp['nonce'] = str(Nonce())
return input_cp
# Moved to the bottom due to circular imports
from .exec_utils import run, convert_handlers_to_dxlinks, parse_args_as_job_input, entry_point, DXJSONEncoder | 0.440951 | 0.163612 |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gevent_subprocess as subprocess
def test_communicate():
print 'spawn /bin/sh...'
p = subprocess.Popen(['/bin/sh'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate('''
echo COUCOU
ls /DONOTEXIST/FORREAL
ls -d /tmp
echo DONE
exit
''')
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout == 'COUCOU\n/tmp\nDONE\n'
assert stderr == 'ls: cannot access /DONOTEXIST/FORREAL: No such file or directory\n'
import gevent
def test_communicate_nostderr():
print 'spawn /bin/sh...'
p = subprocess.Popen(['/bin/sh'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate('''
echo COUCOU
ls /DONOTEXIST/FORREAL
ls -d /tmp
echo DONE
exit
''')
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout == 'COUCOU\n/tmp\nDONE\n'
assert stderr is None
def test_communicate_onlystdin():
print 'spawn /bin/sh...'
p = subprocess.Popen(['/bin/cat'], stdin=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate('''
HELLO
PARTY
PEOPLE!!!!
LETS ROOOOCK!!!!
''')
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout is None
assert stderr is None
def test_communicate_nostdin():
print 'spawn /bin/ls -d /tmp'
p = subprocess.Popen('/bin/ls -d /tmp'.split(' '), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate()
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout == '/tmp\n'
assert stderr == '' | test/test_communicate.py |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gevent_subprocess as subprocess
def test_communicate():
print 'spawn /bin/sh...'
p = subprocess.Popen(['/bin/sh'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate('''
echo COUCOU
ls /DONOTEXIST/FORREAL
ls -d /tmp
echo DONE
exit
''')
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout == 'COUCOU\n/tmp\nDONE\n'
assert stderr == 'ls: cannot access /DONOTEXIST/FORREAL: No such file or directory\n'
import gevent
def test_communicate_nostderr():
print 'spawn /bin/sh...'
p = subprocess.Popen(['/bin/sh'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate('''
echo COUCOU
ls /DONOTEXIST/FORREAL
ls -d /tmp
echo DONE
exit
''')
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout == 'COUCOU\n/tmp\nDONE\n'
assert stderr is None
def test_communicate_onlystdin():
print 'spawn /bin/sh...'
p = subprocess.Popen(['/bin/cat'], stdin=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate('''
HELLO
PARTY
PEOPLE!!!!
LETS ROOOOCK!!!!
''')
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout is None
assert stderr is None
def test_communicate_nostdin():
print 'spawn /bin/ls -d /tmp'
p = subprocess.Popen('/bin/ls -d /tmp'.split(' '), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print 'communicate...'
stdout, stderr = p.communicate()
print 'stdout --\n', stdout
print 'stderr --\n', stderr
assert stdout == '/tmp\n'
assert stderr == '' | 0.402627 | 0.06078 |
# Standard library imports
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import pytest
# Local imports
from spyder.plugins.profiler.widgets import profilergui
# --- Helper methods
# -----------------------------------------------------------------------------
# --- Fixtures
# -----------------------------------------------------------------------------
@pytest.fixture
def profiler_datatree_bot(qtbot):
"""Set up Profiler widget."""
tree = profilergui.ProfilerDataTree(None)
qtbot.addWidget(tree)
tree.show()
yield tree
tree.destroy()
# --- Tests
# -----------------------------------------------------------------------------
def test_format_measure(profiler_datatree_bot):
""" Test ProfilerDataTree.format_measure()."""
tree = profiler_datatree_bot
fm = tree.format_measure
assert fm(125) == '125'
assert fm(1.25e-8) == '12.50 ns'
assert fm(1.25e-5) == '12.50 us'
assert fm(1.25e-2) == '12.50 ms'
assert fm(12.5) == '12.50 sec'
assert fm(125.5) == '2.5 min'
assert fm(12555.5) == '3h:29min'
assert fm(-125) == '125'
assert fm(-1.25e-8) == '12.50 ns'
assert fm(-1.25e-5) == '12.50 us'
assert fm(-1.25e-2) == '12.50 ms'
assert fm(-12.5) == '12.50 sec'
assert fm(-125.5) == '2.5 min'
assert fm(-12555.5) == '3h:29min'
def test_color_string(profiler_datatree_bot):
""" Test ProfilerDataTree.color_string()."""
tree = profiler_datatree_bot
cs = tree.color_string
tree.compare_file = 'test'
assert cs([5.0]) == ['5.00 sec', ['', 'black']]
assert cs([1.251e-5, 1.251e-5]) == ['12.51 us', ['', 'black']]
assert cs([5.0, 4.0]) == ['5.00 sec', ['+1000.00 ms', 'red']]
assert cs([4.0, 5.0]) == ['4.00 sec', ['-1000.00 ms', 'green']]
tree.compare_file = None
assert cs([4.0, 5.0]) == ['4.00 sec', ['', 'black']]
def test_format_output(profiler_datatree_bot):
""" Test ProfilerDataTree.format_output()."""
tree = profiler_datatree_bot
fo = tree.format_output
# Mock Stats class to be able to use fixed data for input.
class Stats:
stats = {}
tree.stats1 = [Stats(), Stats()]
tree.stats1[0].stats = {('key1'): (1, 1000, 3.5, 1.5, {}),
('key2'): (1, 1200, 2.0, 2.0, {})
}
tree.stats1[1].stats = {('key1'): (1, 1000, 3.7, 1.3, {}),
('key2'): (1, 1199, 2.4, 2.4, {})
}
tree.compare_file = 'test'
assert list((fo('key1'))) == [['1000', ['', 'black']],
['3.50 sec', ['-200.00 ms', 'green']],
['1.50 sec', ['+200.00 ms', 'red']]]
assert list((fo('key2'))) == [['1200', ['+1', 'red']],
['2.00 sec', ['-400.00 ms', 'green']],
['2.00 sec', ['-400.00 ms', 'green']]]
if __name__ == "__main__":
pytest.main() | spyder/plugins/profiler/tests/test_profiler.py | # Standard library imports
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import pytest
# Local imports
from spyder.plugins.profiler.widgets import profilergui
# --- Helper methods
# -----------------------------------------------------------------------------
# --- Fixtures
# -----------------------------------------------------------------------------
@pytest.fixture
def profiler_datatree_bot(qtbot):
"""Set up Profiler widget."""
tree = profilergui.ProfilerDataTree(None)
qtbot.addWidget(tree)
tree.show()
yield tree
tree.destroy()
# --- Tests
# -----------------------------------------------------------------------------
def test_format_measure(profiler_datatree_bot):
""" Test ProfilerDataTree.format_measure()."""
tree = profiler_datatree_bot
fm = tree.format_measure
assert fm(125) == '125'
assert fm(1.25e-8) == '12.50 ns'
assert fm(1.25e-5) == '12.50 us'
assert fm(1.25e-2) == '12.50 ms'
assert fm(12.5) == '12.50 sec'
assert fm(125.5) == '2.5 min'
assert fm(12555.5) == '3h:29min'
assert fm(-125) == '125'
assert fm(-1.25e-8) == '12.50 ns'
assert fm(-1.25e-5) == '12.50 us'
assert fm(-1.25e-2) == '12.50 ms'
assert fm(-12.5) == '12.50 sec'
assert fm(-125.5) == '2.5 min'
assert fm(-12555.5) == '3h:29min'
def test_color_string(profiler_datatree_bot):
""" Test ProfilerDataTree.color_string()."""
tree = profiler_datatree_bot
cs = tree.color_string
tree.compare_file = 'test'
assert cs([5.0]) == ['5.00 sec', ['', 'black']]
assert cs([1.251e-5, 1.251e-5]) == ['12.51 us', ['', 'black']]
assert cs([5.0, 4.0]) == ['5.00 sec', ['+1000.00 ms', 'red']]
assert cs([4.0, 5.0]) == ['4.00 sec', ['-1000.00 ms', 'green']]
tree.compare_file = None
assert cs([4.0, 5.0]) == ['4.00 sec', ['', 'black']]
def test_format_output(profiler_datatree_bot):
""" Test ProfilerDataTree.format_output()."""
tree = profiler_datatree_bot
fo = tree.format_output
# Mock Stats class to be able to use fixed data for input.
class Stats:
stats = {}
tree.stats1 = [Stats(), Stats()]
tree.stats1[0].stats = {('key1'): (1, 1000, 3.5, 1.5, {}),
('key2'): (1, 1200, 2.0, 2.0, {})
}
tree.stats1[1].stats = {('key1'): (1, 1000, 3.7, 1.3, {}),
('key2'): (1, 1199, 2.4, 2.4, {})
}
tree.compare_file = 'test'
assert list((fo('key1'))) == [['1000', ['', 'black']],
['3.50 sec', ['-200.00 ms', 'green']],
['1.50 sec', ['+200.00 ms', 'red']]]
assert list((fo('key2'))) == [['1200', ['+1', 'red']],
['2.00 sec', ['-400.00 ms', 'green']],
['2.00 sec', ['-400.00 ms', 'green']]]
if __name__ == "__main__":
pytest.main() | 0.792906 | 0.664186 |
import configparser as ConfigParser
from optparse import OptionParser
def str_to_bool(s):
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError
def read_conf(cfg_path, options):
cfg_file=options.cfg
Config = ConfigParser.ConfigParser()
Config.read(cfg_file)
#[windowing]
options.fs = int(Config.get('windowing', 'fs'))
options.cw_len = int(Config.get('windowing', 'cw_len'))
options.cw_shift = int(Config.get('windowing', 'cw_shift'))
#[cnn]
options.cnn_N_filt = list(map(int, Config.get('cnn', 'cnn_N_filt').split(',')))
options.cnn_len_filt = list(map(int, Config.get('cnn', 'cnn_len_filt').split(',')))
options.cnn_max_pool_len = list(map(int, Config.get('cnn', 'cnn_max_pool_len').split(',')))
options.cnn_use_laynorm_inp = str_to_bool(Config.get('cnn', 'cnn_use_laynorm_inp'))
options.cnn_use_batchnorm_inp = str_to_bool(Config.get('cnn', 'cnn_use_batchnorm_inp'))
options.cnn_use_laynorm = list(map(str_to_bool, Config.get('cnn', 'cnn_use_laynorm').split(',')))
options.cnn_use_batchnorm = list(map(str_to_bool, Config.get('cnn', 'cnn_use_batchnorm').split(',')))
options.cnn_act = list(map(str, Config.get('cnn', 'cnn_act').split(',')))
options.cnn_drop = list(map(float, Config.get('cnn', 'cnn_drop').split(',')))
#[dnn]
options.fc_lay = list(map(int, Config.get('dnn', 'fc_lay').split(',')))
options.fc_drop = list(map(float, Config.get('dnn', 'fc_drop').split(',')))
options.fc_use_laynorm_inp = str_to_bool(Config.get('dnn', 'fc_use_laynorm_inp'))
options.fc_use_batchnorm_inp = str_to_bool(Config.get('dnn', 'fc_use_batchnorm_inp'))
options.fc_use_batchnorm = list(map(str_to_bool, Config.get('dnn', 'fc_use_batchnorm').split(',')))
options.fc_use_laynorm = list(map(str_to_bool, Config.get('dnn', 'fc_use_laynorm').split(',')))
options.fc_act = list(map(str, Config.get('dnn', 'fc_act').split(',')))
#[class]
options.class_lay = list(map(int, Config.get('class', 'class_lay').split(',')))
options.class_drop = list(map(float, Config.get('class', 'class_drop').split(',')))
options.class_use_laynorm_inp = str_to_bool(Config.get('class', 'class_use_laynorm_inp'))
options.class_use_batchnorm_inp = str_to_bool(Config.get('class', 'class_use_batchnorm_inp'))
options.class_use_batchnorm = list(map(str_to_bool, Config.get('class', 'class_use_batchnorm').split(',')))
options.class_use_laynorm = list(map(str_to_bool, Config.get('class', 'class_use_laynorm').split(',')))
options.class_act = list(map(str, Config.get('class', 'class_act').split(',')))
#[optimization]
options.lr=float(Config.get('optimization', 'lr'))
options.batch_size=int(Config.get('optimization', 'batch_size'))
options.N_epochs=int(Config.get('optimization', 'N_epochs'))
options.N_eval_epoch=int(Config.get('optimization', 'N_eval_epoch'))
options.print_every=int(Config.get('optimization', 'print_every'))
options.oversampling=int(Config.get('optimization', 'oversampling'))
options.seed=int(Config.get('optimization', 'seed'))
return options | sincnet/utils.py | import configparser as ConfigParser
from optparse import OptionParser
def str_to_bool(s):
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError
def read_conf(cfg_path, options):
cfg_file=options.cfg
Config = ConfigParser.ConfigParser()
Config.read(cfg_file)
#[windowing]
options.fs = int(Config.get('windowing', 'fs'))
options.cw_len = int(Config.get('windowing', 'cw_len'))
options.cw_shift = int(Config.get('windowing', 'cw_shift'))
#[cnn]
options.cnn_N_filt = list(map(int, Config.get('cnn', 'cnn_N_filt').split(',')))
options.cnn_len_filt = list(map(int, Config.get('cnn', 'cnn_len_filt').split(',')))
options.cnn_max_pool_len = list(map(int, Config.get('cnn', 'cnn_max_pool_len').split(',')))
options.cnn_use_laynorm_inp = str_to_bool(Config.get('cnn', 'cnn_use_laynorm_inp'))
options.cnn_use_batchnorm_inp = str_to_bool(Config.get('cnn', 'cnn_use_batchnorm_inp'))
options.cnn_use_laynorm = list(map(str_to_bool, Config.get('cnn', 'cnn_use_laynorm').split(',')))
options.cnn_use_batchnorm = list(map(str_to_bool, Config.get('cnn', 'cnn_use_batchnorm').split(',')))
options.cnn_act = list(map(str, Config.get('cnn', 'cnn_act').split(',')))
options.cnn_drop = list(map(float, Config.get('cnn', 'cnn_drop').split(',')))
#[dnn]
options.fc_lay = list(map(int, Config.get('dnn', 'fc_lay').split(',')))
options.fc_drop = list(map(float, Config.get('dnn', 'fc_drop').split(',')))
options.fc_use_laynorm_inp = str_to_bool(Config.get('dnn', 'fc_use_laynorm_inp'))
options.fc_use_batchnorm_inp = str_to_bool(Config.get('dnn', 'fc_use_batchnorm_inp'))
options.fc_use_batchnorm = list(map(str_to_bool, Config.get('dnn', 'fc_use_batchnorm').split(',')))
options.fc_use_laynorm = list(map(str_to_bool, Config.get('dnn', 'fc_use_laynorm').split(',')))
options.fc_act = list(map(str, Config.get('dnn', 'fc_act').split(',')))
#[class]
options.class_lay = list(map(int, Config.get('class', 'class_lay').split(',')))
options.class_drop = list(map(float, Config.get('class', 'class_drop').split(',')))
options.class_use_laynorm_inp = str_to_bool(Config.get('class', 'class_use_laynorm_inp'))
options.class_use_batchnorm_inp = str_to_bool(Config.get('class', 'class_use_batchnorm_inp'))
options.class_use_batchnorm = list(map(str_to_bool, Config.get('class', 'class_use_batchnorm').split(',')))
options.class_use_laynorm = list(map(str_to_bool, Config.get('class', 'class_use_laynorm').split(',')))
options.class_act = list(map(str, Config.get('class', 'class_act').split(',')))
#[optimization]
options.lr=float(Config.get('optimization', 'lr'))
options.batch_size=int(Config.get('optimization', 'batch_size'))
options.N_epochs=int(Config.get('optimization', 'N_epochs'))
options.N_eval_epoch=int(Config.get('optimization', 'N_eval_epoch'))
options.print_every=int(Config.get('optimization', 'print_every'))
options.oversampling=int(Config.get('optimization', 'oversampling'))
options.seed=int(Config.get('optimization', 'seed'))
return options | 0.291687 | 0.072276 |
import gym
import gym_reacher
import time
import os
import numpy as np
import pandas as pd
import optuna
import yaml
from pathlib import Path
from stable_baselines import A2C, ACKTR, DDPG, PPO1, PPO2, SAC, TRPO, TD3
from stable_baselines import results_plotter
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines.deepq.policies import MlpPolicy as mlp_dqn
from stable_baselines.sac.policies import MlpPolicy as mlp_sac
from stable_baselines.ddpg.policies import MlpPolicy as mlp_ddpg
from stable_baselines.td3.policies import MlpPolicy as mlp_td3
from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common import set_global_seeds
from stable_baselines.common.policies import MlpPolicy, MlpLstmPolicy, MlpLnLstmPolicy
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines.common.vec_env import DummyVecEnv, VecCheckNan
class NanAndInfEnv(gym.Env):
"""Custom Environment that raised NaNs and Infs"""
metadata = {'render.modes': ['human']}
def __init__(self):
super(NanAndInfEnv, self).__init__()
self.action_space = spaces.Box(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float64)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float64)
def step(self, _action):
randf = np.random.rand()
if randf > 0.99:
obs = float('NaN')
elif randf > 0.98:
obs = float('inf')
else:
obs = randf
return [obs], 0.0, False, {}
def reset(self):
return [0.0]
def render(self, mode='human', close=False):
pass
def make_env(env_id, rank, seed=0):
"""
Utility function for multiprocessed env.
:param env_id: (str) the environment ID
:param seed: (int) the inital seed for RNG
:param rank: (int) index of the subprocess
"""
def _init():
env = gym.make(env_id)
# Important: use a different seed for each environment
env.seed(seed + rank)
return env
set_global_seeds(seed)
return _init
def evaluate_multi(model, env, num_steps=1000):
"""
Evaluate a RL agent
:param model: (BaseRLModel object) the RL Agent
:param num_steps: (int) number of timesteps to evaluate it
:return: (float) Mean reward
"""
episode_rewards = [[0.0] for _ in range(env.num_envs)]
obs = env.reset()
for i in range(num_steps):
# _states are only useful when using LSTM policies
actions, _states = model.predict(obs, deterministic=True)
# here, action, rewards and dones are arrays
# because we are using vectorized env
obs, rewards, dones, info = env.step(actions)
# Stats
for i in range(env.num_envs):
episode_rewards[i][-1] += rewards[i]
if dones[i]:
episode_rewards[i].append(0.0)
mean_rewards = [0.0 for _ in range(env.num_envs)]
n_episodes = 0
for i in range(env.num_envs):
mean_rewards[i] = np.mean(episode_rewards[i])
n_episodes += len(episode_rewards[i])
# Compute mean reward
mean_reward = round(np.mean(mean_rewards), 1)
print("Mean reward:", mean_reward, "Num episodes:", n_episodes)
return mean_reward
def sample_ppo2_params(trial):
"""
Sampler for PPO2 hyperparams.
:param trial: (optuna.trial)
:return: (dict)
"""
gamma = trial.suggest_categorical('gamma', [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
n_steps = trial.suggest_categorical('n_steps', [16, 32, 64, 128, 256, 512, 1024, 2048])
ent_coef = trial.suggest_loguniform('ent_coef', 0.00000001, 0.1)
learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1)
lam = trial.suggest_categorical('lam', [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
noptepochs = trial.suggest_categorical('noptepochs', [1, 5, 10, 20, 30, 50])
cliprange = trial.suggest_categorical('cliprange', [0.1, 0.2, 0.3, 0.4])
batch_size = trial.suggest_categorical('batch_size', [32, 64, 128, 256])
if n_steps < batch_size:
nminibatches = 1
else:
nminibatches = int(n_steps / batch_size)
return {
'gamma': gamma,
'n_steps': n_steps,
'ent_coef': ent_coef,
'learning_rate': learning_rate,
'lam': lam,
'nminibatches': nminibatches,
'noptepochs': noptepochs,
'cliprange': cliprange
}
n_env = 4
env_id = 'Reacher3Dof-v0'
def create_env(env_id):
# check for NaNs (I get NaNs when using DummyVecEnv)
# env = DummyVecEnv([lambda: NanAndInfEnv()])
# env = VecCheckNan(env, raise_exception=True)
env = gym.make(env_id)
env = DummyVecEnv([lambda: env])
# env = DummyVecEnv([make_env(env_id, i, seed=0) for i in range(n_env)])
# env = SubprocVecEnv([make_env(env_id, i) for i in range(n_env)])
# env = VecNormalize(env)
eval_env = gym.make(env_id)
eval_env = DummyVecEnv([lambda: eval_env])
# eval_env = DummyVecEnv([make_env(env_id, i, seed=0) for i in range(n_env)])
# eval_env = SubprocVecEnv([make_env(env_id, i) for i in range(n_env)])
# eval_env = VecNormalize(eval_env)
return env, eval_env
def optimize_agent(trial):
""" Train the model and optimise
Optuna maximises the negative log likelihood, so we
need to negate the reward here
"""
model_params = sample_ppo2_params(trial)
print(model_params)
env, eval_env = create_env(env_id)
model = PPO2(MlpPolicy, env, verbose=0, **model_params)
start_train = time.time()
model.learn(total_timesteps=10000)
end_train = time.time()
print("opti train time (s): ", end_train-start_train)
# mean_reward, _ = evaluate_policy(model, eval_env, n_eval_episodes=20) # cannot use with multiprocess
# mean_reward = evaluate_single(model, eval_env, num_steps=1000)
start_eval = time.time()
mean_reward = evaluate_multi(model, eval_env, num_steps=2000)
end_eval = time.time()
print("opti eval time (s): ", end_eval-start_eval)
return -1 * mean_reward
if __name__ == '__main__':
study = optuna.create_study()
study.optimize(optimize_agent, n_trials=2, n_jobs=-1)
best_params = study.best_params
if best_params['n_steps'] < best_params['batch_size']:
best_params['nminibatches'] = 1
else:
best_params['nminibatches'] = int(best_params['n_steps'] / best_params['batch_size'])
del best_params['batch_size'] # batch_size is not a PPO2 parameter
print("best params: ", best_params)
# print("best value: ", study.best_value)
# print("best best trial: ", study.best_trial)
with open('../results/hyperparameter.yml', 'w') as outfile:
yaml.dump(best_params, outfile)
env, eval_env = create_env(env_id)
model = PPO2(MlpPolicy, env, verbose=1, **best_params)
start = time.time()
model.learn(total_timesteps=10000)
end = time.time()
print("training time (s): ", end-start)
# mean_reward, _ = evaluate_policy(model, eval_env, n_eval_episodes=20) # cannot use with multiprocess
# mean_reward = evaluate_single(model, eval_env, num_steps=1000)
mean_reward = evaluate_multi(model, eval_env, num_steps=10000) | scripts/13_hyperparameter_tuning.py | import gym
import gym_reacher
import time
import os
import numpy as np
import pandas as pd
import optuna
import yaml
from pathlib import Path
from stable_baselines import A2C, ACKTR, DDPG, PPO1, PPO2, SAC, TRPO, TD3
from stable_baselines import results_plotter
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines.deepq.policies import MlpPolicy as mlp_dqn
from stable_baselines.sac.policies import MlpPolicy as mlp_sac
from stable_baselines.ddpg.policies import MlpPolicy as mlp_ddpg
from stable_baselines.td3.policies import MlpPolicy as mlp_td3
from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common import set_global_seeds
from stable_baselines.common.policies import MlpPolicy, MlpLstmPolicy, MlpLnLstmPolicy
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines.common.vec_env import DummyVecEnv, VecCheckNan
class NanAndInfEnv(gym.Env):
"""Custom Environment that raised NaNs and Infs"""
metadata = {'render.modes': ['human']}
def __init__(self):
super(NanAndInfEnv, self).__init__()
self.action_space = spaces.Box(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float64)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float64)
def step(self, _action):
randf = np.random.rand()
if randf > 0.99:
obs = float('NaN')
elif randf > 0.98:
obs = float('inf')
else:
obs = randf
return [obs], 0.0, False, {}
def reset(self):
return [0.0]
def render(self, mode='human', close=False):
pass
def make_env(env_id, rank, seed=0):
"""
Utility function for multiprocessed env.
:param env_id: (str) the environment ID
:param seed: (int) the inital seed for RNG
:param rank: (int) index of the subprocess
"""
def _init():
env = gym.make(env_id)
# Important: use a different seed for each environment
env.seed(seed + rank)
return env
set_global_seeds(seed)
return _init
def evaluate_multi(model, env, num_steps=1000):
"""
Evaluate a RL agent
:param model: (BaseRLModel object) the RL Agent
:param num_steps: (int) number of timesteps to evaluate it
:return: (float) Mean reward
"""
episode_rewards = [[0.0] for _ in range(env.num_envs)]
obs = env.reset()
for i in range(num_steps):
# _states are only useful when using LSTM policies
actions, _states = model.predict(obs, deterministic=True)
# here, action, rewards and dones are arrays
# because we are using vectorized env
obs, rewards, dones, info = env.step(actions)
# Stats
for i in range(env.num_envs):
episode_rewards[i][-1] += rewards[i]
if dones[i]:
episode_rewards[i].append(0.0)
mean_rewards = [0.0 for _ in range(env.num_envs)]
n_episodes = 0
for i in range(env.num_envs):
mean_rewards[i] = np.mean(episode_rewards[i])
n_episodes += len(episode_rewards[i])
# Compute mean reward
mean_reward = round(np.mean(mean_rewards), 1)
print("Mean reward:", mean_reward, "Num episodes:", n_episodes)
return mean_reward
def sample_ppo2_params(trial):
"""
Sampler for PPO2 hyperparams.
:param trial: (optuna.trial)
:return: (dict)
"""
gamma = trial.suggest_categorical('gamma', [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
n_steps = trial.suggest_categorical('n_steps', [16, 32, 64, 128, 256, 512, 1024, 2048])
ent_coef = trial.suggest_loguniform('ent_coef', 0.00000001, 0.1)
learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1)
lam = trial.suggest_categorical('lam', [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
noptepochs = trial.suggest_categorical('noptepochs', [1, 5, 10, 20, 30, 50])
cliprange = trial.suggest_categorical('cliprange', [0.1, 0.2, 0.3, 0.4])
batch_size = trial.suggest_categorical('batch_size', [32, 64, 128, 256])
if n_steps < batch_size:
nminibatches = 1
else:
nminibatches = int(n_steps / batch_size)
return {
'gamma': gamma,
'n_steps': n_steps,
'ent_coef': ent_coef,
'learning_rate': learning_rate,
'lam': lam,
'nminibatches': nminibatches,
'noptepochs': noptepochs,
'cliprange': cliprange
}
n_env = 4
env_id = 'Reacher3Dof-v0'
def create_env(env_id):
# check for NaNs (I get NaNs when using DummyVecEnv)
# env = DummyVecEnv([lambda: NanAndInfEnv()])
# env = VecCheckNan(env, raise_exception=True)
env = gym.make(env_id)
env = DummyVecEnv([lambda: env])
# env = DummyVecEnv([make_env(env_id, i, seed=0) for i in range(n_env)])
# env = SubprocVecEnv([make_env(env_id, i) for i in range(n_env)])
# env = VecNormalize(env)
eval_env = gym.make(env_id)
eval_env = DummyVecEnv([lambda: eval_env])
# eval_env = DummyVecEnv([make_env(env_id, i, seed=0) for i in range(n_env)])
# eval_env = SubprocVecEnv([make_env(env_id, i) for i in range(n_env)])
# eval_env = VecNormalize(eval_env)
return env, eval_env
def optimize_agent(trial):
""" Train the model and optimise
Optuna maximises the negative log likelihood, so we
need to negate the reward here
"""
model_params = sample_ppo2_params(trial)
print(model_params)
env, eval_env = create_env(env_id)
model = PPO2(MlpPolicy, env, verbose=0, **model_params)
start_train = time.time()
model.learn(total_timesteps=10000)
end_train = time.time()
print("opti train time (s): ", end_train-start_train)
# mean_reward, _ = evaluate_policy(model, eval_env, n_eval_episodes=20) # cannot use with multiprocess
# mean_reward = evaluate_single(model, eval_env, num_steps=1000)
start_eval = time.time()
mean_reward = evaluate_multi(model, eval_env, num_steps=2000)
end_eval = time.time()
print("opti eval time (s): ", end_eval-start_eval)
return -1 * mean_reward
if __name__ == '__main__':
study = optuna.create_study()
study.optimize(optimize_agent, n_trials=2, n_jobs=-1)
best_params = study.best_params
if best_params['n_steps'] < best_params['batch_size']:
best_params['nminibatches'] = 1
else:
best_params['nminibatches'] = int(best_params['n_steps'] / best_params['batch_size'])
del best_params['batch_size'] # batch_size is not a PPO2 parameter
print("best params: ", best_params)
# print("best value: ", study.best_value)
# print("best best trial: ", study.best_trial)
with open('../results/hyperparameter.yml', 'w') as outfile:
yaml.dump(best_params, outfile)
env, eval_env = create_env(env_id)
model = PPO2(MlpPolicy, env, verbose=1, **best_params)
start = time.time()
model.learn(total_timesteps=10000)
end = time.time()
print("training time (s): ", end-start)
# mean_reward, _ = evaluate_policy(model, eval_env, n_eval_episodes=20) # cannot use with multiprocess
# mean_reward = evaluate_single(model, eval_env, num_steps=1000)
mean_reward = evaluate_multi(model, eval_env, num_steps=10000) | 0.688154 | 0.369059 |
from scheduler.exceptions import KubeHTTPException
from scheduler.resources import Resource
from scheduler.utils import dict_merge
class Service(Resource):
short_name = 'svc'
def get(self, namespace, name=None, **kwargs):
"""
Fetch a single Service or a list
"""
url = '/namespaces/{}/services'
args = [namespace]
if name is not None:
args.append(name)
url += '/{}'
message = 'get Service "{}" in Namespace "{}"'
else:
message = 'get Services in Namespace "{}"'
url = self.api(url, *args)
response = self.http_get(url, params=self.query_params(**kwargs))
if self.unhealthy(response.status_code):
args.reverse() # error msg is in reverse order
raise KubeHTTPException(response, message, *args)
return response
def create(self, namespace, name, **kwargs):
# Ports and app type will be overwritten as required
manifest = {
'kind': 'Service',
'apiVersion': 'v1',
'metadata': {
'name': name,
'labels': {
'app': namespace,
'heritage': 'deis'
},
'annotations': {}
},
'spec': {
'ports': [{
'name': 'http',
'port': 80,
'targetPort': 5000,
'protocol': 'TCP'
}],
'selector': {
'app': namespace,
'heritage': 'deis'
}
}
}
data = dict_merge(manifest, kwargs.get('data', {}))
url = self.api("/namespaces/{}/services", namespace)
response = self.http_post(url, json=data)
if self.unhealthy(response.status_code):
raise KubeHTTPException(
response,
'create Service "{}" in Namespace "{}"', namespace, namespace
)
return response
def update(self, namespace, name, data):
url = self.api("/namespaces/{}/services/{}", namespace, name)
response = self.http_put(url, json=data)
if self.unhealthy(response.status_code):
raise KubeHTTPException(
response,
'update Service "{}" in Namespace "{}"', namespace, name
)
return response
def delete(self, namespace, name):
url = self.api("/namespaces/{}/services/{}", namespace, name)
response = self.http_delete(url)
if self.unhealthy(response.status_code):
raise KubeHTTPException(
response,
'delete Service "{}" in Namespace "{}"', name, namespace
)
return response | rootfs/scheduler/resources/service.py | from scheduler.exceptions import KubeHTTPException
from scheduler.resources import Resource
from scheduler.utils import dict_merge
class Service(Resource):
short_name = 'svc'
def get(self, namespace, name=None, **kwargs):
"""
Fetch a single Service or a list
"""
url = '/namespaces/{}/services'
args = [namespace]
if name is not None:
args.append(name)
url += '/{}'
message = 'get Service "{}" in Namespace "{}"'
else:
message = 'get Services in Namespace "{}"'
url = self.api(url, *args)
response = self.http_get(url, params=self.query_params(**kwargs))
if self.unhealthy(response.status_code):
args.reverse() # error msg is in reverse order
raise KubeHTTPException(response, message, *args)
return response
def create(self, namespace, name, **kwargs):
# Ports and app type will be overwritten as required
manifest = {
'kind': 'Service',
'apiVersion': 'v1',
'metadata': {
'name': name,
'labels': {
'app': namespace,
'heritage': 'deis'
},
'annotations': {}
},
'spec': {
'ports': [{
'name': 'http',
'port': 80,
'targetPort': 5000,
'protocol': 'TCP'
}],
'selector': {
'app': namespace,
'heritage': 'deis'
}
}
}
data = dict_merge(manifest, kwargs.get('data', {}))
url = self.api("/namespaces/{}/services", namespace)
response = self.http_post(url, json=data)
if self.unhealthy(response.status_code):
raise KubeHTTPException(
response,
'create Service "{}" in Namespace "{}"', namespace, namespace
)
return response
def update(self, namespace, name, data):
url = self.api("/namespaces/{}/services/{}", namespace, name)
response = self.http_put(url, json=data)
if self.unhealthy(response.status_code):
raise KubeHTTPException(
response,
'update Service "{}" in Namespace "{}"', namespace, name
)
return response
def delete(self, namespace, name):
url = self.api("/namespaces/{}/services/{}", namespace, name)
response = self.http_delete(url)
if self.unhealthy(response.status_code):
raise KubeHTTPException(
response,
'delete Service "{}" in Namespace "{}"', name, namespace
)
return response | 0.505615 | 0.0771 |
import asyncio
import aiohttp
BASE_URL = "http://0.0.0.0:8080"
async def login(session, credentials: dict) -> dict:
"""Retrieve token with credentials"""
resp = await session.post(f"{BASE_URL}/auth", json=credentials)
assert resp.status == 200, f"Authentication Failed, {resp.reason}"
token_payload = await resp.json()
return token_payload
async def refresh(session, access_token: str, refresh_token: str) -> str:
"""Retrieve new access token with refresh token."""
resp = await session.post(
f"{BASE_URL}/auth/refresh",
json={"refresh_token": refresh_token},
headers={"Authorization": f"Bearer {access_token}"},
)
assert resp.status == 200, f"Failed to refresh, {resp.reason}"
token_payload = await resp.json()
return token_payload["access_token"]
async def get_protected_data(session, access_token: str) -> dict:
"""Fetch data from the protected route"""
resp = await session.get(
f"{BASE_URL}/protected", headers={"Authorization": f"Bearer {access_token}"}
)
data = await resp.json()
return data
async def main():
async with aiohttp.ClientSession() as session:
credentials = {"name": "david"}
print("Logging in.")
token_payload = await login(session, credentials)
access_token = token_payload["access_token"]
print("Fetching data with access token.")
data = await get_protected_data(session, access_token)
print(data)
print("Waiting access token to expire.")
await asyncio.sleep(3.2)
print("Trying to fetch the data with expired token.")
response = await get_protected_data(session, access_token)
print(response)
print("Refreshing the access token.")
refresh_token = token_payload["refresh_token"]
access_token = await refresh(session, access_token, refresh_token)
print("Fetching data with new access token.")
data = await get_protected_data(session, access_token)
print(data)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
data = loop.run_until_complete(main()) | examples/refresh_token/cli.py | import asyncio
import aiohttp
BASE_URL = "http://0.0.0.0:8080"
async def login(session, credentials: dict) -> dict:
"""Retrieve token with credentials"""
resp = await session.post(f"{BASE_URL}/auth", json=credentials)
assert resp.status == 200, f"Authentication Failed, {resp.reason}"
token_payload = await resp.json()
return token_payload
async def refresh(session, access_token: str, refresh_token: str) -> str:
"""Retrieve new access token with refresh token."""
resp = await session.post(
f"{BASE_URL}/auth/refresh",
json={"refresh_token": refresh_token},
headers={"Authorization": f"Bearer {access_token}"},
)
assert resp.status == 200, f"Failed to refresh, {resp.reason}"
token_payload = await resp.json()
return token_payload["access_token"]
async def get_protected_data(session, access_token: str) -> dict:
"""Fetch data from the protected route"""
resp = await session.get(
f"{BASE_URL}/protected", headers={"Authorization": f"Bearer {access_token}"}
)
data = await resp.json()
return data
async def main():
async with aiohttp.ClientSession() as session:
credentials = {"name": "david"}
print("Logging in.")
token_payload = await login(session, credentials)
access_token = token_payload["access_token"]
print("Fetching data with access token.")
data = await get_protected_data(session, access_token)
print(data)
print("Waiting access token to expire.")
await asyncio.sleep(3.2)
print("Trying to fetch the data with expired token.")
response = await get_protected_data(session, access_token)
print(response)
print("Refreshing the access token.")
refresh_token = token_payload["refresh_token"]
access_token = await refresh(session, access_token, refresh_token)
print("Fetching data with new access token.")
data = await get_protected_data(session, access_token)
print(data)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
data = loop.run_until_complete(main()) | 0.627267 | 0.28143 |
import numpy as np
import pandas as pd
import itertools as it
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from source.engine import funciones as f
from source.engine.InputsNoRevolvente import InputsNoRevolvente
from source.engine.OutputsNoRevolvente import OutputsNoRevolvente
#CAMBIAR
nombreproducto = 'veh'
inicio = 201901
fin = 202012
agregado_cortes=['C_SEGMENTO','C_MONEDA','C_PLAZO','C_OK'] # Gahi & Veh
#agregado_cortes=['C_SEGMENTO','C_PLAZO','C_OK'] # Hipot & MiViv
#agregado_cortes=['C_SEGMENTO','C_MONEDA','C_PLAZO','C_CANAL','C_OK'] # CEF
#agregado_cortes=['C_PRODUCTO','C_PYG','C_OK'] # Pyme
lista_cortes=[['C_SEGMENTO'],['C_MONEDA'],['C_PLAZO'],['C_OK']] # Gahi & Veh
#lista_cortes=[['C_SEGMENTO'],['C_PLAZO'],['C_OK']] # Hipot & MiViv
#lista_cortes=[['C_SEGMENTO'],['C_MONEDA'],['C_PLAZO'],['C_CANAL'],['C_OK']] # CEF
#lista_cortes=[['C_PRODUCTO'],['C_PYG'],['C_OK']] # Pyme
ruta_real=['/Users/renzomartinch/Downloads/Comite/'+str(nombreproducto)+'_reales.csv']
ruta_teorico=['/Users/renzomartinch/Downloads/Comite/'+str(nombreproducto)+'_inputs.csv']
ruta_tmin=['/Users/renzomartinch/Downloads/Comite/'+str(nombreproducto)+'_precios.csv']
n = len(agregado_cortes)
REAL = pd.read_csv(ruta_real[0])#, encoding='latin-1')
TEORICO = pd.read_csv(ruta_teorico[0])#, encoding='latin-1')
TMIN = pd.read_csv(ruta_tmin[0])#, encoding='latin-1')
product = InputsNoRevolvente(REAL,TEORICO,mincosecha=inicio,maxcosecha=fin)
#Inputs
product.condensar(agregado_cortes)
product.optimizar()
a = product.promedios
b = product.stats.drop(product.stats.iloc[:, 0:(n+1)], axis = 1)
#Tmin
product.impactoTmin(TMIN)
c = product.Tmin.drop(product.Tmin.iloc[:, 0:(n+1)], axis = 1)
#TIR
product.impactoTIR(TMIN)
d = product.TIR.drop(product.TIR.iloc[:, 0:(n+1)], axis = 1)
#Outputs
product = OutputsNoRevolvente(REAL,TEORICO,mincosecha=inicio,maxcosecha=fin)
product.condensar(agregado_cortes)
e = product.ratios.drop(product.ratios.iloc[:, 0:(n+2)], axis = 1)
f = product.niveles.drop(product.niveles.iloc[:, 0:(n+2)], axis = 1)
agregado = pd.concat([a,b,c,d,e,f], axis=1) #<- añadir c,d,e,f
first = True
for corte in lista_cortes:
condensado = agregado.groupby(corte).size().reset_index().rename(columns={0:'descartar'}).drop('descartar',1)
for j in range(len(condensado)):
temp = agregado.loc[agregado[corte[0]] == condensado.loc[j,corte[0]]]
r = temp['recuento']
m = temp['Monto']
e = temp['Capital promedio']
s = temp['n_saldo_real']
condensado.at[j,'recuento'] = sum(r)
for k in ['pd_real','can_real','pre_real','pd_teorico','can_teorico','pre_teorico','pd_optimo','can_optimo','pre_optimo','MAE_pd','MAE_can','MAE_pre','MAEop_pd','MAEop_can','MAEop_pre','scalar_pd','scalar_can','scalar_pre']:
condensado.at[j,k] = sum(temp[k] * r) / sum(r)
for k in ['Tmin_base','delta_Tmin_pd','delta_Tmin_can','delta_Tmin_pre','Tmin_final']:
condensado.at[j,k] = sum(temp[k] * m) / sum(m)
condensado.at[j,'Monto'] = sum(m)
for k in ['TIR_base','delta_TIR_pd','delta_TIR_can','delta_TIR_pre','TIR_final']:
condensado.at[j,k] = sum(temp[k] * e) / sum(e)
condensado.at[j,'Capital promedio'] = sum(e)
for k in ['r_if_real','r_ef_real','r_spread_bruto_real','r_if_teorico','r_ef_teorico','r_spread_bruto_teorico']:
condensado.at[j,k] = sum(temp[k] * s) / sum(s)
for k in ['n_if_real','n_ef_real','n_saldo_real','n_if_teorico','n_ef_teorico','n_saldo_teorico']:
condensado.at[j,k] = sum(temp[k])
nametemp=condensado.columns[0]
condensado.rename(columns={nametemp:"CORTE"}, inplace=True)
if first==True:
imprimir = condensado
else:
imprimir = imprimir.append(condensado,ignore_index=True)
first=False
print(imprimir)
imprimir.to_excel(str(nombreproducto)+"_PlanchaPonderada.xlsx") | ExtraccionNoRev.py | import numpy as np
import pandas as pd
import itertools as it
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from source.engine import funciones as f
from source.engine.InputsNoRevolvente import InputsNoRevolvente
from source.engine.OutputsNoRevolvente import OutputsNoRevolvente
#CAMBIAR
nombreproducto = 'veh'
inicio = 201901
fin = 202012
agregado_cortes=['C_SEGMENTO','C_MONEDA','C_PLAZO','C_OK'] # Gahi & Veh
#agregado_cortes=['C_SEGMENTO','C_PLAZO','C_OK'] # Hipot & MiViv
#agregado_cortes=['C_SEGMENTO','C_MONEDA','C_PLAZO','C_CANAL','C_OK'] # CEF
#agregado_cortes=['C_PRODUCTO','C_PYG','C_OK'] # Pyme
lista_cortes=[['C_SEGMENTO'],['C_MONEDA'],['C_PLAZO'],['C_OK']] # Gahi & Veh
#lista_cortes=[['C_SEGMENTO'],['C_PLAZO'],['C_OK']] # Hipot & MiViv
#lista_cortes=[['C_SEGMENTO'],['C_MONEDA'],['C_PLAZO'],['C_CANAL'],['C_OK']] # CEF
#lista_cortes=[['C_PRODUCTO'],['C_PYG'],['C_OK']] # Pyme
ruta_real=['/Users/renzomartinch/Downloads/Comite/'+str(nombreproducto)+'_reales.csv']
ruta_teorico=['/Users/renzomartinch/Downloads/Comite/'+str(nombreproducto)+'_inputs.csv']
ruta_tmin=['/Users/renzomartinch/Downloads/Comite/'+str(nombreproducto)+'_precios.csv']
n = len(agregado_cortes)
REAL = pd.read_csv(ruta_real[0])#, encoding='latin-1')
TEORICO = pd.read_csv(ruta_teorico[0])#, encoding='latin-1')
TMIN = pd.read_csv(ruta_tmin[0])#, encoding='latin-1')
product = InputsNoRevolvente(REAL,TEORICO,mincosecha=inicio,maxcosecha=fin)
#Inputs
product.condensar(agregado_cortes)
product.optimizar()
a = product.promedios
b = product.stats.drop(product.stats.iloc[:, 0:(n+1)], axis = 1)
#Tmin
product.impactoTmin(TMIN)
c = product.Tmin.drop(product.Tmin.iloc[:, 0:(n+1)], axis = 1)
#TIR
product.impactoTIR(TMIN)
d = product.TIR.drop(product.TIR.iloc[:, 0:(n+1)], axis = 1)
#Outputs
product = OutputsNoRevolvente(REAL,TEORICO,mincosecha=inicio,maxcosecha=fin)
product.condensar(agregado_cortes)
e = product.ratios.drop(product.ratios.iloc[:, 0:(n+2)], axis = 1)
f = product.niveles.drop(product.niveles.iloc[:, 0:(n+2)], axis = 1)
agregado = pd.concat([a,b,c,d,e,f], axis=1) #<- añadir c,d,e,f
first = True
for corte in lista_cortes:
condensado = agregado.groupby(corte).size().reset_index().rename(columns={0:'descartar'}).drop('descartar',1)
for j in range(len(condensado)):
temp = agregado.loc[agregado[corte[0]] == condensado.loc[j,corte[0]]]
r = temp['recuento']
m = temp['Monto']
e = temp['Capital promedio']
s = temp['n_saldo_real']
condensado.at[j,'recuento'] = sum(r)
for k in ['pd_real','can_real','pre_real','pd_teorico','can_teorico','pre_teorico','pd_optimo','can_optimo','pre_optimo','MAE_pd','MAE_can','MAE_pre','MAEop_pd','MAEop_can','MAEop_pre','scalar_pd','scalar_can','scalar_pre']:
condensado.at[j,k] = sum(temp[k] * r) / sum(r)
for k in ['Tmin_base','delta_Tmin_pd','delta_Tmin_can','delta_Tmin_pre','Tmin_final']:
condensado.at[j,k] = sum(temp[k] * m) / sum(m)
condensado.at[j,'Monto'] = sum(m)
for k in ['TIR_base','delta_TIR_pd','delta_TIR_can','delta_TIR_pre','TIR_final']:
condensado.at[j,k] = sum(temp[k] * e) / sum(e)
condensado.at[j,'Capital promedio'] = sum(e)
for k in ['r_if_real','r_ef_real','r_spread_bruto_real','r_if_teorico','r_ef_teorico','r_spread_bruto_teorico']:
condensado.at[j,k] = sum(temp[k] * s) / sum(s)
for k in ['n_if_real','n_ef_real','n_saldo_real','n_if_teorico','n_ef_teorico','n_saldo_teorico']:
condensado.at[j,k] = sum(temp[k])
nametemp=condensado.columns[0]
condensado.rename(columns={nametemp:"CORTE"}, inplace=True)
if first==True:
imprimir = condensado
else:
imprimir = imprimir.append(condensado,ignore_index=True)
first=False
print(imprimir)
imprimir.to_excel(str(nombreproducto)+"_PlanchaPonderada.xlsx") | 0.100746 | 0.121061 |
import jinja2 as jinja2
import pytest as pytest
from localstack.utils.common import short_uid
from localstack.utils.generic.wait_utils import wait_until
from tests.integration.cloudformation.test_cloudformation_changesets import load_template_raw
def test_resolve_ssm(
ssm_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
cleanup_changesets,
cleanup_stacks,
create_parameter,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value = f"param-value-{short_uid()}"
create_parameter(Name=parameter_key, Value=parameter_value, Type="String")
template_rendered = jinja2.Template(load_template_raw("resolve_ssm.yaml")).render(
parameter_key=parameter_key,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
def test_resolve_ssm_withversion(
ssm_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
cleanup_changesets,
cleanup_stacks,
create_parameter,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value_v0 = f"param-value-{short_uid()}"
parameter_value_v1 = f"param-value-{short_uid()}"
parameter_value_v2 = f"param-value-{short_uid()}"
create_parameter(Name=parameter_key, Type="String", Value=parameter_value_v0)
v1 = ssm_client.put_parameter(
Name=parameter_key, Overwrite=True, Type="String", Value=parameter_value_v1
)
ssm_client.put_parameter(
Name=parameter_key, Overwrite=True, Type="String", Value=parameter_value_v2
)
template_rendered = jinja2.Template(load_template_raw("resolve_ssm_withversion.yaml")).render(
parameter_key=parameter_key, parameter_version=str(v1["Version"])
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value_v1
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
def test_resolve_ssm_secure(
ssm_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
cleanup_changesets,
cleanup_stacks,
create_parameter,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value = f"param-value-{short_uid()}"
create_parameter(Name=parameter_key, Value=parameter_value, Type="SecureString")
template_rendered = jinja2.Template(load_template_raw("resolve_ssm_secure.yaml")).render(
parameter_key=parameter_key,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
@pytest.mark.parametrize(
"template_name", ["resolve_secretsmanager_full.yaml", "resolve_secretsmanager.yaml"]
)
def test_resolve_secretsmanager(
secretsmanager_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
create_secret,
create_parameter,
cleanup_changesets,
cleanup_stacks,
template_name,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value = f"param-value-{short_uid()}"
create_secret(Name=parameter_key, SecretString=parameter_value)
template_rendered = jinja2.Template(load_template_raw(template_name)).render(
parameter_key=parameter_key,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id]) | tests/integration/cloudformation/test_cloudformation_dynamic_parameters.py | import jinja2 as jinja2
import pytest as pytest
from localstack.utils.common import short_uid
from localstack.utils.generic.wait_utils import wait_until
from tests.integration.cloudformation.test_cloudformation_changesets import load_template_raw
def test_resolve_ssm(
ssm_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
cleanup_changesets,
cleanup_stacks,
create_parameter,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value = f"param-value-{short_uid()}"
create_parameter(Name=parameter_key, Value=parameter_value, Type="String")
template_rendered = jinja2.Template(load_template_raw("resolve_ssm.yaml")).render(
parameter_key=parameter_key,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
def test_resolve_ssm_withversion(
ssm_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
cleanup_changesets,
cleanup_stacks,
create_parameter,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value_v0 = f"param-value-{short_uid()}"
parameter_value_v1 = f"param-value-{short_uid()}"
parameter_value_v2 = f"param-value-{short_uid()}"
create_parameter(Name=parameter_key, Type="String", Value=parameter_value_v0)
v1 = ssm_client.put_parameter(
Name=parameter_key, Overwrite=True, Type="String", Value=parameter_value_v1
)
ssm_client.put_parameter(
Name=parameter_key, Overwrite=True, Type="String", Value=parameter_value_v2
)
template_rendered = jinja2.Template(load_template_raw("resolve_ssm_withversion.yaml")).render(
parameter_key=parameter_key, parameter_version=str(v1["Version"])
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value_v1
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
def test_resolve_ssm_secure(
ssm_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
cleanup_changesets,
cleanup_stacks,
create_parameter,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value = f"param-value-{short_uid()}"
create_parameter(Name=parameter_key, Value=parameter_value, Type="SecureString")
template_rendered = jinja2.Template(load_template_raw("resolve_ssm_secure.yaml")).render(
parameter_key=parameter_key,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id])
@pytest.mark.parametrize(
"template_name", ["resolve_secretsmanager_full.yaml", "resolve_secretsmanager.yaml"]
)
def test_resolve_secretsmanager(
secretsmanager_client,
cfn_client,
is_change_set_created_and_available,
is_stack_created,
create_secret,
create_parameter,
cleanup_changesets,
cleanup_stacks,
template_name,
):
stack_name = f"stack-{short_uid()}"
change_set_name = f"change-set-{short_uid()}"
parameter_key = f"param-key-{short_uid()}"
parameter_value = f"param-value-{short_uid()}"
create_secret(Name=parameter_key, SecretString=parameter_value)
template_rendered = jinja2.Template(load_template_raw(template_name)).render(
parameter_key=parameter_key,
)
response = cfn_client.create_change_set(
StackName=stack_name,
ChangeSetName=change_set_name,
TemplateBody=template_rendered,
ChangeSetType="CREATE",
)
change_set_id = response["Id"]
stack_id = response["StackId"]
try:
wait_until(is_change_set_created_and_available(change_set_id))
cfn_client.execute_change_set(ChangeSetName=change_set_id)
wait_until(is_stack_created(stack_id))
describe_result = cfn_client.describe_stacks(StackName=stack_id)["Stacks"][0]
assert describe_result["StackStatus"] == "CREATE_COMPLETE"
topic_name = [
o["OutputValue"] for o in describe_result["Outputs"] if o["OutputKey"] == "TopicName"
][0]
assert topic_name == parameter_value
finally:
cleanup_changesets([change_set_id])
cleanup_stacks([stack_id]) | 0.213623 | 0.395309 |
# @package OpTestHMC
# This class can contain common functions which are useful for
# FSP_PHYP (HMC) platforms
import os
import sys
import time
import pexpect
import shlex
import OpTestLogger
from common.OpTestError import OpTestError
from common.OpTestSSH import OpTestSSH
from common.OpTestUtil import OpTestUtil
from common.Exceptions import CommandFailed
from common import OPexpect
from .OpTestConstants import OpTestConstants as BMC_CONST
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
WAITTIME = 15
SYS_WAITTIME = 200
BOOTTIME = 500
STALLTIME = 5
class OpHmcState():
'''
This class is used as an enum as to what state op-test *thinks* the LPAR is in.
These states are used to check status of a LPAR.
'''
NOT_ACTIVE = 'Not Activated'
RUNNING = 'Running'
SHUTTING = 'Shutting Down'
OF = 'Open Firmware'
STARTING = 'Starting'
NA = 'Not Available'
class OpManagedState():
'''
This class is used as an enum as to what state op-test *thinks* the managed
system is in. These states are used to check status of managed system.
'''
OPERATING = 'Operating'
INIT = 'Initializing'
OFF = 'Power Off'
PROG_OFF = 'Power Off In Progress'
class ConsoleState():
DISCONNECTED = 0
CONNECTED = 1
class Spawn(OPexpect.spawn):
def __init__(self, command, args=[], maxread=8000,
searchwindowsize=None, logfile=None, cwd=None, env=None,
ignore_sighup=False, echo=True, preexec_fn=None,
encoding='utf-8', codec_errors='ignore', dimensions=None,
failure_callback=None, failure_callback_data=None):
super(Spawn, self).__init__(command, args=args,
maxread=maxread,
searchwindowsize=searchwindowsize,
logfile=logfile,
cwd=cwd, env=env,
ignore_sighup=ignore_sighup,
encoding=encoding,
codec_errors=codec_errors)
def sendline(self, command=''):
# HMC console required an enter to be sent with each sendline
super(Spawn, self).sendline(command)
self.send("\r")
class HMCUtil():
'''
Utility and functions of HMC object
'''
def __init__(self, hmc_ip, user_name, password, scratch_disk="", proxy="",
logfile=sys.stdout, managed_system=None, lpar_name=None, prompt=None,
block_setup_term=None, delaybeforesend=None, timeout_factor=None,
lpar_prof=None, lpar_vios=None, lpar_user=None, lpar_password=<PASSWORD>,
check_ssh_keys=False, known_hosts_file=None, tgt_managed_system=None,
tgt_lpar=None):
self.hmc_ip = hmc_ip
self.user = user_name
self.passwd = password
self.logfile = logfile
self.mg_system = managed_system
self.tgt_mg_system = tgt_managed_system
self.tgt_lpar = tgt_lpar
self.check_ssh_keys = check_ssh_keys
self.known_hosts_file = known_hosts_file
self.lpar_name = lpar_name
self.lpar_prof = lpar_prof
self.lpar_user = lpar_user
self.lpar_password = <PASSWORD>
self.lpar_vios = lpar_vios
self.util = OpTestUtil()
self.prompt = prompt
self.expect_prompt = self.util.build_prompt(prompt) + "$"
self.ssh = OpTestSSH(hmc_ip, user_name, password, logfile=self.logfile,
check_ssh_keys=check_ssh_keys,
known_hosts_file=known_hosts_file,
block_setup_term=block_setup_term)
self.scratch_disk = scratch_disk
self.proxy = proxy
self.scratch_disk_size = None
self.delaybeforesend = delaybeforesend
self.system = None
# OpTestUtil instance is NOT conf's
self.pty = None
# allows caller specific control of when to block setup_term
self.block_setup_term = block_setup_term
# tells setup_term to not throw exceptions, like when system off
self.setup_term_quiet = 0
# flags the object to abandon setup_term operations, like when system off
self.setup_term_disable = 0
# functional simulators are very slow, so multiply all default timeouts by this factor
self.timeout_factor = timeout_factor
# state tracking, reset on boot and state changes
# console tracking done on System object for the system console
self.PS1_set = -1
self.LOGIN_set = -1
self.SUDO_set = -1
def deactivate_lpar_console(self):
self.ssh.run_command("rmvterm -m %s -p %s" %
(self.mg_system, self.lpar_name), timeout=10)
def poweroff_system(self):
if self.get_system_state() != OpManagedState.OPERATING:
raise OpTestError('Managed Systen not in Operating state')
self.ssh.run_command("chsysstate -m %s -r sys -o off" % self.mg_system)
self.wait_system_state(OpManagedState.OFF)
def poweron_system(self):
if self.get_system_state() != OpManagedState.OFF:
raise OpTestError('Managed Systen not is Power off state!')
self.ssh.run_command("chsysstate -m %s -r sys -o on" % self.mg_system)
self.wait_system_state()
if self.lpar_vios:
log.debug("Starting VIOS %s", self.lpar_vios)
self.poweron_lpar(vios=True)
def poweroff_lpar(self):
if self.get_lpar_state() in [OpHmcState.NOT_ACTIVE, OpHmcState.NA]:
log.info('LPAR Already powered-off!')
return
self.ssh.run_command("chsysstate -m %s -r lpar -n %s -o shutdown --immed" %
(self.mg_system, self.lpar_name))
self.wait_lpar_state(OpHmcState.NOT_ACTIVE)
def poweron_lpar(self, vios=False):
if self.get_lpar_state(vios) == OpHmcState.RUNNING:
log.info('LPAR Already powered on!')
return BMC_CONST.FW_SUCCESS
lpar_name = self.lpar_name
if vios:
lpar_name = self.lpar_vios
cmd = "chsysstate -m %s -r lpar -n %s -o on" % (self.mg_system, lpar_name)
if self.lpar_prof:
cmd = "%s -f %s" % (cmd, self.lpar_prof)
self.wait_lpar_state(OpHmcState.NOT_ACTIVE, vios=vios)
self.ssh.run_command(cmd)
self.wait_lpar_state(vios=vios)
time.sleep(STALLTIME)
return BMC_CONST.FW_SUCCESS
def dumprestart_lpar(self):
if self.get_lpar_state() in [OpHmcState.NOT_ACTIVE, OpHmcState.NA]:
log.info('LPAR Already powered-off!')
return
self.ssh.run_command("chsysstate -m %s -r lpar -n %s -o dumprestart" %
(self.mg_system, self.lpar_name))
self.wait_lpar_state()
def restart_lpar(self):
if self.get_lpar_state() in [OpHmcState.NOT_ACTIVE, OpHmcState.NA]:
log.info('LPAR Already powered-off!')
return
self.ssh.run_command("chsysstate -m %s -r lpar -n %s -o shutdown --immed --restart" %
(self.mg_system, self.lpar_name))
self.wait_lpar_state()
def get_lpar_cfg(self):
out = self.ssh.run_command("lssyscfg -r prof -m %s --filter 'lpar_names=%s'" %
(self.mg_system, self.lpar_name))[-1]
cfg_dict = {}
splitter = shlex.shlex(out)
splitter.whitespace += ','
splitter.whitespace_split = True
for values in list(splitter):
data = values.split("=")
key = data[0]
value = data[1]
cfg_dict[key] = value
return cfg_dict
def set_lpar_cfg(self, arg_str):
if not self.lpar_prof:
raise OpTestError("Profile needs to be defined to use this method")
self.ssh.run_command("chsyscfg -r prof -m %s -p %s -i 'lpar_name=%s,name=%s,%s' --force" %
(self.mg_system, self.lpar_name, self.lpar_name, self.lpar_prof,arg_str))
def get_lpar_state(self, vios=False):
lpar_name = self.lpar_name
if vios:
lpar_name = self.lpar_vios
state = self.ssh.run_command(
'lssyscfg -m %s -r lpar --filter lpar_names=%s -F state' % (self.mg_system, lpar_name))[-1]
ref_code = self.ssh.run_command(
'lsrefcode -m %s -r lpar --filter lpar_names=%s -F refcode' % (self.mg_system, lpar_name))[-1]
if state == 'Running':
if 'Linux' in ref_code or not ref_code:
return 'Running'
else:
return 'Booting'
return state
def get_system_state(self):
state = self.ssh.run_command(
'lssyscfg -m %s -r sys -F state' % self.mg_system)
return state[-1]
def wait_lpar_state(self, exp_state=OpHmcState.RUNNING, vios=False, timeout=WAITTIME):
state = self.get_lpar_state(vios)
count = 0
while state != exp_state:
state = self.get_lpar_state(vios)
log.info("Current state: %s", state)
time.sleep(timeout)
count = 1
if count > 120:
raise OpTestError("Time exceeded for reaching %s" % exp_state)
def wait_system_state(self, exp_state=OpManagedState.OPERATING, timeout=SYS_WAITTIME):
state = self.get_system_state()
count = 0
while state != exp_state:
state = self.get_system_state()
log.info("Current state: %s", state)
time.sleep(timeout)
count = 1
if count > 60:
raise OpTestError("Time exceeded for reaching %s" % exp_state)
def is_lpar_in_managed_system(self, mg_system=None, lpar_name=None):
lpar_list = self.ssh.run_command(
'lssyscfg -r lpar -m %s -F name' % mg_system)
if lpar_name in lpar_list:
log.info("%s lpar found in managed system %s" % (mg_system, lpar_name))
return True
return False
def migrate_lpar(self, src_mg_system=None, dest_mg_system=None):
if src_mg_system == None or dest_mg_system == None:
raise OpTestError("Source and Destination Managed System required for LPM")
if not self.is_lpar_in_managed_system(src_mg_system, self.lpar_name):
raise OpTestError("Lpar %s not found in managed system %s" % (self.lpar_name, src_mg_system))
self.ssh.run_command(
'migrlpar -o v -m %s -t %s -p %s' % (src_mg_system, dest_mg_system, self.lpar_name))
self.ssh.run_command(
'migrlpar -o m -m %s -t %s -p %s' % (src_mg_system, dest_mg_system, self.lpar_name))
if self.is_lpar_in_managed_system(dest_mg_system, self.lpar_name):
log.info("Migration of lpar %s from %s to %s is successfull" %
(self.lpar_name, src_mg_system, dest_mg_system))
self.mg_system = dest_mg_system
return True
log.info("Migration of lpar %s from %s to %s failed" %
(self.lpar_name, src_mg_system, dest_mg_system))
return False
def run_command_ignore_fail(self, command, timeout=60, retry=0):
return self.ssh.run_command_ignore_fail(command, timeout*self.timeout_factor, retry)
def run_command(self, i_cmd, timeout=15):
return self.ssh.run_command(i_cmd, timeout)
class OpTestHMC(HMCUtil):
'''
This class contains the modules to perform various HMC operations on an LPAR.
The Host IP, username and password of HMC have to be passed to the class intially
while creating the object for the class.
'''
def __init__(self, hmc_ip, user_name, password, scratch_disk="", proxy="",
logfile=sys.stdout, managed_system=None, lpar_name=None, prompt=None,
block_setup_term=None, delaybeforesend=None, timeout_factor=1,
lpar_prof=None, lpar_vios=None, lpar_user=None, lpar_password=<PASSWORD>,
check_ssh_keys=False, known_hosts_file=None, tgt_managed_system=None,
tgt_lpar=None):
super(OpTestHMC, self).__init__(hmc_ip, user_name, password, scratch_disk,
proxy, logfile, managed_system, lpar_name, prompt,
block_setup_term, delaybeforesend, timeout_factor,
lpar_prof, lpar_vios, lpar_user, lpar_password,
check_ssh_keys, known_hosts_file, tgt_managed_system,
tgt_lpar)
self.console = HMCConsole(hmc_ip, user_name, password, managed_system, lpar_name,
lpar_vios, lpar_prof, lpar_user, lpar_password)
def set_system(self, system):
self.system = system
self.ssh.set_system(system)
self.console.set_system(system)
def get_rest_api(self):
return None
def has_os_boot_sensor(self):
return False
def has_occ_active_sensor(self):
return False
def has_host_status_sensor(self):
return False
def has_inband_bootdev(self):
return False
def get_host_console(self):
return self.console
class HMCConsole(HMCUtil):
"""
HMCConsole Class
Methods to manage the console of LPAR
"""
def __init__(self, hmc_ip, user_name, password, managed_system, lpar_name,
lpar_vios, lpar_prof, lpar_user, lpar_password,
block_setup_term=None, delaybeforesend=None, timeout_factor=1,
logfile=sys.stdout, prompt=None, scratch_disk="",
check_ssh_keys=False, known_hosts_file=None, proxy=""):
self.logfile = logfile
self.hmc_ip = hmc_ip
self.user = user_name
self.passwd = password
self.mg_system = managed_system
self.util = OpTestUtil()
self.expect_prompt = self.util.build_prompt(prompt) + "$"
self.lpar_name = lpar_name
self.lpar_vios = lpar_vios
self.lpar_prof = lpar_prof
self.lpar_user = lpar_user
self.lpar_password = <PASSWORD>
self.scratch_disk = scratch_disk
self.proxy = proxy
self.state = ConsoleState.DISCONNECTED
self.delaybeforesend = delaybeforesend
self.system = None
# OpTestUtil instance is NOT conf's
self.prompt = prompt
self.pty = None
self.delaybeforesend = delaybeforesend
# allows caller specific control of when to block setup_term
self.block_setup_term = block_setup_term
# tells setup_term to not throw exceptions, like when system off
self.setup_term_quiet = 0
# flags the object to abandon setup_term operations, like when system off
self.setup_term_disable = 0
# FUTURE - System Console currently tracked in System Object
# state tracking, reset on boot and state changes
self.PS1_set = -1
self.LOGIN_set = -1
self.SUDO_set = -1
super(HMCConsole, self).__init__(hmc_ip, user_name, password, scratch_disk, proxy,
logfile, managed_system, lpar_name, prompt,
block_setup_term, delaybeforesend, timeout_factor,
lpar_prof, lpar_vios, lpar_user, lpar_password,
check_ssh_keys, known_hosts_file)
def set_system(self, system):
self.ssh.set_system(system)
self.system = system
self.pty = self.get_console()
self.pty.set_system(system)
def get_host_console(self):
return self.pty
def set_system_setup_term(self, flag):
self.system.block_setup_term = flag
def get_system_setup_term(self):
return self.system.block_setup_term
def get_scratch_disk(self):
return self.scratch_disk
def get_proxy(self):
return self.proxy
def hostname(self):
return self.hmc_ip
def username(self):
return self.user
def password(self):
return self.<PASSWORD>
def set_block_setup_term(self, flag):
self.block_setup_term = flag
def get_block_setup_term(self):
return self.block_setup_term
def enable_setup_term_quiet(self):
self.setup_term_quiet = 1
self.setup_term_disable = 0
def disable_setup_term_quiet(self):
self.setup_term_quiet = 0
self.setup_term_disable = 0
def close(self):
self.util.clear_state(self)
try:
self.pty.close()
if self.pty.status != -1: # leaving for debug
if os.WIFEXITED(self.pty.status):
os.WEXITSTATUS(self.pty.status)
else:
os.WTERMSIG(self.pty.status)
self.state = ConsoleState.DISCONNECTED
except pexpect.ExceptionPexpect:
self.state = ConsoleState.DISCONNECTED
raise "HMC Console: failed to close console"
except Exception:
self.state = ConsoleState.DISCONNECTED
log.debug("HMC close -> TERMINATE")
def connect(self, logger=None):
if self.state == ConsoleState.CONNECTED:
return self.pty
self.util.clear_state(self) # clear when coming in DISCONNECTED
log.info("De-activating the console")
self.deactivate_lpar_console()
log.debug("#HMC Console CONNECT")
command = "sshpass -p %s ssh -p 22 -l %s %s -o PubkeyAuthentication=no"\
" -o afstokenpassing=no -q -o 'UserKnownHostsFile=/dev/null'"\
" -o 'StrictHostKeyChecking=no'"
try:
self.pty = Spawn(
command % (self.passwd, self.user, self.hmc_ip))
log.info("Opening the LPAR console")
time.sleep(STALLTIME)
self.pty.send('\r')
self.pty.sendline("mkvterm -m %s -p %s" % (self.mg_system, self.lpar_name))
self.pty.send('\r')
time.sleep(STALLTIME)
i = self.pty.expect(
["Open Completed.", pexpect.TIMEOUT], timeout=60)
self.pty.logfile = sys.stdout
if logger:
self.pty.logfile_read = OpTestLogger.FileLikeLogger(logger)
else:
self.pty.logfile_read = OpTestLogger.FileLikeLogger(log)
if i == 0:
time.sleep(STALLTIME)
self.state = ConsoleState.CONNECTED
self.pty.setwinsize(1000, 1000)
else:
raise OpTestError("Check the lpar activate command")
except Exception as exp:
self.state = ConsoleState.DISCONNECTED
raise CommandFailed('OPexpect.spawn',
'OPexpect.spawn encountered a problem: ' + str(exp), -1)
if self.delaybeforesend:
self.pty.delaybeforesend = self.delaybeforesend
if not self.pty.isalive():
raise CommandFailed("mkvterm", self.pty.read(), self.pty.status)
return self.pty
def check_state(self):
return self.state
def get_console(self, logger=None):
if self.state == ConsoleState.DISCONNECTED:
self.util.clear_state(self)
self.connect(logger=logger)
time.sleep(STALLTIME)
l_rc = self.pty.expect(["login:", pexpect.TIMEOUT], timeout=30)
if l_rc == 0:
self.pty.send('\r')
else:
time.sleep(STALLTIME)
self.pty.send('\r')
self.pty.sendline('PS1=' + self.util.build_prompt(self.prompt))
self.pty.send('\r')
time.sleep(STALLTIME)
l_rc = self.pty.expect(
[self.expect_prompt, pexpect.TIMEOUT], timeout=WAITTIME)
if l_rc == 0:
log.debug("Shell prompt changed")
else:
self.pty.send('\r')
log.debug("Waiting till booting!")
self.pty = self.get_login_prompt()
if self.system.SUDO_set != 1 or self.system.LOGIN_set != 1 or self.system.PS1_set != 1:
self.util.setup_term(self.system, self.pty,
None, self.system.block_setup_term)
# Clear buffer before usage
self.pty.buffer = ""
return self.pty
def get_login_prompt(self):
# Assuming 'Normal' boot set in LPAR profile
# We wait for upto 500 seconds for LPAR to boot to OS
self.pty.send('\r')
time.sleep(STALLTIME)
log.debug("Waiting for login screen")
i = self.pty.expect(["login:", self.expect_prompt, pexpect.TIMEOUT], timeout=30)
if i == 0:
log.debug("System has booted")
time.sleep(STALLTIME)
elif i == 1:
log.debug("Console already logged in")
else:
log.error("Failed to get login prompt %s", self.pty.before)
# To cheat system for making using of HMC SSH
self.system.PS1_set = 1
self.system.LOGIN_set = 1
self.system.SUDO_set = 1
return self.pty
def run_command(self, i_cmd, timeout=15):
return self.util.run_command(self, i_cmd, timeout) | common/OpTestHMC.py |
# @package OpTestHMC
# This class can contain common functions which are useful for
# FSP_PHYP (HMC) platforms
import os
import sys
import time
import pexpect
import shlex
import OpTestLogger
from common.OpTestError import OpTestError
from common.OpTestSSH import OpTestSSH
from common.OpTestUtil import OpTestUtil
from common.Exceptions import CommandFailed
from common import OPexpect
from .OpTestConstants import OpTestConstants as BMC_CONST
log = OpTestLogger.optest_logger_glob.get_logger(__name__)

# Default polling interval (seconds) between LPAR state checks.
WAITTIME = 15
# Polling interval (seconds) between managed-system state checks.
SYS_WAITTIME = 200
# Upper bound (seconds) allowed for an LPAR to boot to the OS.
BOOTTIME = 500
# Short pause (seconds) between console interactions.
STALLTIME = 5
class OpHmcState():
    '''
    This class is used as an enum as to what state op-test *thinks* the LPAR is in.
    These states are used to check status of a LPAR.

    The string values are compared against the state column reported by the
    HMC ``lssyscfg -r lpar`` command (see get_lpar_state / wait_lpar_state).
    '''
    NOT_ACTIVE = 'Not Activated'
    RUNNING = 'Running'
    SHUTTING = 'Shutting Down'
    OF = 'Open Firmware'
    STARTING = 'Starting'
    NA = 'Not Available'
class OpManagedState():
    '''
    This class is used as an enum as to what state op-test *thinks* the managed
    system is in. These states are used to check status of managed system.

    The string values are compared against the state reported by the HMC
    ``lssyscfg -r sys`` command (see get_system_state / wait_system_state).
    '''
    OPERATING = 'Operating'
    INIT = 'Initializing'
    OFF = 'Power Off'
    PROG_OFF = 'Power Off In Progress'
class ConsoleState():
    '''Enum tracking whether the HMC virtual-terminal console is open.'''
    DISCONNECTED = 0
    CONNECTED = 1
class Spawn(OPexpect.spawn):
    '''pexpect spawn wrapper tailored for HMC console sessions.

    NOTE(review): ``echo``, ``preexec_fn``, ``dimensions``,
    ``failure_callback`` and ``failure_callback_data`` are accepted here
    but not forwarded to the base class -- confirm whether OPexpect.spawn
    should receive them or whether they are intentionally ignored.
    '''

    def __init__(self, command, args=[], maxread=8000,
                 searchwindowsize=None, logfile=None, cwd=None, env=None,
                 ignore_sighup=False, echo=True, preexec_fn=None,
                 encoding='utf-8', codec_errors='ignore', dimensions=None,
                 failure_callback=None, failure_callback_data=None):
        super(Spawn, self).__init__(command, args=args,
                                    maxread=maxread,
                                    searchwindowsize=searchwindowsize,
                                    logfile=logfile,
                                    cwd=cwd, env=env,
                                    ignore_sighup=ignore_sighup,
                                    encoding=encoding,
                                    codec_errors=codec_errors)

    def sendline(self, command=''):
        # HMC console required an enter to be sent with each sendline
        super(Spawn, self).sendline(command)
        self.send("\r")
class HMCUtil():
'''
Utility and functions of HMC object
'''
def __init__(self, hmc_ip, user_name, password, scratch_disk="", proxy="",
             logfile=sys.stdout, managed_system=None, lpar_name=None, prompt=None,
             block_setup_term=None, delaybeforesend=None, timeout_factor=None,
             lpar_prof=None, lpar_vios=None, lpar_user=None, lpar_password=None,
             check_ssh_keys=False, known_hosts_file=None, tgt_managed_system=None,
             tgt_lpar=None):
    """Create an HMC utility object and its SSH connection helper.

    Bug fix: the ``lpar_password`` default and assignment contained a
    redacted ``<PASSWORD>`` placeholder, which is a syntax error; the
    default is restored to ``None`` and the parameter is stored as-is.

    :param hmc_ip: IP address / hostname of the HMC
    :param user_name: HMC login user
    :param password: HMC login password
    :param scratch_disk: scratch disk made available to tests
    :param proxy: proxy used to reach the LPAR network, if any
    :param logfile: file-like object SSH/console output is logged to
    :param managed_system: name of the managed system hosting the LPAR
    :param lpar_name: name of the LPAR under test
    :param prompt: shell prompt op-test sets up on the console
    :param block_setup_term: caller-specific control of when to block setup_term
    :param timeout_factor: multiplier applied to default command timeouts
        (functional simulators are very slow)
    :param lpar_prof: LPAR profile used on activation
    :param lpar_vios: VIOS partition name, if any
    :param lpar_user: LPAR OS login user
    :param lpar_password: LPAR OS login password
    :param check_ssh_keys: verify SSH host keys when connecting
    :param known_hosts_file: known_hosts file used when check_ssh_keys is set
    :param tgt_managed_system: LPM migration target managed system
    :param tgt_lpar: LPM migration target LPAR
    """
    self.hmc_ip = hmc_ip
    self.user = user_name
    self.passwd = password
    self.logfile = logfile
    self.mg_system = managed_system
    self.tgt_mg_system = tgt_managed_system
    self.tgt_lpar = tgt_lpar
    self.check_ssh_keys = check_ssh_keys
    self.known_hosts_file = known_hosts_file
    self.lpar_name = lpar_name
    self.lpar_prof = lpar_prof
    self.lpar_user = lpar_user
    self.lpar_password = lpar_password
    self.lpar_vios = lpar_vios
    self.util = OpTestUtil()
    self.prompt = prompt
    self.expect_prompt = self.util.build_prompt(prompt) + "$"
    self.ssh = OpTestSSH(hmc_ip, user_name, password, logfile=self.logfile,
                         check_ssh_keys=check_ssh_keys,
                         known_hosts_file=known_hosts_file,
                         block_setup_term=block_setup_term)
    self.scratch_disk = scratch_disk
    self.proxy = proxy
    self.scratch_disk_size = None
    self.delaybeforesend = delaybeforesend
    self.system = None
    # OpTestUtil instance is NOT conf's
    self.pty = None
    # allows caller specific control of when to block setup_term
    self.block_setup_term = block_setup_term
    # tells setup_term to not throw exceptions, like when system off
    self.setup_term_quiet = 0
    # flags the object to abandon setup_term operations, like when system off
    self.setup_term_disable = 0
    # functional simulators are very slow, so multiply all default timeouts by this factor
    # NOTE(review): a None default crashes run_command_ignore_fail's
    # multiplication -- confirm callers always pass a number
    self.timeout_factor = timeout_factor
    # state tracking, reset on boot and state changes
    # console tracking done on System object for the system console
    self.PS1_set = -1
    self.LOGIN_set = -1
    self.SUDO_set = -1
def deactivate_lpar_console(self):
    """Close any open virtual terminal for this LPAR on the HMC."""
    cmd = "rmvterm -m %s -p %s" % (self.mg_system, self.lpar_name)
    self.ssh.run_command(cmd, timeout=10)
def poweroff_system(self):
    """Power off the managed system and wait for the Off state.

    :raises OpTestError: if the managed system is not currently Operating
    """
    if self.get_system_state() != OpManagedState.OPERATING:
        # message typo fixed: "Systen" -> "System"
        raise OpTestError('Managed System not in Operating state')
    self.ssh.run_command("chsysstate -m %s -r sys -o off" % self.mg_system)
    self.wait_system_state(OpManagedState.OFF)
def poweron_system(self):
    """Power on the managed system, then start the VIOS if one is configured.

    :raises OpTestError: if the managed system is not currently powered off
    """
    if self.get_system_state() != OpManagedState.OFF:
        # message fixed: was the garbled "Managed Systen not is Power off state!"
        raise OpTestError('Managed System not in Power off state!')
    self.ssh.run_command("chsysstate -m %s -r sys -o on" % self.mg_system)
    self.wait_system_state()
    if self.lpar_vios:
        log.debug("Starting VIOS %s", self.lpar_vios)
        self.poweron_lpar(vios=True)
def poweroff_lpar(self):
    """Immediately shut down the LPAR; no-op if it is already inactive."""
    current = self.get_lpar_state()
    if current in (OpHmcState.NOT_ACTIVE, OpHmcState.NA):
        log.info('LPAR Already powered-off!')
        return
    cmd = "chsysstate -m %s -r lpar -n %s -o shutdown --immed" % (
        self.mg_system, self.lpar_name)
    self.ssh.run_command(cmd)
    self.wait_lpar_state(OpHmcState.NOT_ACTIVE)
def poweron_lpar(self, vios=False):
    """Activate the LPAR (or the VIOS partition when ``vios`` is True).

    Uses the configured profile when one is set, waits for the partition
    to come up, and returns BMC_CONST.FW_SUCCESS.
    """
    if self.get_lpar_state(vios) == OpHmcState.RUNNING:
        log.info('LPAR Already powered on!')
        return BMC_CONST.FW_SUCCESS

    target = self.lpar_vios if vios else self.lpar_name
    cmd = "chsysstate -m %s -r lpar -n %s -o on" % (self.mg_system, target)
    if self.lpar_prof:
        cmd = "%s -f %s" % (cmd, self.lpar_prof)

    # make sure the partition is fully down before activating it
    self.wait_lpar_state(OpHmcState.NOT_ACTIVE, vios=vios)
    self.ssh.run_command(cmd)
    self.wait_lpar_state(vios=vios)
    time.sleep(STALLTIME)
    return BMC_CONST.FW_SUCCESS
def dumprestart_lpar(self):
    """Trigger a dump-and-restart of the LPAR; no-op if it is inactive."""
    current = self.get_lpar_state()
    if current in (OpHmcState.NOT_ACTIVE, OpHmcState.NA):
        log.info('LPAR Already powered-off!')
        return
    cmd = "chsysstate -m %s -r lpar -n %s -o dumprestart" % (
        self.mg_system, self.lpar_name)
    self.ssh.run_command(cmd)
    self.wait_lpar_state()
def restart_lpar(self):
    """Immediately restart the LPAR; no-op if it is inactive."""
    current = self.get_lpar_state()
    if current in (OpHmcState.NOT_ACTIVE, OpHmcState.NA):
        log.info('LPAR Already powered-off!')
        return
    cmd = "chsysstate -m %s -r lpar -n %s -o shutdown --immed --restart" % (
        self.mg_system, self.lpar_name)
    self.ssh.run_command(cmd)
    self.wait_lpar_state()
def get_lpar_cfg(self):
    """Return the LPAR profile configuration as a key/value dict.

    Parses the comma-separated ``attr=value`` output of ``lssyscfg -r prof``.

    Bug fix: split each token on the FIRST '=' only, so attribute values
    that themselves contain '=' are preserved intact (the old
    ``split("=")[1]`` truncated them), and tokens without '=' no longer
    raise IndexError.
    """
    out = self.ssh.run_command("lssyscfg -r prof -m %s --filter 'lpar_names=%s'" %
                               (self.mg_system, self.lpar_name))[-1]
    cfg_dict = {}
    # shlex honours the double-quoting the HMC uses around list values
    splitter = shlex.shlex(out)
    splitter.whitespace += ','
    splitter.whitespace_split = True
    for token in list(splitter):
        key, _, value = token.partition("=")
        cfg_dict[key] = value
    return cfg_dict
def set_lpar_cfg(self, arg_str):
    """Apply ``attr=value`` changes (comma separated) to the LPAR profile.

    :raises OpTestError: if no LPAR profile is configured on this object
    """
    if not self.lpar_prof:
        raise OpTestError("Profile needs to be defined to use this method")
    cmd = ("chsyscfg -r prof -m %s -p %s -i 'lpar_name=%s,name=%s,%s' --force"
           % (self.mg_system, self.lpar_name, self.lpar_name,
              self.lpar_prof, arg_str))
    self.ssh.run_command(cmd)
def get_lpar_state(self, vios=False):
    """Return the LPAR (or VIOS, when ``vios``) state reported by the HMC.

    A partition the HMC reports as 'Running' is only returned as
    'Running' once its reference code indicates the OS is up; otherwise
    'Booting' is returned.
    """
    target = self.lpar_vios if vios else self.lpar_name
    state = self.ssh.run_command(
        'lssyscfg -m %s -r lpar --filter lpar_names=%s -F state' % (self.mg_system, target))[-1]
    ref_code = self.ssh.run_command(
        'lsrefcode -m %s -r lpar --filter lpar_names=%s -F refcode' % (self.mg_system, target))[-1]
    if state != 'Running':
        return state
    # an empty refcode, or one mentioning Linux, means the OS has booted
    if not ref_code or 'Linux' in ref_code:
        return 'Running'
    return 'Booting'
def get_system_state(self):
    """Return the managed system state string reported by ``lssyscfg``."""
    output = self.ssh.run_command(
        'lssyscfg -m %s -r sys -F state' % self.mg_system)
    return output[-1]
def wait_lpar_state(self, exp_state=OpHmcState.RUNNING, vios=False, timeout=WAITTIME):
state = self.get_lpar_state(vios)
count = 0
while state != exp_state:
state = self.get_lpar_state(vios)
log.info("Current state: %s", state)
time.sleep(timeout)
count = 1
if count > 120:
raise OpTestError("Time exceeded for reaching %s" % exp_state)
def wait_system_state(self, exp_state=OpManagedState.OPERATING, timeout=SYS_WAITTIME):
state = self.get_system_state()
count = 0
while state != exp_state:
state = self.get_system_state()
log.info("Current state: %s", state)
time.sleep(timeout)
count = 1
if count > 60:
raise OpTestError("Time exceeded for reaching %s" % exp_state)
def is_lpar_in_managed_system(self, mg_system=None, lpar_name=None):
lpar_list = self.ssh.run_command(
'lssyscfg -r lpar -m %s -F name' % mg_system)
if lpar_name in lpar_list:
log.info("%s lpar found in managed system %s" % (mg_system, lpar_name))
return True
return False
def migrate_lpar(self, src_mg_system=None, dest_mg_system=None):
if src_mg_system == None or dest_mg_system == None:
raise OpTestError("Source and Destination Managed System required for LPM")
if not self.is_lpar_in_managed_system(src_mg_system, self.lpar_name):
raise OpTestError("Lpar %s not found in managed system %s" % (self.lpar_name, src_mg_system))
self.ssh.run_command(
'migrlpar -o v -m %s -t %s -p %s' % (src_mg_system, dest_mg_system, self.lpar_name))
self.ssh.run_command(
'migrlpar -o m -m %s -t %s -p %s' % (src_mg_system, dest_mg_system, self.lpar_name))
if self.is_lpar_in_managed_system(dest_mg_system, self.lpar_name):
log.info("Migration of lpar %s from %s to %s is successfull" %
(self.lpar_name, src_mg_system, dest_mg_system))
self.mg_system = dest_mg_system
return True
log.info("Migration of lpar %s from %s to %s failed" %
(self.lpar_name, src_mg_system, dest_mg_system))
return False
def run_command_ignore_fail(self, command, timeout=60, retry=0):
return self.ssh.run_command_ignore_fail(command, timeout*self.timeout_factor, retry)
def run_command(self, i_cmd, timeout=15):
return self.ssh.run_command(i_cmd, timeout)
class OpTestHMC(HMCUtil):
    '''
    Perform various HMC operations on an LPAR.

    The host IP, username and password of the HMC have to be passed to the
    class initially while creating the object for the class.
    '''

    def __init__(self, hmc_ip, user_name, password, scratch_disk="", proxy="",
                 logfile=sys.stdout, managed_system=None, lpar_name=None, prompt=None,
                 block_setup_term=None, delaybeforesend=None, timeout_factor=1,
                 lpar_prof=None, lpar_vios=None, lpar_user=None, lpar_password=None,
                 check_ssh_keys=False, known_hosts_file=None, tgt_managed_system=None,
                 tgt_lpar=None):
        # bug fix: lpar_password previously defaulted to the literal text
        # '<PASSWORD>' (anonymisation residue, not valid Python); None matches
        # the convention used by the sibling optional parameters.
        super(OpTestHMC, self).__init__(hmc_ip, user_name, password, scratch_disk,
                                        proxy, logfile, managed_system, lpar_name, prompt,
                                        block_setup_term, delaybeforesend, timeout_factor,
                                        lpar_prof, lpar_vios, lpar_user, lpar_password,
                                        check_ssh_keys, known_hosts_file, tgt_managed_system,
                                        tgt_lpar)
        self.console = HMCConsole(hmc_ip, user_name, password, managed_system, lpar_name,
                                  lpar_vios, lpar_prof, lpar_user, lpar_password)

    def set_system(self, system):
        """Attach the OpTestSystem object to this HMC, its SSH link and console."""
        self.system = system
        self.ssh.set_system(system)
        self.console.set_system(system)

    def get_rest_api(self):
        # The HMC backend does not expose a REST API object.
        return None

    def has_os_boot_sensor(self):
        return False

    def has_occ_active_sensor(self):
        return False

    def has_host_status_sensor(self):
        return False

    def has_inband_bootdev(self):
        return False

    def get_host_console(self):
        """Return the HMCConsole used to talk to the LPAR."""
        return self.console
class HMCConsole(HMCUtil):
    """
    Manage the interactive console (vterm) of an LPAR reached through the HMC.

    Wraps an sshpass/ssh session to the HMC plus a mkvterm session to the
    LPAR (via pexpect), and tracks connection and login state.
    """
def __init__(self, hmc_ip, user_name, password, managed_system, lpar_name,
lpar_vios, lpar_prof, lpar_user, lpar_password,
block_setup_term=None, delaybeforesend=None, timeout_factor=1,
logfile=sys.stdout, prompt=None, scratch_disk="",
check_ssh_keys=False, known_hosts_file=None, proxy=""):
self.logfile = logfile
self.hmc_ip = hmc_ip
self.user = user_name
self.passwd = password
self.mg_system = managed_system
self.util = OpTestUtil()
self.expect_prompt = self.util.build_prompt(prompt) + "$"
self.lpar_name = lpar_name
self.lpar_vios = lpar_vios
self.lpar_prof = lpar_prof
self.lpar_user = lpar_user
self.lpar_password = <PASSWORD>
self.scratch_disk = scratch_disk
self.proxy = proxy
self.state = ConsoleState.DISCONNECTED
self.delaybeforesend = delaybeforesend
self.system = None
# OpTestUtil instance is NOT conf's
self.prompt = prompt
self.pty = None
self.delaybeforesend = delaybeforesend
# allows caller specific control of when to block setup_term
self.block_setup_term = block_setup_term
# tells setup_term to not throw exceptions, like when system off
self.setup_term_quiet = 0
# flags the object to abandon setup_term operations, like when system off
self.setup_term_disable = 0
# FUTURE - System Console currently tracked in System Object
# state tracking, reset on boot and state changes
self.PS1_set = -1
self.LOGIN_set = -1
self.SUDO_set = -1
super(HMCConsole, self).__init__(hmc_ip, user_name, password, scratch_disk, proxy,
logfile, managed_system, lpar_name, prompt,
block_setup_term, delaybeforesend, timeout_factor,
lpar_prof, lpar_vios, lpar_user, lpar_password,
check_ssh_keys, known_hosts_file)
def set_system(self, system):
self.ssh.set_system(system)
self.system = system
self.pty = self.get_console()
self.pty.set_system(system)
def get_host_console(self):
return self.pty
def set_system_setup_term(self, flag):
self.system.block_setup_term = flag
def get_system_setup_term(self):
return self.system.block_setup_term
def get_scratch_disk(self):
return self.scratch_disk
def get_proxy(self):
return self.proxy
def hostname(self):
return self.hmc_ip
def username(self):
return self.user
def password(self):
return self.<PASSWORD>
def set_block_setup_term(self, flag):
self.block_setup_term = flag
def get_block_setup_term(self):
return self.block_setup_term
def enable_setup_term_quiet(self):
self.setup_term_quiet = 1
self.setup_term_disable = 0
def disable_setup_term_quiet(self):
self.setup_term_quiet = 0
self.setup_term_disable = 0
def close(self):
self.util.clear_state(self)
try:
self.pty.close()
if self.pty.status != -1: # leaving for debug
if os.WIFEXITED(self.pty.status):
os.WEXITSTATUS(self.pty.status)
else:
os.WTERMSIG(self.pty.status)
self.state = ConsoleState.DISCONNECTED
except pexpect.ExceptionPexpect:
self.state = ConsoleState.DISCONNECTED
raise "HMC Console: failed to close console"
except Exception:
self.state = ConsoleState.DISCONNECTED
log.debug("HMC close -> TERMINATE")
    def connect(self, logger=None):
        """Open an ssh session to the HMC and a vterm (mkvterm) to the LPAR.

        :param logger: optional logger to mirror console output into
        :return: the pexpect pty for the LPAR console
        :raises CommandFailed: if the session cannot be spawned or the
            vterm does not open
        """
        if self.state == ConsoleState.CONNECTED:
            return self.pty
        self.util.clear_state(self)  # clear when coming in DISCONNECTED
        log.info("De-activating the console")
        # Any stale vterm has to be closed before mkvterm can succeed.
        self.deactivate_lpar_console()
        log.debug("#HMC Console CONNECT")
        command = "sshpass -p %s ssh -p 22 -l %s %s -o PubkeyAuthentication=no"\
            " -o afstokenpassing=no -q -o 'UserKnownHostsFile=/dev/null'"\
            " -o 'StrictHostKeyChecking=no'"
        try:
            self.pty = Spawn(
                command % (self.passwd, self.user, self.hmc_ip))
            log.info("Opening the LPAR console")
            time.sleep(STALLTIME)
            self.pty.send('\r')
            self.pty.sendline("mkvterm -m %s -p %s" % (self.mg_system, self.lpar_name))
            self.pty.send('\r')
            time.sleep(STALLTIME)
            i = self.pty.expect(
                ["Open Completed.", pexpect.TIMEOUT], timeout=60)
            # Mirror console output into the requested logger (or module log).
            self.pty.logfile = sys.stdout
            if logger:
                self.pty.logfile_read = OpTestLogger.FileLikeLogger(logger)
            else:
                self.pty.logfile_read = OpTestLogger.FileLikeLogger(log)
            if i == 0:
                time.sleep(STALLTIME)
                self.state = ConsoleState.CONNECTED
                self.pty.setwinsize(1000, 1000)
            else:
                raise OpTestError("Check the lpar activate command")
        except Exception as exp:
            self.state = ConsoleState.DISCONNECTED
            raise CommandFailed('OPexpect.spawn',
                                'OPexpect.spawn encountered a problem: ' + str(exp), -1)
        if self.delaybeforesend:
            self.pty.delaybeforesend = self.delaybeforesend
        # NOTE(review): presumably mkvterm can exit right away (e.g. vterm
        # already in use); surface that as a CommandFailed — confirm.
        if not self.pty.isalive():
            raise CommandFailed("mkvterm", self.pty.read(), self.pty.status)
        return self.pty
def check_state(self):
return self.state
    def get_console(self, logger=None):
        """Return a usable console pty, reconnecting and priming it if needed.

        Reconnects when DISCONNECTED, nudges the console toward a login or
        shell prompt, and runs setup_term unless the system flags say the
        terminal is already prepared.
        """
        if self.state == ConsoleState.DISCONNECTED:
            self.util.clear_state(self)
            self.connect(logger=logger)
            time.sleep(STALLTIME)
            l_rc = self.pty.expect(["login:", pexpect.TIMEOUT], timeout=30)
            if l_rc == 0:
                self.pty.send('\r')
            else:
                # No login prompt seen: try to set a known shell prompt (PS1).
                time.sleep(STALLTIME)
                self.pty.send('\r')
                self.pty.sendline('PS1=' + self.util.build_prompt(self.prompt))
                self.pty.send('\r')
                time.sleep(STALLTIME)
                l_rc = self.pty.expect(
                    [self.expect_prompt, pexpect.TIMEOUT], timeout=WAITTIME)
                if l_rc == 0:
                    log.debug("Shell prompt changed")
                else:
                    # Neither login nor shell prompt: assume still booting.
                    self.pty.send('\r')
                    log.debug("Waiting till booting!")
                    self.pty = self.get_login_prompt()
        if self.system.SUDO_set != 1 or self.system.LOGIN_set != 1 or self.system.PS1_set != 1:
            self.util.setup_term(self.system, self.pty,
                                 None, self.system.block_setup_term)
        # Clear buffer before usage
        self.pty.buffer = ""
        return self.pty
    def get_login_prompt(self):
        """Wait for the LPAR's login (or shell) prompt and return the pty.

        Marks the system's PS1/LOGIN/SUDO flags as set either way so later
        code can use the HMC SSH path.
        """
        # Assuming 'Normal' boot set in LPAR profile
        # We wait for upto 500 seconds for LPAR to boot to OS
        self.pty.send('\r')
        time.sleep(STALLTIME)
        log.debug("Waiting for login screen")
        i = self.pty.expect(["login:", self.expect_prompt, pexpect.TIMEOUT], timeout=30)
        if i == 0:
            log.debug("System has booted")
            time.sleep(STALLTIME)
        elif i == 1:
            log.debug("Console already logged in")
        else:
            log.error("Failed to get login prompt %s", self.pty.before)
        # To cheat system for making using of HMC SSH
        self.system.PS1_set = 1
        self.system.LOGIN_set = 1
        self.system.SUDO_set = 1
        return self.pty
    def run_command(self, i_cmd, timeout=15):
        # Delegate to OpTestUtil.run_command, using this console as the terminal.
        # NOTE(review): the next line appears to carry trailing
        # dataset-export residue ('| 0.318697 | 0.223419 |') fused onto it —
        # confirm against the upstream file and remove.
return self.util.run_command(self, i_cmd, timeout) | 0.318697 | 0.223419 |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for BboxTransform."""
import numpy as np
from vega.core.common.class_factory import ClassFactory, ClassType
def bbox_flip(bboxes, img_shape):
    """Return a horizontally mirrored copy of the boxes.

    :param bboxes: boxes of shape (..., 4*k) in (x1, y1, x2, y2) layout
    :type bboxes: ndarray
    :param img_shape: (height, width) of the image
    :type img_shape: tuple
    :return: the flipped boxes; the input array is left untouched
    :rtype: ndarray
    """
    width = img_shape[1]
    mirrored = bboxes.copy()
    # x1' = W - x2 - 1 and x2' = W - x1 - 1 keep x1' <= x2' after the flip.
    mirrored[..., 0::4] = width - bboxes[..., 2::4] - 1
    mirrored[..., 2::4] = width - bboxes[..., 0::4] - 1
    return mirrored
@ClassFactory.register(ClassType.TRANSFORM)
class BboxTransform(object):
    """Bbox transform, which contains.
    1. rescale bboxes according to image size
    2. flip bboxes (if needed)
    3. pad the first dimension to `max_num_gts`
    :param max_num_gts: the maximum number of the ground truth, defaults to None
    :type max_num_gts: int
    """
    def __init__(self, max_num_gts=None):
        """Construct the BboxTransform class."""
        self.max_num_gts = max_num_gts
    def __call__(self, bboxes, img_shape, scale_factor, flip=False):
        """Call function of BboxTransform.
        :param bboxes: bounding box
        :type bboxes: ndarray
        :param img_shape: image shape
        :type img_shape: tuple
        :param scale_factor: the scale factor according to the image transform
        :type scale_factor: float
        :param flip: Whether to flip or not, defaults to False
        :type flip: bool
        :return: the bounding box after transform
        :rtype: ndarray
        """
        gt_bboxes = bboxes * scale_factor
        if flip:
            gt_bboxes = bbox_flip(gt_bboxes, img_shape)
        # Clamp coordinates into the image: x into [0, W-1], y into [0, H-1].
        gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
        gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
        if self.max_num_gts is None:
            return gt_bboxes
        else:
            num_gts = gt_bboxes.shape[0]
            # NOTE(review): if num_gts > max_num_gts the assignment below
            # raises a broadcast error — confirm callers guarantee the bound.
            padded_bboxes = np.zeros((self.max_num_gts, 4), dtype=np.float32)
            padded_bboxes[:num_gts, :] = gt_bboxes
return padded_bboxes | vega/datasets/pytorch/transforms/BboxTransform.py |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for BboxTransform."""
import numpy as np
from vega.core.common.class_factory import ClassFactory, ClassType
def bbox_flip(bboxes, img_shape):
    """Mirror boxes horizontally inside an image of the given shape.

    :param bboxes: array of shape (..., 4*k), (x1, y1, x2, y2) ordering
    :type bboxes: ndarray
    :param img_shape: (height, width) tuple
    :type img_shape: tuple
    :return: a new array with the flipped boxes
    :rtype: ndarray
    """
    img_w = img_shape[1]
    flipped = bboxes.copy()
    # Swap and mirror the x coordinates; the -1 keeps pixel indices valid.
    flipped[..., 0::4], flipped[..., 2::4] = (img_w - bboxes[..., 2::4] - 1,
                                              img_w - bboxes[..., 0::4] - 1)
    return flipped
@ClassFactory.register(ClassType.TRANSFORM)
class BboxTransform(object):
    """Bbox transform, which contains.
    1. rescale bboxes according to image size
    2. flip bboxes (if needed)
    3. pad the first dimension to `max_num_gts`
    :param max_num_gts: the maximum number of the ground truth, defaults to None
    :type max_num_gts: int
    """
    def __init__(self, max_num_gts=None):
        """Construct the BboxTransform class."""
        self.max_num_gts = max_num_gts
    def __call__(self, bboxes, img_shape, scale_factor, flip=False):
        """Call function of BboxTransform.
        :param bboxes: bounding box
        :type bboxes: ndarray
        :param img_shape: image shape
        :type img_shape: tuple
        :param scale_factor: the scale factor according to the image transform
        :type scale_factor: float
        :param flip: Whether to flip or not, defaults to False
        :type flip: bool
        :return: the bounding box after transform
        :rtype: ndarray
        """
        gt_bboxes = bboxes * scale_factor
        if flip:
            gt_bboxes = bbox_flip(gt_bboxes, img_shape)
        # Clamp coordinates into the image: x into [0, W-1], y into [0, H-1].
        gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
        gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
        if self.max_num_gts is None:
            return gt_bboxes
        else:
            num_gts = gt_bboxes.shape[0]
            # NOTE(review): if num_gts > max_num_gts the assignment below
            # raises a broadcast error — confirm callers guarantee the bound.
            padded_bboxes = np.zeros((self.max_num_gts, 4), dtype=np.float32)
            padded_bboxes[:num_gts, :] = gt_bboxes
return padded_bboxes | 0.903241 | 0.348008 |
import pytest
from .. import api
# test setup to avoid cluttering pytest parameterize
# Expected citation URLs and BibTeX entries per template.
# NOTE(review): the '<NAME>' tokens inside the BibTeX strings look like
# anonymisation placeholders rather than real author names — confirm against
# the upstream fixtures before relying on exact string equality.
mni2009_urls = [
    "https://doi.org/10.1016/j.neuroimage.2010.07.033",
    "https://doi.org/10.1016/S1053-8119(09)70884-5",
    "http://nist.mni.mcgill.ca/?p=904",
    "https://doi.org/10.1007/3-540-48714-X_16",
]
# First expected BibTeX entry for MNI152NLin2009cAsym.
mni2009_fbib = """\
@article{Fonov_2011,
\tdoi = {10.1016/j.neuroimage.2010.07.033},
\turl = {https://doi.org/10.1016%2Fj.neuroimage.2010.07.033},
\tyear = 2011,
\tmonth = {jan},
\tpublisher = {Elsevier {BV}},
\tvolume = {54},
\tnumber = {1},
\tpages = {313--327},
\tauthor = {<NAME> and <NAME> and <NAME> and <NAME> \
Almli and <NAME> and <NAME>},
\ttitle = {Unbiased average age-appropriate atlases for pediatric studies},
\tjournal = {{NeuroImage}}
}"""
# Last expected BibTeX entry for MNI152NLin2009cAsym.
mni2009_lbib = """\
@incollection{Collins_1999,
\tdoi = {10.1007/3-540-48714-x_16},
\turl = {https://doi.org/10.1007%2F3-540-48714-x_16},
\tyear = 1999,
\tpublisher = {<NAME>},
\tpages = {210--223},
\tauthor = {<NAME> and <NAME> and <NAME>{\\'{e}} and <NAME>},
\ttitle = {{ANIMAL}$\\mathplus${INSECT}: Improved Cortical Structure Segmentation},
\tbooktitle = {Lecture Notes in Computer Science}
}"""
fslr_urls = [
    "https://doi.org/10.1093/cercor/bhr291",
    "https://github.com/Washington-University/HCPpipelines/tree/master/global/templates",
]
# First expected BibTeX entry for fsLR.
fslr_fbib = """\
@article{Van_Essen_2011,
\tdoi = {10.1093/cercor/bhr291},
\turl = {https://doi.org/10.1093%2Fcercor%2Fbhr291},
\tyear = 2011,
\tmonth = {nov},
\tpublisher = {Oxford University Press ({OUP})},
\tvolume = {22},
\tnumber = {10},
\tpages = {2241--2262},
\tauthor = {<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
\ttitle = {Parcellations and Hemispheric Asymmetries of Human Cerebral Cortex Analyzed on \
Surface-Based Atlases},
\tjournal = {Cerebral Cortex}
}"""
# fsLR's last citation is a plain URL, not a BibTeX record.
fslr_lbib = (
    "https://github.com/Washington-University/HCPpipelines/tree/master/global/templates"
)
@pytest.mark.parametrize(
    "template,urls,fbib,lbib",
    [
        ("MNI152NLin2009cAsym", mni2009_urls, mni2009_fbib, mni2009_lbib),
        ("fsLR", fslr_urls, fslr_fbib, fslr_lbib),
        ("fsaverage", [], None, None),
    ],
)
def test_citations(tmp_path, template, urls, fbib, lbib):
    """Check the correct composition of citations.

    Verifies both the plain URL list and, when present, the first and last
    BibTeX entries returned by ``api.get_citations(..., bibtex=True)``.
    """
    assert api.get_citations(template) == urls
    bibs = api.get_citations(template, bibtex=True)
    if bibs:
        assert "".join(bibs[0]) == fbib
        assert "".join(bibs[-1]) == lbib
    else:
        # fsaverage has no citations currently
assert template == "fsaverage" | templateflow/tests/test_api.py | import pytest
from .. import api
# test setup to avoid cluttering pytest parameterize
# Expected citation URLs and BibTeX entries per template.
# NOTE(review): the '<NAME>' tokens inside the BibTeX strings look like
# anonymisation placeholders rather than real author names — confirm against
# the upstream fixtures before relying on exact string equality.
mni2009_urls = [
    "https://doi.org/10.1016/j.neuroimage.2010.07.033",
    "https://doi.org/10.1016/S1053-8119(09)70884-5",
    "http://nist.mni.mcgill.ca/?p=904",
    "https://doi.org/10.1007/3-540-48714-X_16",
]
# First expected BibTeX entry for MNI152NLin2009cAsym.
mni2009_fbib = """\
@article{Fonov_2011,
\tdoi = {10.1016/j.neuroimage.2010.07.033},
\turl = {https://doi.org/10.1016%2Fj.neuroimage.2010.07.033},
\tyear = 2011,
\tmonth = {jan},
\tpublisher = {Elsevier {BV}},
\tvolume = {54},
\tnumber = {1},
\tpages = {313--327},
\tauthor = {<NAME> and <NAME> and <NAME> and <NAME> \
Almli and <NAME> and <NAME>},
\ttitle = {Unbiased average age-appropriate atlases for pediatric studies},
\tjournal = {{NeuroImage}}
}"""
# Last expected BibTeX entry for MNI152NLin2009cAsym.
mni2009_lbib = """\
@incollection{Collins_1999,
\tdoi = {10.1007/3-540-48714-x_16},
\turl = {https://doi.org/10.1007%2F3-540-48714-x_16},
\tyear = 1999,
\tpublisher = {<NAME>},
\tpages = {210--223},
\tauthor = {<NAME> and <NAME> and <NAME>{\\'{e}} and <NAME>},
\ttitle = {{ANIMAL}$\\mathplus${INSECT}: Improved Cortical Structure Segmentation},
\tbooktitle = {Lecture Notes in Computer Science}
}"""
fslr_urls = [
    "https://doi.org/10.1093/cercor/bhr291",
    "https://github.com/Washington-University/HCPpipelines/tree/master/global/templates",
]
# First expected BibTeX entry for fsLR.
fslr_fbib = """\
@article{Van_Essen_2011,
\tdoi = {10.1093/cercor/bhr291},
\turl = {https://doi.org/10.1093%2Fcercor%2Fbhr291},
\tyear = 2011,
\tmonth = {nov},
\tpublisher = {Oxford University Press ({OUP})},
\tvolume = {22},
\tnumber = {10},
\tpages = {2241--2262},
\tauthor = {<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
\ttitle = {Parcellations and Hemispheric Asymmetries of Human Cerebral Cortex Analyzed on \
Surface-Based Atlases},
\tjournal = {Cerebral Cortex}
}"""
# fsLR's last citation is a plain URL, not a BibTeX record.
fslr_lbib = (
    "https://github.com/Washington-University/HCPpipelines/tree/master/global/templates"
)
@pytest.mark.parametrize(
    "template,urls,fbib,lbib",
    [
        ("MNI152NLin2009cAsym", mni2009_urls, mni2009_fbib, mni2009_lbib),
        ("fsLR", fslr_urls, fslr_fbib, fslr_lbib),
        ("fsaverage", [], None, None),
    ],
)
def test_citations(tmp_path, template, urls, fbib, lbib):
    """Check the correct composition of citations.

    Verifies both the plain URL list and, when present, the first and last
    BibTeX entries returned by ``api.get_citations(..., bibtex=True)``.
    """
    assert api.get_citations(template) == urls
    bibs = api.get_citations(template, bibtex=True)
    if bibs:
        assert "".join(bibs[0]) == fbib
        assert "".join(bibs[-1]) == lbib
    else:
        # fsaverage has no citations currently
assert template == "fsaverage" | 0.486819 | 0.591753 |