max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
hyperplan/labels_descriptors.py | hyperplan-io/cli | 1 | 6614051 | from prettytable import PrettyTable
from hyperplan.qcm_result import QCMResult
from hyperplan.get_input import get_feature_type, get_feature_dimension, get_alphanumerical_id
from hyperplan.label_schema import LabelSchema
def post_features():
return []
def noop():
return []
def qcm(choice_1, choice_2, choice_3):
print('1. Create a new label')
print('2. Finish and save')
print('3. Finish and ignore')
choice = input('Choose: ')
if choice == '1':
return (QCMResult.CREATED_FEATURE, choice_1())
elif choice == '2':
return (QCMResult.CLOSE_AND_SAVE, choice_2())
elif choice == '3':
return (QCMResult.CLOSE_AND_DO_NOT_SAVE, choice_3())
else:
return qcm(choice_1, choice_2, choice_3)
def list_labels(api, logger):
try:
labels = api.list_labels(logger, log=False)
logger.debug('server returns labels: {}'.format(labels))
if labels == None:
return None
table = PrettyTable(['Id', 'Type', 'Description', 'oneOf'])
for label in labels:
data = label['data']
label_id = label['id']
label_type = data['type']
label_description = data['description']
if label_type == 'oneOf':
label_one_of = ", ".join(data['oneOf'])
table.add_row([label_id, label_type, label_description, label_one_of])
elif label_type == 'dynamic':
table.add_row([label_id, label_type, label_description, ''])
else:
print('Label type is unknown')
print(table)
return labels
except Exception as err:
logger.warn('An unhandled error occurred in list_labels: {}'.format(err))
return None
def describe_label(api, logger, label_id):
try:
label = api.get_labels(logger, label_id, log=False)
logger.debug('server returns label: {}'.format(label))
if label == None:
return None
data = label['data']
label_type = data['type']
label_description = data['description']
if label_type == 'oneOf':
label_one_of = ", ".join(data['oneOf'])
table = PrettyTable(['Id', 'Type', 'Description', 'oneOf'])
table.add_row([label_id, label_type, label_description, label_one_of])
print(table)
elif label_type == 'dynamic':
table = PrettyTable(['Id', 'Type', 'Description'])
table.add_row([label_id, label_type, label_description])
print(table)
else:
print('Label type is unknown')
return label
except Exception as err:
logger.warn('an unhandled error occurred in describe_labels: {}'.format(err))
return None
def get_labels_id():
feature_id = input('id: ')
if not feature_id.isalnum():
print('Label descriptor id should be alphanumeric')
return get_labels_id()
return feature_id
def get_label_type():
label_type = input('type(oneOf or dynamic): ')
if label_type != 'oneOf' and label_type != 'dynamic':
print('Label type should either be oneOf or dynamic')
return get_label_type()
return label_type
def get_label_one_of():
one_of = input('enter the labels separated by a comma: ')
return one_of.split(",")
def get_label_description():
description = input('description: ')
return description
def create_labels(api, logger, label_id, label_type=None, label_one_of=None, label_description=None):
label_data = get_label_data(label_type, label_one_of, label_description)
label_schema = LabelSchema(label_id, label_data)
logger.debug('json payload for create_labels: {}'.format(label_schema.to_json()))
try:
api.create_label(logger, label_schema)
except Exception as err:
logger.warn('an unhandled error occurred in create_labels: {}'.format(err))
return None
def label_build(label_type, label_one_of, label_description):
return {'type': label_type, 'oneOf': label_one_of, 'description': label_description}
def get_label_data(label_type=None, label_one_of=None, label_description=None):
if label_type == None:
label_type = get_label_type()
if label_type == 'oneOf' and label_one_of == None:
label_one_of = get_label_one_of()
elif label_type == 'dynamic':
label_one_of = []
if label_description == None:
label_description = get_label_description()
return label_build(label_type, label_one_of, label_description)
| from prettytable import PrettyTable
from hyperplan.qcm_result import QCMResult
from hyperplan.get_input import get_feature_type, get_feature_dimension, get_alphanumerical_id
from hyperplan.label_schema import LabelSchema
def post_features():
return []
def noop():
return []
def qcm(choice_1, choice_2, choice_3):
print('1. Create a new label')
print('2. Finish and save')
print('3. Finish and ignore')
choice = input('Choose: ')
if choice == '1':
return (QCMResult.CREATED_FEATURE, choice_1())
elif choice == '2':
return (QCMResult.CLOSE_AND_SAVE, choice_2())
elif choice == '3':
return (QCMResult.CLOSE_AND_DO_NOT_SAVE, choice_3())
else:
return qcm(choice_1, choice_2, choice_3)
def list_labels(api, logger):
try:
labels = api.list_labels(logger, log=False)
logger.debug('server returns labels: {}'.format(labels))
if labels == None:
return None
table = PrettyTable(['Id', 'Type', 'Description', 'oneOf'])
for label in labels:
data = label['data']
label_id = label['id']
label_type = data['type']
label_description = data['description']
if label_type == 'oneOf':
label_one_of = ", ".join(data['oneOf'])
table.add_row([label_id, label_type, label_description, label_one_of])
elif label_type == 'dynamic':
table.add_row([label_id, label_type, label_description, ''])
else:
print('Label type is unknown')
print(table)
return labels
except Exception as err:
logger.warn('An unhandled error occurred in list_labels: {}'.format(err))
return None
def describe_label(api, logger, label_id):
try:
label = api.get_labels(logger, label_id, log=False)
logger.debug('server returns label: {}'.format(label))
if label == None:
return None
data = label['data']
label_type = data['type']
label_description = data['description']
if label_type == 'oneOf':
label_one_of = ", ".join(data['oneOf'])
table = PrettyTable(['Id', 'Type', 'Description', 'oneOf'])
table.add_row([label_id, label_type, label_description, label_one_of])
print(table)
elif label_type == 'dynamic':
table = PrettyTable(['Id', 'Type', 'Description'])
table.add_row([label_id, label_type, label_description])
print(table)
else:
print('Label type is unknown')
return label
except Exception as err:
logger.warn('an unhandled error occurred in describe_labels: {}'.format(err))
return None
def get_labels_id():
feature_id = input('id: ')
if not feature_id.isalnum():
print('Label descriptor id should be alphanumeric')
return get_labels_id()
return feature_id
def get_label_type():
label_type = input('type(oneOf or dynamic): ')
if label_type != 'oneOf' and label_type != 'dynamic':
print('Label type should either be oneOf or dynamic')
return get_label_type()
return label_type
def get_label_one_of():
one_of = input('enter the labels separated by a comma: ')
return one_of.split(",")
def get_label_description():
description = input('description: ')
return description
def create_labels(api, logger, label_id, label_type=None, label_one_of=None, label_description=None):
label_data = get_label_data(label_type, label_one_of, label_description)
label_schema = LabelSchema(label_id, label_data)
logger.debug('json payload for create_labels: {}'.format(label_schema.to_json()))
try:
api.create_label(logger, label_schema)
except Exception as err:
logger.warn('an unhandled error occurred in create_labels: {}'.format(err))
return None
def label_build(label_type, label_one_of, label_description):
return {'type': label_type, 'oneOf': label_one_of, 'description': label_description}
def get_label_data(label_type=None, label_one_of=None, label_description=None):
if label_type == None:
label_type = get_label_type()
if label_type == 'oneOf' and label_one_of == None:
label_one_of = get_label_one_of()
elif label_type == 'dynamic':
label_one_of = []
if label_description == None:
label_description = get_label_description()
return label_build(label_type, label_one_of, label_description)
| none | 1 | 2.336209 | 2 | |
guenther/config/__init__.py | lad1337/guenther | 0 | 6614052 | <gh_stars>0
from configparser import ConfigParser
import logging
from pathlib import Path
from attrdict import AttrDict
from .default import ARGUMENT_MAP
from .default import DEFAUL_CONFIG
logger = logging.getLogger('guenther')
def configure_parser(parser):
for argument in sorted(ARGUMENT_MAP):
argument_param = ('--%s' % argument).replace('_', '-')
parser.add_argument(argument_param, dest=argument)
class Config(ConfigParser):
def __init__(self, *args, **kwargs):
self.arguments = kwargs
super(Config, self).__init__()
self.load_defaults()
self.load_arguments(self.arguments)
self.init(self.arguments)
self.load_arguments(self.arguments)
def init(self, arguments):
config_path = Path(self.get('path', 'config'))
if not config_path.parent.exists():
self.create_directories(config_path)
if arguments.get('reset') or not config_path.exists():
self.write_config_file(config_path)
logger.debug('Reading config file at: %s', config_path)
self.read(str(config_path))
def load_defaults(self):
def value_for_maybe_type(value):
if isinstance(value, tuple):
return value[0]
return str(value)
for section, values in DEFAUL_CONFIG.items():
self[section] = {
k: value_for_maybe_type(v) for k, v in values.items()}
def load_arguments(self, arguments):
for name, value in arguments.items():
if value is None:
continue
section, target_name = ARGUMENT_MAP.get(name, (None, None))
# TODO(lad1337): overwrite only values that have been passed, not with defaults
if section is None or target_name is None:
logger.warning('Unknown command line argument: "%s"', name)
continue
self.set(section, target_name, str(value))
logger.debug(
'Overwriting config "%s:%s" with "%s"',
section,
target_name,
str(value)
)
def create_directories(self, config_path): # noqa
logger.info("Creating directories at: %s", config_path.parent)
config_path.parent.mkdir(parents=True)
def write_config_file(self, config_path): # noqa
with config_path.open("w") as config_file:
logger.info("Writing config at: %s", config_path)
self.write(config_file)
def typed(self, section, key):
default_value_data = DEFAUL_CONFIG.get(section, {}).get(key)
if not isinstance(default_value_data, tuple):
return self.get(section, key)
type_ = default_value_data[1]
if type_ is bool:
return self.getboolean(section, key)
elif type_ is int:
return self.getint(section, key)
elif type_ is float:
return self.getfloat(section, key)
return self.get(section, key)
def __getattr__(self, section):
if section not in self.sections():
raise AttributeError('No section "%s"' % section)
return AttrDict({
key: self.typed(section, key) for key in self[section]
})
| from configparser import ConfigParser
import logging
from pathlib import Path
from attrdict import AttrDict
from .default import ARGUMENT_MAP
from .default import DEFAUL_CONFIG
logger = logging.getLogger('guenther')
def configure_parser(parser):
for argument in sorted(ARGUMENT_MAP):
argument_param = ('--%s' % argument).replace('_', '-')
parser.add_argument(argument_param, dest=argument)
class Config(ConfigParser):
def __init__(self, *args, **kwargs):
self.arguments = kwargs
super(Config, self).__init__()
self.load_defaults()
self.load_arguments(self.arguments)
self.init(self.arguments)
self.load_arguments(self.arguments)
def init(self, arguments):
config_path = Path(self.get('path', 'config'))
if not config_path.parent.exists():
self.create_directories(config_path)
if arguments.get('reset') or not config_path.exists():
self.write_config_file(config_path)
logger.debug('Reading config file at: %s', config_path)
self.read(str(config_path))
def load_defaults(self):
def value_for_maybe_type(value):
if isinstance(value, tuple):
return value[0]
return str(value)
for section, values in DEFAUL_CONFIG.items():
self[section] = {
k: value_for_maybe_type(v) for k, v in values.items()}
def load_arguments(self, arguments):
for name, value in arguments.items():
if value is None:
continue
section, target_name = ARGUMENT_MAP.get(name, (None, None))
# TODO(lad1337): overwrite only values that have been passed, not with defaults
if section is None or target_name is None:
logger.warning('Unknown command line argument: "%s"', name)
continue
self.set(section, target_name, str(value))
logger.debug(
'Overwriting config "%s:%s" with "%s"',
section,
target_name,
str(value)
)
def create_directories(self, config_path): # noqa
logger.info("Creating directories at: %s", config_path.parent)
config_path.parent.mkdir(parents=True)
def write_config_file(self, config_path): # noqa
with config_path.open("w") as config_file:
logger.info("Writing config at: %s", config_path)
self.write(config_file)
def typed(self, section, key):
default_value_data = DEFAUL_CONFIG.get(section, {}).get(key)
if not isinstance(default_value_data, tuple):
return self.get(section, key)
type_ = default_value_data[1]
if type_ is bool:
return self.getboolean(section, key)
elif type_ is int:
return self.getint(section, key)
elif type_ is float:
return self.getfloat(section, key)
return self.get(section, key)
def __getattr__(self, section):
if section not in self.sections():
raise AttributeError('No section "%s"' % section)
return AttrDict({
key: self.typed(section, key) for key in self[section]
}) | en | 0.521378 | # TODO(lad1337): overwrite only values that have been passed, not with defaults # noqa # noqa | 2.624762 | 3 |
myGit.py | luizcartolano2/myGit | 0 | 6614053 | <gh_stars>0
"""
Implement just enough git to commit and push to GitHub.
Read the story here: http://benhoyt.com/writings/pygit/
Released under a permissive MIT license (see LICENSE.txt).
"""
import argparse, collections, difflib, enum, hashlib, operator, os, stat
import struct, sys, time, urllib, zlib
import requests
# Data for one entry in the git index (.git/index)
IndexEntry = collections.namedtuple('IndexEntry', [
'ctime_s', 'ctime_n', 'mtime_s', 'mtime_n', 'dev', 'ino', 'mode', 'uid',
'gid', 'size', 'sha1', 'flags', 'path',
])
class ObjectType(enum.Enum):
"""Object type enum. There are other types too, but we don't need them.
See "enum object_type" in git's source (git/cache.h).
"""
commit = 1
tree = 2
blob = 3
class MyGit(object):
"""docstring for MyGit"""
def __init__(self):
return
def read_file(self,path):
"""Read contents of file at given path as bytes."""
with open(path, 'rb') as f:
return f.read()
def write_file(self, path, data):
"""Write data bytes to file at given path."""
with open(path, 'wb') as f:
f.write(data)
def init(self, repo):
"""Create directory for repo and initialize .git directory."""
os.mkdir(repo)
os.mkdir(os.path.join(repo, '.git'))
for name in ['objects', 'refs', 'refs/heads']:
os.mkdir(os.path.join(repo, '.git', name))
self.write_file(path=os.path.join(repo, '.git', 'HEAD'),data=b'ref: refs/heads/master')
print('initialized empty repository: {}'.format(repo))
# def diff(self):
# """Show diff of files changed (between index and working copy)."""
# changed, _, _ = get_status()
# entries_by_path = {e.path: e for e in read_index()}
# for i, path in enumerate(changed):
# sha1 = entries_by_path[path].sha1.hex()
# obj_type, data = read_object(sha1)
# assert obj_type == 'blob'
# index_lines = data.decode().splitlines()
# working_lines = read_file(path).decode().splitlines()
# diff_lines = difflib.unified_diff(
# index_lines, working_lines,
# '{} (index)'.format(path),
# '{} (working copy)'.format(path),
# lineterm='')
# for line in diff_lines:
# print(line)
# if i < len(changed) - 1:
# print('-' * 70)
def hash_object(self, data, obj_type, write=True):
"""Compute hash of object data of given type and write to object store
if "write" is True. Return SHA-1 object hash as hex string.
"""
header = '{},{}'.format(obj_type, len(data)).encode()
full_data = header + b'\x00' + data
sha1 = hashlib.sha1(full_data).hexdigest()
if write:
path = os.path.join('.git', 'objects', sha1[:2], sha1[2:])
if not os.path.exists(path):
os.makedirs(os.path.dirname(path), exist_ok = True)
self.write_file(path, zlib.compress(full_data))
return sha1
def read_index(self):
"""Read git index file and return list of IndexEntry objects."""
try:
data = self.read_file(os.path.join('.git', 'index'))
except FileNotFoundError:
return []
digest = hashlib.sha1(data[:-20]).digest()
assert digest == data[-20:], 'invalid index checksum'
signature, version, num_entries = struct.unpack('!4sLL', data[:12])
assert signature == b'DIRC', \
'invalid index signature {}'.format(signature)
assert version == 2, 'unknown index version {}'.format(version)
entry_data = data[12:-20]
entries = []
i = 0
while i + 62 < len(entry_data):
fields_end = i + 62
fields = struct.unpack('!LLLLLLLLLL20sH', entry_data[i:fields_end])
path_end = entry_data.index(b'\x00', fields_end)
path = entry_data[fields_end:path_end]
entry = IndexEntry(*(fields + (path.decode(),)))
entries.append(entry)
entry_len = ((62 + len(path) + 8) // 8) * 8
i += entry_len
assert len(entries) == num_entries
return entries
def write_tree(self):
"""Write a tree object from the current index entries."""
tree_entries = []
for entry in self.read_index():
assert '/' not in entry.path, \
'currently only supports a single, top-level directory'
mode_path = '{:o} {}'.format(entry.mode, entry.path).encode()
tree_entry = mode_path + b'\x00' + entry.sha1
tree_entries.append(tree_entry)
return self.hash_object(b''.join(tree_entries), 'tree')
def commit(self, message, author):
"""Commit the current state of the index to master with given message.
Return hash of commit object.
"""
tree = self.write_tree()
parent = self.get_local_master_hash()
timestamp = int(time.mktime(time.localtime()))
utc_offset = -time.timezone
author_time = '{} {}{:02}{:02}'.format(
timestamp,
'+' if utc_offset > 0 else '-',
abs(utc_offset) // 3600,
(abs(utc_offset) // 60) % 60)
lines = ['tree ' + tree]
if parent:
lines.append('parent ' + parent)
lines.append('author {} {}'.format(author, author_time))
lines.append('committer {} {}'.format(author, author_time))
lines.append('')
lines.append(message)
lines.append('')
data = '\n'.join(lines).encode()
sha1 = self.hash_object(data, 'commit')
master_path = os.path.join('.git', 'refs', 'heads', 'master')
self.write_file(master_path, (sha1 + '\n').encode())
print('committed to master: {:7}'.format(sha1))
return sha1
def extract_lines(self, data):
"""Extract list of lines from given server data."""
lines = []
i = 0
for _ in range(1000):
line_length = int(data[i:i + 4], 16)
line = data[i + 4:i + line_length]
lines.append(line)
if line_length == 0:
i += 4
else:
i += line_length
if i >= len(data):
break
return lines
def build_lines_data(self, lines):
"""Build byte string from given lines to send to server."""
result = []
for line in lines:
result.append('{:04x}'.format(len(line) + 5).encode())
result.append(line)
result.append(b'\n')
result.append(b'0000')
return b''.join(result)
def http_request(self, url, username, password):
response = requests.get(url, auth=(username, password))
response.raise_for_status()
return response.content
def get_remote_master_hash(self, git_url, username, password):
"""Get commit hash of remote master branch, return SHA-1 hex string or
None if no remote commits.
"""
url = git_url + '/info/refs?service=git-receive-pack'
response = self.http_request(url, username, password)
lines = extract_lines(response)
assert lines[0] == b'# service=git-receive-pack\n'
assert lines[1] == b''
if lines[2][:40] == b'0' * 40:
return None
master_sha1, master_ref = lines[2].split(b'\x00')[0].split()
assert master_ref == b'refs/heads/master'
assert len(master_sha1) == 40
return master_sha1.decode()
def find_tree_objects(self, tree_sha1):
"""Return set of SHA-1 hashes of all objects in this tree
(recursively), including the hash of the tree itself.
"""
objects = {tree_sha1}
for mode, path, sha1 in self.read_tree(sha1=tree_sha1):
if stat.S_ISDIR(mode):
objects.update(self.find_tree_objects(sha1))
else:
objects.add(sha1)
return objects
def find_commit_objects(self, commit_sha1):
"""Return set of SHA-1 hashes of all objects in this commit
(recursively), its tree, its parents, and the hash of the commit
itself.
"""
objects = {commit_sha1}
obj_type, commit = self.read_object(commit_sha1)
assert obj_type == 'commit'
lines = commit.decode().splitlines()
tree = next(l[5:45] for l in lines if l.startswith('tree '))
objects.update(self.find_tree_objects(tree))
parents = (l[7:47] for l in lines if l.startswith('parent '))
for parent in parents:
objects.update(self.find_commit_objects(parent))
return objects
def find_missing_objects(self, local_sha1, remote_sha1):
"""Return set of SHA-1 hashes of objects in local commit that are
missing at the remote (based on the given remote commit hash).
"""
local_objects = self.find_commit_objects(local_sha1)
if remote_sha1 is None:
return local_objects
remote_objects = self.find_commit_objects(remote_sha1)
return local_objects - remote_objects
def encode_pack_object(self, obj):
"""Encode a single object for a pack file and return bytes
(variable-length header followed by compressed data bytes).
"""
obj_type, data = self.read_object(obj)
type_num = ObjectType[obj_type].value
size = len(data)
byte = (type_num << 4) | (size & 0x0f)
size >>= 4
header = []
while size:
header.append(byte | 0x80)
byte = size & 0x7f
size >>= 7
header.append(byte)
return bytes(header) + zlib.compress(data)
def create_pack(self, objects):
"""Create pack file containing all objects in given given set of
SHA-1 hashes, return data bytes of full pack file.
"""
header = struct.pack('!4sLL', b'PACK', 2, len(objects))
body = b''.join(self.encode_pack_object(o) for o in sorted(objects))
contents = header + body
sha1 = hashlib.sha1(contents).digest()
data = contents + sha1
return data
def push(self, git_url, username, password):
"""Push master branch to given git repo URL."""
remote_sha1 = self.get_remote_master_hash(git_url, username, password)
local_sha1 = self.get_local_master_hash()
missing = self.find_missing_objects(local_sha1, remote_sha1)
lines = ['{} {} refs/heads/master\x00 report-status'.format(
remote_sha1 or ('0' * 40), local_sha1).encode()]
data = self.build_lines_data(lines) + self.create_pack(missing)
url = git_url + '/git-receive-pack'
response = self.http_request(url, username, password, data=data)
lines = self.extract_lines(response)
assert lines[0] == b'unpack ok\n', \
"expected line 1 b'unpack ok', got: {}".format(lines[0])
| """
Implement just enough git to commit and push to GitHub.
Read the story here: http://benhoyt.com/writings/pygit/
Released under a permissive MIT license (see LICENSE.txt).
"""
import argparse, collections, difflib, enum, hashlib, operator, os, stat
import struct, sys, time, urllib, zlib
import requests
# Data for one entry in the git index (.git/index)
IndexEntry = collections.namedtuple('IndexEntry', [
'ctime_s', 'ctime_n', 'mtime_s', 'mtime_n', 'dev', 'ino', 'mode', 'uid',
'gid', 'size', 'sha1', 'flags', 'path',
])
class ObjectType(enum.Enum):
"""Object type enum. There are other types too, but we don't need them.
See "enum object_type" in git's source (git/cache.h).
"""
commit = 1
tree = 2
blob = 3
class MyGit(object):
"""docstring for MyGit"""
def __init__(self):
return
def read_file(self,path):
"""Read contents of file at given path as bytes."""
with open(path, 'rb') as f:
return f.read()
def write_file(self, path, data):
"""Write data bytes to file at given path."""
with open(path, 'wb') as f:
f.write(data)
def init(self, repo):
"""Create directory for repo and initialize .git directory."""
os.mkdir(repo)
os.mkdir(os.path.join(repo, '.git'))
for name in ['objects', 'refs', 'refs/heads']:
os.mkdir(os.path.join(repo, '.git', name))
self.write_file(path=os.path.join(repo, '.git', 'HEAD'),data=b'ref: refs/heads/master')
print('initialized empty repository: {}'.format(repo))
# def diff(self):
# """Show diff of files changed (between index and working copy)."""
# changed, _, _ = get_status()
# entries_by_path = {e.path: e for e in read_index()}
# for i, path in enumerate(changed):
# sha1 = entries_by_path[path].sha1.hex()
# obj_type, data = read_object(sha1)
# assert obj_type == 'blob'
# index_lines = data.decode().splitlines()
# working_lines = read_file(path).decode().splitlines()
# diff_lines = difflib.unified_diff(
# index_lines, working_lines,
# '{} (index)'.format(path),
# '{} (working copy)'.format(path),
# lineterm='')
# for line in diff_lines:
# print(line)
# if i < len(changed) - 1:
# print('-' * 70)
def hash_object(self, data, obj_type, write=True):
"""Compute hash of object data of given type and write to object store
if "write" is True. Return SHA-1 object hash as hex string.
"""
header = '{},{}'.format(obj_type, len(data)).encode()
full_data = header + b'\x00' + data
sha1 = hashlib.sha1(full_data).hexdigest()
if write:
path = os.path.join('.git', 'objects', sha1[:2], sha1[2:])
if not os.path.exists(path):
os.makedirs(os.path.dirname(path), exist_ok = True)
self.write_file(path, zlib.compress(full_data))
return sha1
def read_index(self):
"""Read git index file and return list of IndexEntry objects."""
try:
data = self.read_file(os.path.join('.git', 'index'))
except FileNotFoundError:
return []
digest = hashlib.sha1(data[:-20]).digest()
assert digest == data[-20:], 'invalid index checksum'
signature, version, num_entries = struct.unpack('!4sLL', data[:12])
assert signature == b'DIRC', \
'invalid index signature {}'.format(signature)
assert version == 2, 'unknown index version {}'.format(version)
entry_data = data[12:-20]
entries = []
i = 0
while i + 62 < len(entry_data):
fields_end = i + 62
fields = struct.unpack('!LLLLLLLLLL20sH', entry_data[i:fields_end])
path_end = entry_data.index(b'\x00', fields_end)
path = entry_data[fields_end:path_end]
entry = IndexEntry(*(fields + (path.decode(),)))
entries.append(entry)
entry_len = ((62 + len(path) + 8) // 8) * 8
i += entry_len
assert len(entries) == num_entries
return entries
def write_tree(self):
"""Write a tree object from the current index entries."""
tree_entries = []
for entry in self.read_index():
assert '/' not in entry.path, \
'currently only supports a single, top-level directory'
mode_path = '{:o} {}'.format(entry.mode, entry.path).encode()
tree_entry = mode_path + b'\x00' + entry.sha1
tree_entries.append(tree_entry)
return self.hash_object(b''.join(tree_entries), 'tree')
def commit(self, message, author):
"""Commit the current state of the index to master with given message.
Return hash of commit object.
"""
tree = self.write_tree()
parent = self.get_local_master_hash()
timestamp = int(time.mktime(time.localtime()))
utc_offset = -time.timezone
author_time = '{} {}{:02}{:02}'.format(
timestamp,
'+' if utc_offset > 0 else '-',
abs(utc_offset) // 3600,
(abs(utc_offset) // 60) % 60)
lines = ['tree ' + tree]
if parent:
lines.append('parent ' + parent)
lines.append('author {} {}'.format(author, author_time))
lines.append('committer {} {}'.format(author, author_time))
lines.append('')
lines.append(message)
lines.append('')
data = '\n'.join(lines).encode()
sha1 = self.hash_object(data, 'commit')
master_path = os.path.join('.git', 'refs', 'heads', 'master')
self.write_file(master_path, (sha1 + '\n').encode())
print('committed to master: {:7}'.format(sha1))
return sha1
def extract_lines(self, data):
"""Extract list of lines from given server data."""
lines = []
i = 0
for _ in range(1000):
line_length = int(data[i:i + 4], 16)
line = data[i + 4:i + line_length]
lines.append(line)
if line_length == 0:
i += 4
else:
i += line_length
if i >= len(data):
break
return lines
def build_lines_data(self, lines):
"""Build byte string from given lines to send to server."""
result = []
for line in lines:
result.append('{:04x}'.format(len(line) + 5).encode())
result.append(line)
result.append(b'\n')
result.append(b'0000')
return b''.join(result)
def http_request(self, url, username, password):
response = requests.get(url, auth=(username, password))
response.raise_for_status()
return response.content
def get_remote_master_hash(self, git_url, username, password):
"""Get commit hash of remote master branch, return SHA-1 hex string or
None if no remote commits.
"""
url = git_url + '/info/refs?service=git-receive-pack'
response = self.http_request(url, username, password)
lines = extract_lines(response)
assert lines[0] == b'# service=git-receive-pack\n'
assert lines[1] == b''
if lines[2][:40] == b'0' * 40:
return None
master_sha1, master_ref = lines[2].split(b'\x00')[0].split()
assert master_ref == b'refs/heads/master'
assert len(master_sha1) == 40
return master_sha1.decode()
def find_tree_objects(self, tree_sha1):
"""Return set of SHA-1 hashes of all objects in this tree
(recursively), including the hash of the tree itself.
"""
objects = {tree_sha1}
for mode, path, sha1 in self.read_tree(sha1=tree_sha1):
if stat.S_ISDIR(mode):
objects.update(self.find_tree_objects(sha1))
else:
objects.add(sha1)
return objects
def find_commit_objects(self, commit_sha1):
"""Return set of SHA-1 hashes of all objects in this commit
(recursively), its tree, its parents, and the hash of the commit
itself.
"""
objects = {commit_sha1}
obj_type, commit = self.read_object(commit_sha1)
assert obj_type == 'commit'
lines = commit.decode().splitlines()
tree = next(l[5:45] for l in lines if l.startswith('tree '))
objects.update(self.find_tree_objects(tree))
parents = (l[7:47] for l in lines if l.startswith('parent '))
for parent in parents:
objects.update(self.find_commit_objects(parent))
return objects
def find_missing_objects(self, local_sha1, remote_sha1):
"""Return set of SHA-1 hashes of objects in local commit that are
missing at the remote (based on the given remote commit hash).
"""
local_objects = self.find_commit_objects(local_sha1)
if remote_sha1 is None:
return local_objects
remote_objects = self.find_commit_objects(remote_sha1)
return local_objects - remote_objects
def encode_pack_object(self, obj):
"""Encode a single object for a pack file and return bytes
(variable-length header followed by compressed data bytes).
"""
obj_type, data = self.read_object(obj)
type_num = ObjectType[obj_type].value
size = len(data)
byte = (type_num << 4) | (size & 0x0f)
size >>= 4
header = []
while size:
header.append(byte | 0x80)
byte = size & 0x7f
size >>= 7
header.append(byte)
return bytes(header) + zlib.compress(data)
def create_pack(self, objects):
"""Create pack file containing all objects in given given set of
SHA-1 hashes, return data bytes of full pack file.
"""
header = struct.pack('!4sLL', b'PACK', 2, len(objects))
body = b''.join(self.encode_pack_object(o) for o in sorted(objects))
contents = header + body
sha1 = hashlib.sha1(contents).digest()
data = contents + sha1
return data
def push(self, git_url, username, password):
"""Push master branch to given git repo URL."""
remote_sha1 = self.get_remote_master_hash(git_url, username, password)
local_sha1 = self.get_local_master_hash()
missing = self.find_missing_objects(local_sha1, remote_sha1)
lines = ['{} {} refs/heads/master\x00 report-status'.format(
remote_sha1 or ('0' * 40), local_sha1).encode()]
data = self.build_lines_data(lines) + self.create_pack(missing)
url = git_url + '/git-receive-pack'
response = self.http_request(url, username, password, data=data)
lines = self.extract_lines(response)
assert lines[0] == b'unpack ok\n', \
"expected line 1 b'unpack ok', got: {}".format(lines[0]) | en | 0.809554 | Implement just enough git to commit and push to GitHub. Read the story here: http://benhoyt.com/writings/pygit/ Released under a permissive MIT license (see LICENSE.txt). # Data for one entry in the git index (.git/index) Object type enum. There are other types too, but we don't need them. See "enum object_type" in git's source (git/cache.h). docstring for MyGit Read contents of file at given path as bytes. Write data bytes to file at given path. Create directory for repo and initialize .git directory. # def diff(self): # """Show diff of files changed (between index and working copy).""" # changed, _, _ = get_status() # entries_by_path = {e.path: e for e in read_index()} # for i, path in enumerate(changed): # sha1 = entries_by_path[path].sha1.hex() # obj_type, data = read_object(sha1) # assert obj_type == 'blob' # index_lines = data.decode().splitlines() # working_lines = read_file(path).decode().splitlines() # diff_lines = difflib.unified_diff( # index_lines, working_lines, # '{} (index)'.format(path), # '{} (working copy)'.format(path), # lineterm='') # for line in diff_lines: # print(line) # if i < len(changed) - 1: # print('-' * 70) Compute hash of object data of given type and write to object store if "write" is True. Return SHA-1 object hash as hex string. Read git index file and return list of IndexEntry objects. Write a tree object from the current index entries. Commit the current state of the index to master with given message. Return hash of commit object. Extract list of lines from given server data. Build byte string from given lines to send to server. Get commit hash of remote master branch, return SHA-1 hex string or None if no remote commits. Return set of SHA-1 hashes of all objects in this tree (recursively), including the hash of the tree itself. 
Return set of SHA-1 hashes of all objects in this commit (recursively), its tree, its parents, and the hash of the commit itself. Return set of SHA-1 hashes of objects in local commit that are missing at the remote (based on the given remote commit hash). Encode a single object for a pack file and return bytes (variable-length header followed by compressed data bytes). Create pack file containing all objects in given given set of SHA-1 hashes, return data bytes of full pack file. Push master branch to given git repo URL. | 3.173527 | 3 |
youmin_textclassifier/features/generator.py | WENGIF/youmin_textclassifier | 3 | 6614054 | <filename>youmin_textclassifier/features/generator.py
# -*- coding: utf-8 -*-
""" 特征生成 """
import pickle
import numpy as np
from gensim.models import KeyedVectors
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from ..utils.db import Word2VecDb
def _word2vec_bin(text_data,
w2v_dict,
w2v_dim,
is_lower):
"""
文本词向量表征(二进制[bin]文件版)
异常处理:当句子中所有词汇在词向量库中均不存在,用0向量代替
Args:
text_data -- 需要转换的用空格隔开的文本数据,["我 来自 广州",...]
w2v_dict -- 二进制词向量模型文件(w2v_dim维)
w2v_dim -- 词向量维度
is_lower -- 是否将词汇转换为小写
returns:
word2vec_list -- numpy.matrix
"""
w2v_model = KeyedVectors.load_word2vec_format(w2v_dict,
binary=True,
unicode_errors="ignore")
word2vec_list = []
for each_sen in text_data:
sum_array = np.zeros(w2v_dim)
cnt = 0
for _word in each_sen.split():
_word = _word.lower() if is_lower else _word
if _word in w2v_model:
sum_array += np.array(w2v_model[_word])
cnt += 1
if cnt == 0:
word2vec_list.append(np.zeros(w2v_dim))
else:
word2vec_list.append(sum_array / float(cnt))
return np.matrix(word2vec_list)
def _word2vec_db(text_data,
w2v_dict,
w2v_dim,
is_lower):
"""
文本词向量表征(文件SQLite版)
异常处理:当句子中词汇在词向量库中均不存在,用0向量代替
Args:
text_data -- 需要转换的用空格隔开的文本数据,["我 来自 广州",...]
w2v_dict -- 词向量模型文件(w2v_dim维)
w2v_dim -- 词向量维度
is_lower -- 是否将词汇转换为小写
returns:
word2vec_list -- numpy.matrix
"""
word2vec_list = []
cli = Word2VecDb(db_path=w2v_dict)
for each_sen in text_data:
sum_array = cli.get_vec_batch(
[_.lower() if is_lower else _ for _ in each_sen.split()])
if sum_array is not None:
word2vec_list.append(np.array(sum_array).mean(axis=0))
else:
word2vec_list.append(np.zeros(w2v_dim))
cli.destroy()
return np.matrix(word2vec_list)
def token_to_vec(token_texts,
feature,
w2v_dict,
w2v_dim,
is_lower,
vectorizer_path=None,
mode=None):
"""
将文本表征为向量
Args:
token_texts -- 切词后空格隔开列表["w1 w2 ... w_n", ..., ]
feature -- optional: 文档-词项矩阵(dtm,基于bow), 词频-逆向文档频率(tf-idf), 词向量(w2v)
w2v_dict -- 词向量模型文件(w2v_dim维)
w2v_dim -- 词向量维度
is_lower -- 是否将词汇转换为小写
Kwargs:
vectorizer_path -- vectorizer 存储路径
mode -- 模型模式,optional: train/test/predict
returns:
pred_vec -- 文本特征向量,类型为: scipy.sparse.csr_matrix 或 numpy.matrix
"""
if feature == "w2v":
if w2v_dict:
if w2v_dict.endswith("db"):
pred_vec = _word2vec_db(token_texts, w2v_dict,
w2v_dim, is_lower)
elif w2v_dict.endswith("bin"):
pred_vec = _word2vec_bin(token_texts, w2v_dict,
w2v_dim, is_lower)
else:
# TODO: 兼容txt格式的词向量
raise ValueError(
"`%s` is not a supported w2v_dict" % w2v_dict)
else:
raise ValueError("Please input the w2v_dict path!")
elif feature in ("bow", "bow_l2", "tfidf"):
if vectorizer_path is None or mode is None:
raise ValueError("Please input the vectorizer_path or mode!")
if mode == "train":
# `token_pattern`默认过滤一个字符长度的词,在此设置保留
if feature == "bow":
vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w+\b")
elif feature == "bow_l2":
# 等价: CountVectorizer + normalize("l2")
vectorizer = TfidfVectorizer(use_idf=False,
token_pattern=r"(?u)\b\w+\b")
else:
vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
pred_vec = vectorizer.fit_transform(token_texts)
with open(vectorizer_path, "wb") as fw:
pickle.dump(vectorizer, fw)
else:
vectorizer = pickle.load(open(vectorizer_path, "rb"))
pred_vec = vectorizer.transform(token_texts)
else:
raise ValueError("`%s` is not a supported feature" % feature)
return pred_vec
def token_to_file(label_token_texts, outpath, sep="__label__"):
"""
将列表转为文件,用于规整fasttext所需输入格式
Args:
label_token_texts -- 训练数据,如[(label, "token1 token2"),...]
outpath -- 导出文件路径
Kwargs:
sep -- 分割符
returns:
outpath -- 数据行格式如: `__label__<y> <text>`
"""
try:
with open(outpath, "w", encoding="utf-8") as fw:
for _label, _text in label_token_texts:
fw.write("{}{} {}\n".format(sep, _label, _text))
except FileNotFoundError:
raise FileNotFoundError("Can't write the file(%s)." % outpath)
return outpath
| <filename>youmin_textclassifier/features/generator.py
# -*- coding: utf-8 -*-
""" 特征生成 """
import pickle
import numpy as np
from gensim.models import KeyedVectors
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from ..utils.db import Word2VecDb
def _word2vec_bin(text_data,
w2v_dict,
w2v_dim,
is_lower):
"""
文本词向量表征(二进制[bin]文件版)
异常处理:当句子中所有词汇在词向量库中均不存在,用0向量代替
Args:
text_data -- 需要转换的用空格隔开的文本数据,["我 来自 广州",...]
w2v_dict -- 二进制词向量模型文件(w2v_dim维)
w2v_dim -- 词向量维度
is_lower -- 是否将词汇转换为小写
returns:
word2vec_list -- numpy.matrix
"""
w2v_model = KeyedVectors.load_word2vec_format(w2v_dict,
binary=True,
unicode_errors="ignore")
word2vec_list = []
for each_sen in text_data:
sum_array = np.zeros(w2v_dim)
cnt = 0
for _word in each_sen.split():
_word = _word.lower() if is_lower else _word
if _word in w2v_model:
sum_array += np.array(w2v_model[_word])
cnt += 1
if cnt == 0:
word2vec_list.append(np.zeros(w2v_dim))
else:
word2vec_list.append(sum_array / float(cnt))
return np.matrix(word2vec_list)
def _word2vec_db(text_data,
w2v_dict,
w2v_dim,
is_lower):
"""
文本词向量表征(文件SQLite版)
异常处理:当句子中词汇在词向量库中均不存在,用0向量代替
Args:
text_data -- 需要转换的用空格隔开的文本数据,["我 来自 广州",...]
w2v_dict -- 词向量模型文件(w2v_dim维)
w2v_dim -- 词向量维度
is_lower -- 是否将词汇转换为小写
returns:
word2vec_list -- numpy.matrix
"""
word2vec_list = []
cli = Word2VecDb(db_path=w2v_dict)
for each_sen in text_data:
sum_array = cli.get_vec_batch(
[_.lower() if is_lower else _ for _ in each_sen.split()])
if sum_array is not None:
word2vec_list.append(np.array(sum_array).mean(axis=0))
else:
word2vec_list.append(np.zeros(w2v_dim))
cli.destroy()
return np.matrix(word2vec_list)
def token_to_vec(token_texts,
feature,
w2v_dict,
w2v_dim,
is_lower,
vectorizer_path=None,
mode=None):
"""
将文本表征为向量
Args:
token_texts -- 切词后空格隔开列表["w1 w2 ... w_n", ..., ]
feature -- optional: 文档-词项矩阵(dtm,基于bow), 词频-逆向文档频率(tf-idf), 词向量(w2v)
w2v_dict -- 词向量模型文件(w2v_dim维)
w2v_dim -- 词向量维度
is_lower -- 是否将词汇转换为小写
Kwargs:
vectorizer_path -- vectorizer 存储路径
mode -- 模型模式,optional: train/test/predict
returns:
pred_vec -- 文本特征向量,类型为: scipy.sparse.csr_matrix 或 numpy.matrix
"""
if feature == "w2v":
if w2v_dict:
if w2v_dict.endswith("db"):
pred_vec = _word2vec_db(token_texts, w2v_dict,
w2v_dim, is_lower)
elif w2v_dict.endswith("bin"):
pred_vec = _word2vec_bin(token_texts, w2v_dict,
w2v_dim, is_lower)
else:
# TODO: 兼容txt格式的词向量
raise ValueError(
"`%s` is not a supported w2v_dict" % w2v_dict)
else:
raise ValueError("Please input the w2v_dict path!")
elif feature in ("bow", "bow_l2", "tfidf"):
if vectorizer_path is None or mode is None:
raise ValueError("Please input the vectorizer_path or mode!")
if mode == "train":
# `token_pattern`默认过滤一个字符长度的词,在此设置保留
if feature == "bow":
vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w+\b")
elif feature == "bow_l2":
# 等价: CountVectorizer + normalize("l2")
vectorizer = TfidfVectorizer(use_idf=False,
token_pattern=r"(?u)\b\w+\b")
else:
vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
pred_vec = vectorizer.fit_transform(token_texts)
with open(vectorizer_path, "wb") as fw:
pickle.dump(vectorizer, fw)
else:
vectorizer = pickle.load(open(vectorizer_path, "rb"))
pred_vec = vectorizer.transform(token_texts)
else:
raise ValueError("`%s` is not a supported feature" % feature)
return pred_vec
def token_to_file(label_token_texts, outpath, sep="__label__"):
"""
将列表转为文件,用于规整fasttext所需输入格式
Args:
label_token_texts -- 训练数据,如[(label, "token1 token2"),...]
outpath -- 导出文件路径
Kwargs:
sep -- 分割符
returns:
outpath -- 数据行格式如: `__label__<y> <text>`
"""
try:
with open(outpath, "w", encoding="utf-8") as fw:
for _label, _text in label_token_texts:
fw.write("{}{} {}\n".format(sep, _label, _text))
except FileNotFoundError:
raise FileNotFoundError("Can't write the file(%s)." % outpath)
return outpath
| zh | 0.796905 | # -*- coding: utf-8 -*- 特征生成 文本词向量表征(二进制[bin]文件版) 异常处理:当句子中所有词汇在词向量库中均不存在,用0向量代替 Args: text_data -- 需要转换的用空格隔开的文本数据,["我 来自 广州",...] w2v_dict -- 二进制词向量模型文件(w2v_dim维) w2v_dim -- 词向量维度 is_lower -- 是否将词汇转换为小写 returns: word2vec_list -- numpy.matrix 文本词向量表征(文件SQLite版) 异常处理:当句子中词汇在词向量库中均不存在,用0向量代替 Args: text_data -- 需要转换的用空格隔开的文本数据,["我 来自 广州",...] w2v_dict -- 词向量模型文件(w2v_dim维) w2v_dim -- 词向量维度 is_lower -- 是否将词汇转换为小写 returns: word2vec_list -- numpy.matrix 将文本表征为向量 Args: token_texts -- 切词后空格隔开列表["w1 w2 ... w_n", ..., ] feature -- optional: 文档-词项矩阵(dtm,基于bow), 词频-逆向文档频率(tf-idf), 词向量(w2v) w2v_dict -- 词向量模型文件(w2v_dim维) w2v_dim -- 词向量维度 is_lower -- 是否将词汇转换为小写 Kwargs: vectorizer_path -- vectorizer 存储路径 mode -- 模型模式,optional: train/test/predict returns: pred_vec -- 文本特征向量,类型为: scipy.sparse.csr_matrix 或 numpy.matrix # TODO: 兼容txt格式的词向量 # `token_pattern`默认过滤一个字符长度的词,在此设置保留 # 等价: CountVectorizer + normalize("l2") 将列表转为文件,用于规整fasttext所需输入格式 Args: label_token_texts -- 训练数据,如[(label, "token1 token2"),...] outpath -- 导出文件路径 Kwargs: sep -- 分割符 returns: outpath -- 数据行格式如: `__label__<y> <text>` | 3.016531 | 3 |
tests/helpers/oauth.py | Max-Zhenzhera/my_vocab_backend | 1 | 6614055 | <filename>tests/helpers/oauth.py
from httpx import AsyncClient
from app.db.repositories import OAuthConnectionsRepository
from .auth import get_user_from_client
from ..users import TestUser
__all__ = ['link_oauth_connections']
async def link_oauth_connections(
oauth_connections_repository: OAuthConnectionsRepository,
client: AsyncClient,
test_user: TestUser
) -> None:
created_user = get_user_from_client(client)
async with oauth_connections_repository.session.begin():
for oauth_connection in test_user.get_oauth_connections(created_user.id):
await oauth_connections_repository.link_connection(oauth_connection)
| <filename>tests/helpers/oauth.py
from httpx import AsyncClient
from app.db.repositories import OAuthConnectionsRepository
from .auth import get_user_from_client
from ..users import TestUser
__all__ = ['link_oauth_connections']
async def link_oauth_connections(
oauth_connections_repository: OAuthConnectionsRepository,
client: AsyncClient,
test_user: TestUser
) -> None:
created_user = get_user_from_client(client)
async with oauth_connections_repository.session.begin():
for oauth_connection in test_user.get_oauth_connections(created_user.id):
await oauth_connections_repository.link_connection(oauth_connection)
| none | 1 | 2.117614 | 2 | |
inversion/prelude.py | yohan-pg/stylegan2-ada-pytorch | 0 | 6614056 | import copy
import os
from time import perf_counter
import sys
import torch.optim as optim
import tqdm
import click
import dataclasses
import imageio
import numpy as np
import PIL.Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import dnnlib
import legacy
from xml.dom import minidom
import shutil
from torchvision.utils import save_image, make_grid
from interpolator import interpolate_images
from training.networks import normalize_2nd_moment
from abc import ABC, abstractmethod, abstractstaticmethod, abstractclassmethod
from dataclasses import dataclass, field
from typing import Optional, Type, List, final, Tuple, Callable, Iterator, Iterable, Dict, ClassVar, Union
import matplotlib.pyplot as plt
from torchvision.io import write_video
ImageTensor = torch.Tensor # [B, C, H, W] with data between 0 and 1
class ToStyles(ABC, torch.nn.Module):
@abstractmethod
def to_styles(self):
raise NotImplementedError
Styles = torch.Tensor
def dbg(x):
print(x)
return x
def imview(image):
save_image(image, "tmp/tmp.png")
os.system("code tmp/tmp.png")
| import copy
import os
from time import perf_counter
import sys
import torch.optim as optim
import tqdm
import click
import dataclasses
import imageio
import numpy as np
import PIL.Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import dnnlib
import legacy
from xml.dom import minidom
import shutil
from torchvision.utils import save_image, make_grid
from interpolator import interpolate_images
from training.networks import normalize_2nd_moment
from abc import ABC, abstractmethod, abstractstaticmethod, abstractclassmethod
from dataclasses import dataclass, field
from typing import Optional, Type, List, final, Tuple, Callable, Iterator, Iterable, Dict, ClassVar, Union
import matplotlib.pyplot as plt
from torchvision.io import write_video
ImageTensor = torch.Tensor # [B, C, H, W] with data between 0 and 1
class ToStyles(ABC, torch.nn.Module):
@abstractmethod
def to_styles(self):
raise NotImplementedError
Styles = torch.Tensor
def dbg(x):
print(x)
return x
def imview(image):
save_image(image, "tmp/tmp.png")
os.system("code tmp/tmp.png")
| en | 0.976867 | # [B, C, H, W] with data between 0 and 1 | 2.166789 | 2 |
tests/conftest.py | escudocloud/distributed-shuffleindex | 1 | 6614057 | <reponame>escudocloud/distributed-shuffleindex<filename>tests/conftest.py
def pytest_addoption(parser):
parser.addoption('--small', action='store_true', help='limit accesses')
def pytest_generate_tests(metafunc):
if 'N' in metafunc.fixturenames:
metafunc.parametrize('N', [10 ** 2 if metafunc.config.option.small
else 10 ** 4])
| def pytest_addoption(parser):
parser.addoption('--small', action='store_true', help='limit accesses')
def pytest_generate_tests(metafunc):
if 'N' in metafunc.fixturenames:
metafunc.parametrize('N', [10 ** 2 if metafunc.config.option.small
else 10 ** 4]) | none | 1 | 2.212557 | 2 | |
parameter_analyse/generate_data.py | lionelkusch/compare_zerlaut | 0 | 6614058 | # Copyright 2021 Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
import numpy as np
import os
def remove_outlier(datas, p=3):
Q1, Q3 = np.quantile(datas, q=[0.25, 0.75], axis=1)
IQR = Q3 - Q1
min_data, max_data = Q1 - p * IQR, Q3 + p * IQR
result = np.empty(datas.shape)
result[:] = np.NAN
for i, data in enumerate(datas):
data_pre = data[np.logical_and(min_data[i] <= data, data <= max_data[i])]
result[i, :data_pre.shape[0]] = data_pre
return result
def compute_rate(data, begin, end, nb):
"""
Compute the firing rate
:param data: the spike of all neurons between end and begin
:param begin: the time of the first spike
:param end: the time of the last spike
:return: the mean and the standard deviation of firing rate, the maximum and minimum of firing rate
"""
# get data
n_fil = data[:]
n_fil = n_fil.astype(int)
# count the number of the same id
count_of_n = np.bincount(n_fil)
# compute the rate
rate_each_n_incomplet = count_of_n / (end - begin)
# fill the table with the neurons which are not firing
rate_each_n = np.concatenate(
(rate_each_n_incomplet, np.zeros(-np.shape(rate_each_n_incomplet)[0] + nb + 1)))
return rate_each_n[1:]
def generate_rates(parameters,
MAXfexc, MINfexc, nb_value_fexc,
MAXfinh, MINfinh, nb_value_finh,
MAXadaptation, MINadaptation, nb_value_adaptation,
MAXJump, MINJump,
nb_neurons, name_file, dt, tstop):
import nest
nest.set_verbosity(100)
if not os.path.exists(name_file):
os.makedirs(name_file)
# initialisation of the parameter
params = {'g_L': parameters['g_L'],
'E_L': parameters['E_L'],
'V_reset': parameters['V_reset'],
'I_e': parameters['I_e'],
'C_m': parameters['C_m'],
'V_th': parameters['V_th'],
't_ref': parameters['t_ref'],
'tau_w': parameters['tau_w'],
'Delta_T': parameters['Delta_T'],
'b': parameters['b'],
'a': parameters['a'],
'V_peak': parameters['V_peak'],
'E_ex': parameters['E_ex'],
'E_in': parameters['E_in'],
'tau_syn_ex': parameters['tau_syn_ex'],
'tau_syn_in': parameters['tau_syn_in'],
'gsl_error_tol': 1e-8
}
Number_connexion_ex = parameters['N_tot'] * parameters['p_connect_ex'] * (1 - parameters['g'])
Number_connexion_in = parameters['N_tot'] * parameters['p_connect_in'] * parameters['g']
simtime = tstop * 1e3
dt = dt * 1e3
master_seed = 0
local_num_threads = 8
# intialisation of variable
fiSim = np.repeat(np.linspace(MINfinh, MAXfinh, nb_value_finh), nb_value_adaptation).reshape(
nb_value_finh * nb_value_adaptation) * Number_connexion_in
adaptation = np.repeat([np.linspace(MINadaptation, MAXadaptation, nb_value_adaptation)], nb_value_finh,
axis=0).reshape(
nb_value_finh * nb_value_adaptation)
feSim = np.zeros((nb_value_fexc, nb_value_finh * nb_value_adaptation))
feOut = np.zeros((nb_value_fexc, nb_value_finh * nb_value_adaptation, nb_neurons))
MAXdfex = (MAXfexc - MINfexc) / nb_value_fexc
dFex = np.ones((nb_value_adaptation * nb_value_finh)) * MAXdfex
index_end = np.zeros((nb_value_adaptation * nb_value_finh), dtype=np.int)
index = np.where(index_end >= 0)
while index[0].size > 0:
step = np.min(index_end[index])
index_min = index[0][np.argmin(index_end[index])]
print(step, index_min, dFex[index_min], feOut[[step - 1], index_min, :], feOut[step, index_min, :],
np.nanmean(remove_outlier(feOut[[step - 1], index_min])),
np.nanmean(remove_outlier(feOut[[step], index_min])), feSim[step, index_min],
fiSim[index_min], adaptation[index_min])
# simulation
simulation = False
error = 1.0e-6
while error > 1.0e-20 and not simulation:
params['gsl_error_tol'] = error
# initialisation of nest
nest.ResetKernel()
nest.SetKernelStatus({
# Resolution of the simulation (in ms).
"resolution": dt,
# Print the time progress, this should only be used when the simulation
# is run on a local machine.
"print_time": True,
# If True, data will be overwritten,
# If False, a NESTError is raised if the files already exist.
"overwrite_files": True,
# Number of threads per MPI process.
'local_num_threads': local_num_threads,
# Path to save the output data
'data_path': name_file,
# Masterseed for NEST and NumPy
'grng_seed': master_seed + local_num_threads,
# Seeds for the individual processes
'rng_seeds': range(master_seed + 1 + local_num_threads, master_seed + 1 + (2 * local_num_threads)),
})
# create the network
nest.SetDefaults('aeif_cond_exp', params)
neurons = nest.Create('aeif_cond_exp', index[0].size * nb_neurons)
nest.SetStatus(neurons, "I_e", -np.repeat(adaptation[index], nb_neurons).ravel())
poisson_generator_ex = nest.Create('poisson_generator', index[0].size * nb_neurons)
poisson_generator_in = nest.Create('poisson_generator', index[0].size * nb_neurons)
nest.SetStatus(poisson_generator_in, 'rate', np.repeat(fiSim[index], nb_neurons).ravel())
nest.SetStatus(poisson_generator_ex, 'rate',
np.repeat(feSim[index_end[index].ravel(), index], nb_neurons) * Number_connexion_ex)
nest.CopyModel("static_synapse", "excitatory",
{"weight": parameters['Q_e'], "delay": 1.0})
nest.CopyModel("static_synapse", "inhibitory",
{"weight": -parameters['Q_i'], "delay": 1.0})
nest.Connect(poisson_generator_ex, neurons, 'one_to_one', syn_spec="excitatory")
nest.Connect(poisson_generator_in, neurons, 'one_to_one', syn_spec="inhibitory")
# create spike detector
spikes_dec = nest.Create("spike_recorder")
nest.Connect(neurons, spikes_dec)
try:
nest.Simulate(simtime)
simulation = True
except nest.NESTError as exception:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(exception).__name__, exception.args)
print(message)
error = error / 10.0
# compute firing rate
data = nest.GetStatus(spikes_dec)[0]['events']['senders']
feOut[index_end[index].ravel(), index] = compute_rate(data, 0.0, simtime, index[0].size * nb_neurons).reshape(
index[0].size, nb_neurons) * 1e3
jump = np.nanmean(remove_outlier(feOut[index_end.ravel(), np.arange(0, index_end.size)]), axis=1) \
- np.nanmean(remove_outlier(feOut[(index_end - 1).ravel(), np.arange(0, index_end.size)]), axis=1)
# rescale if jump to big
update_index = np.where(np.logical_and(index_end >= 0, jump > MAXJump))
feSim[index_end.ravel()[update_index], update_index] -= dFex[update_index]
dFex[update_index] /= 2
feSim[index_end.ravel()[update_index], update_index] += dFex[update_index]
# increase external input if no spike (initial condition of external input)
update_index = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), jump < MINJump))
feSim[index_end.ravel()[update_index], update_index] += dFex[update_index]
dFex[update_index] += dFex[update_index] * 0.1
# save the data and pass at next value
update_index = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), jump >= MINJump))
index_end[update_index] += 1
index_end[np.where(index_end == nb_value_fexc)[0]] = -1
update = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), jump > MINJump))
feSim[index_end.ravel()[update], update] = feSim[index_end.ravel()[update] - 1, update] + dFex[update]
update = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), dFex < MAXdfex))[0]
dFex[update][np.where(dFex[update] < MAXdfex)] += dFex[update] * 0.1
index = np.where(index_end >= 0)
np.save(name_file + '/fout.npy', feOut)
np.save(name_file + '/fin.npy', feSim)
| # Copyright 2021 Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
import numpy as np
import os
def remove_outlier(datas, p=3):
Q1, Q3 = np.quantile(datas, q=[0.25, 0.75], axis=1)
IQR = Q3 - Q1
min_data, max_data = Q1 - p * IQR, Q3 + p * IQR
result = np.empty(datas.shape)
result[:] = np.NAN
for i, data in enumerate(datas):
data_pre = data[np.logical_and(min_data[i] <= data, data <= max_data[i])]
result[i, :data_pre.shape[0]] = data_pre
return result
def compute_rate(data, begin, end, nb):
"""
Compute the firing rate
:param data: the spike of all neurons between end and begin
:param begin: the time of the first spike
:param end: the time of the last spike
:return: the mean and the standard deviation of firing rate, the maximum and minimum of firing rate
"""
# get data
n_fil = data[:]
n_fil = n_fil.astype(int)
# count the number of the same id
count_of_n = np.bincount(n_fil)
# compute the rate
rate_each_n_incomplet = count_of_n / (end - begin)
# fill the table with the neurons which are not firing
rate_each_n = np.concatenate(
(rate_each_n_incomplet, np.zeros(-np.shape(rate_each_n_incomplet)[0] + nb + 1)))
return rate_each_n[1:]
def generate_rates(parameters,
MAXfexc, MINfexc, nb_value_fexc,
MAXfinh, MINfinh, nb_value_finh,
MAXadaptation, MINadaptation, nb_value_adaptation,
MAXJump, MINJump,
nb_neurons, name_file, dt, tstop):
import nest
nest.set_verbosity(100)
if not os.path.exists(name_file):
os.makedirs(name_file)
# initialisation of the parameter
params = {'g_L': parameters['g_L'],
'E_L': parameters['E_L'],
'V_reset': parameters['V_reset'],
'I_e': parameters['I_e'],
'C_m': parameters['C_m'],
'V_th': parameters['V_th'],
't_ref': parameters['t_ref'],
'tau_w': parameters['tau_w'],
'Delta_T': parameters['Delta_T'],
'b': parameters['b'],
'a': parameters['a'],
'V_peak': parameters['V_peak'],
'E_ex': parameters['E_ex'],
'E_in': parameters['E_in'],
'tau_syn_ex': parameters['tau_syn_ex'],
'tau_syn_in': parameters['tau_syn_in'],
'gsl_error_tol': 1e-8
}
Number_connexion_ex = parameters['N_tot'] * parameters['p_connect_ex'] * (1 - parameters['g'])
Number_connexion_in = parameters['N_tot'] * parameters['p_connect_in'] * parameters['g']
simtime = tstop * 1e3
dt = dt * 1e3
master_seed = 0
local_num_threads = 8
# intialisation of variable
fiSim = np.repeat(np.linspace(MINfinh, MAXfinh, nb_value_finh), nb_value_adaptation).reshape(
nb_value_finh * nb_value_adaptation) * Number_connexion_in
adaptation = np.repeat([np.linspace(MINadaptation, MAXadaptation, nb_value_adaptation)], nb_value_finh,
axis=0).reshape(
nb_value_finh * nb_value_adaptation)
feSim = np.zeros((nb_value_fexc, nb_value_finh * nb_value_adaptation))
feOut = np.zeros((nb_value_fexc, nb_value_finh * nb_value_adaptation, nb_neurons))
MAXdfex = (MAXfexc - MINfexc) / nb_value_fexc
dFex = np.ones((nb_value_adaptation * nb_value_finh)) * MAXdfex
index_end = np.zeros((nb_value_adaptation * nb_value_finh), dtype=np.int)
index = np.where(index_end >= 0)
while index[0].size > 0:
step = np.min(index_end[index])
index_min = index[0][np.argmin(index_end[index])]
print(step, index_min, dFex[index_min], feOut[[step - 1], index_min, :], feOut[step, index_min, :],
np.nanmean(remove_outlier(feOut[[step - 1], index_min])),
np.nanmean(remove_outlier(feOut[[step], index_min])), feSim[step, index_min],
fiSim[index_min], adaptation[index_min])
# simulation
simulation = False
error = 1.0e-6
while error > 1.0e-20 and not simulation:
params['gsl_error_tol'] = error
# initialisation of nest
nest.ResetKernel()
nest.SetKernelStatus({
# Resolution of the simulation (in ms).
"resolution": dt,
# Print the time progress, this should only be used when the simulation
# is run on a local machine.
"print_time": True,
# If True, data will be overwritten,
# If False, a NESTError is raised if the files already exist.
"overwrite_files": True,
# Number of threads per MPI process.
'local_num_threads': local_num_threads,
# Path to save the output data
'data_path': name_file,
# Masterseed for NEST and NumPy
'grng_seed': master_seed + local_num_threads,
# Seeds for the individual processes
'rng_seeds': range(master_seed + 1 + local_num_threads, master_seed + 1 + (2 * local_num_threads)),
})
# create the network
nest.SetDefaults('aeif_cond_exp', params)
neurons = nest.Create('aeif_cond_exp', index[0].size * nb_neurons)
nest.SetStatus(neurons, "I_e", -np.repeat(adaptation[index], nb_neurons).ravel())
poisson_generator_ex = nest.Create('poisson_generator', index[0].size * nb_neurons)
poisson_generator_in = nest.Create('poisson_generator', index[0].size * nb_neurons)
nest.SetStatus(poisson_generator_in, 'rate', np.repeat(fiSim[index], nb_neurons).ravel())
nest.SetStatus(poisson_generator_ex, 'rate',
np.repeat(feSim[index_end[index].ravel(), index], nb_neurons) * Number_connexion_ex)
nest.CopyModel("static_synapse", "excitatory",
{"weight": parameters['Q_e'], "delay": 1.0})
nest.CopyModel("static_synapse", "inhibitory",
{"weight": -parameters['Q_i'], "delay": 1.0})
nest.Connect(poisson_generator_ex, neurons, 'one_to_one', syn_spec="excitatory")
nest.Connect(poisson_generator_in, neurons, 'one_to_one', syn_spec="inhibitory")
# create spike detector
spikes_dec = nest.Create("spike_recorder")
nest.Connect(neurons, spikes_dec)
try:
nest.Simulate(simtime)
simulation = True
except nest.NESTError as exception:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(exception).__name__, exception.args)
print(message)
error = error / 10.0
# compute firing rate
data = nest.GetStatus(spikes_dec)[0]['events']['senders']
feOut[index_end[index].ravel(), index] = compute_rate(data, 0.0, simtime, index[0].size * nb_neurons).reshape(
index[0].size, nb_neurons) * 1e3
jump = np.nanmean(remove_outlier(feOut[index_end.ravel(), np.arange(0, index_end.size)]), axis=1) \
- np.nanmean(remove_outlier(feOut[(index_end - 1).ravel(), np.arange(0, index_end.size)]), axis=1)
# rescale if jump to big
update_index = np.where(np.logical_and(index_end >= 0, jump > MAXJump))
feSim[index_end.ravel()[update_index], update_index] -= dFex[update_index]
dFex[update_index] /= 2
feSim[index_end.ravel()[update_index], update_index] += dFex[update_index]
# increase external input if no spike (initial condition of external input)
update_index = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), jump < MINJump))
feSim[index_end.ravel()[update_index], update_index] += dFex[update_index]
dFex[update_index] += dFex[update_index] * 0.1
# save the data and pass at next value
update_index = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), jump >= MINJump))
index_end[update_index] += 1
index_end[np.where(index_end == nb_value_fexc)[0]] = -1
update = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), jump > MINJump))
feSim[index_end.ravel()[update], update] = feSim[index_end.ravel()[update] - 1, update] + dFex[update]
update = np.where(np.logical_and(np.logical_and(index_end >= 0, jump <= MAXJump), dFex < MAXdfex))[0]
dFex[update][np.where(dFex[update] < MAXdfex)] += dFex[update] * 0.1
index = np.where(index_end >= 0)
np.save(name_file + '/fout.npy', feOut)
np.save(name_file + '/fin.npy', feSim)
| en | 0.733733 | # Copyright 2021 Aix-Marseille Université # "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. " Compute the firing rate :param data: the spike of all neurons between end and begin :param begin: the time of the first spike :param end: the time of the last spike :return: the mean and the standard deviation of firing rate, the maximum and minimum of firing rate # get data # count the number of the same id # compute the rate # fill the table with the neurons which are not firing # initialisation of the parameter # intialisation of variable # simulation # initialisation of nest # Resolution of the simulation (in ms). # Print the time progress, this should only be used when the simulation # is run on a local machine. # If True, data will be overwritten, # If False, a NESTError is raised if the files already exist. # Number of threads per MPI process. # Path to save the output data # Masterseed for NEST and NumPy # Seeds for the individual processes # create the network # create spike detector # compute firing rate # rescale if jump to big # increase external input if no spike (initial condition of external input) # save the data and pass at next value | 2.707773 | 3 |
zips/plugin.video.primevideo/extract.py | webplay10/webplay-REPO-MATRIX | 0 | 6614059 | <reponame>webplay10/webplay-REPO-MATRIX<filename>zips/plugin.video.primevideo/extract.py
#import zfile as zipfile
import zipfile
#from zipfile import ZipFile
def all(_in, _out, dp=None):
if dp:
return allWithProgress(_in, _out, dp)
return allNoProgress(_in, _out)
def allNoProgress(_in, _out):
try:
zin = zipfile.ZipFile(_in, 'r')
zin.extractall(_out)
except Exception as e:
print (str(e))
return False
return True
def allWithProgress(_in, _out, dp):
zin = zipfile.ZipFile(_in, 'r')
nFiles = float(len(zin.infolist()))
count = 0
try:
for item in zin.infolist():
count += 1
update = count / nFiles * 100
dp.update(int(update))
zin.extract(item, _out)
except Exception as e:
print (str(e))
return False
return True
#checkintegrity101219 | #import zfile as zipfile
import zipfile
#from zipfile import ZipFile
def all(_in, _out, dp=None):
if dp:
return allWithProgress(_in, _out, dp)
return allNoProgress(_in, _out)
def allNoProgress(_in, _out):
try:
zin = zipfile.ZipFile(_in, 'r')
zin.extractall(_out)
except Exception as e:
print (str(e))
return False
return True
def allWithProgress(_in, _out, dp):
zin = zipfile.ZipFile(_in, 'r')
nFiles = float(len(zin.infolist()))
count = 0
try:
for item in zin.infolist():
count += 1
update = count / nFiles * 100
dp.update(int(update))
zin.extract(item, _out)
except Exception as e:
print (str(e))
return False
return True
#checkintegrity101219 | en | 0.377413 | #import zfile as zipfile #from zipfile import ZipFile #checkintegrity101219 | 3.030823 | 3 |
python_utils/data_structures.py | CardoAI/cardo-python-utils | 0 | 6614060 | import operator
from datetime import datetime, timezone
from decimal import Decimal
from typing import (
Callable, Hashable, Generator, Iterable, List, Any, Dict, Optional, Union
)
from python_utils.math import are_equal
from python_utils.types_hinting import DictOrObject
def safe_find(records: Iterable, filter_func: Callable) -> Optional[Any]:
    """
    Wrapper around the filter function, to return only the first record when
    there is a match, or None instead.

    Args:
        records: Iterable to iterate over
        filter_func: Predicate applied to each record

    Returns:
        First occurrence found or None

    Examples:
        >>> safe_find([{'a': 'T', 'b': 'H'}, {'a': 'F'}], lambda rec: rec.get('a') == 'F')
        {'a': 'F'}
        >>> safe_find([1, 2, 3], lambda rec: rec > 0)
        1
        >>> safe_find([{'a': 'T'}], lambda rec: rec.get('a') == 'F') is None
        True
    """
    # next() with an explicit default is the idiomatic form of
    # try/next/except StopIteration: it returns None when no record matches.
    return next(filter(filter_func, records), None)
def find_by(records: List[Dict], attr: Hashable, value: Any) -> Optional[Dict]:
    """
    Return the first dict in ``records`` whose ``attr`` key equals ``value``,
    or None when no record matches.

    Args:
        records: list of dicts to search
        attr: the key to look up in each dict (missing keys compare as None)
        value: the value to compare against

    Returns:
        Dict record if found or None

    Examples:
        >>> find_by([{'a': 'T', 'b': 'H'}, {'a': 'F'}], attr='b', value='H')
        {'a': 'T', 'b': 'H'}
        >>> find_by([{'a': 'T', 'b': 'H'}, {'a': 'F'}], attr='a', value='Hello') is None
        True
    """
    for record in records:
        if record.get(attr) == value:
            return record
    return None
def filter_dicts(records: List[Dict], as_list=True, **filters) -> Union[List[Dict], Iterable[Dict]]:
    """
    Select the dicts whose key/value pairs match every given filter.

    Args:
        records: list of dicts to iterate and filter from
        as_list: when True return a list, otherwise a lazy filter object
        **filters: key=value pairs every selected dict must contain

    Returns:
        Matching dicts as a list or a filter object

    Examples:
        >>> test_d = [{'a': 1, 'b': 2}, {'a': 0, 'b': 3}, {'a': 1, 'd': 3}, {'a': 2, 'd': 3}]
        >>> filter_dicts(test_d, a=1, d=3)
        [{'a': 1, 'd': 3}]
        >>> filter_dicts(test_d, a=1, b=3, d=3)
        []
        >>> filter_dicts([{}])
        [{}]
        >>> type(filter_dicts(test_d, as_list=False, a=1))
        <class 'filter'>
    """
    def _matches(record):
        # With no filters, all() over an empty generator is True: keep everything.
        return all(record.get(key) == expected for key, expected in filters.items())

    selection = filter(_matches, records)
    return list(selection) if as_list else selection
def filter_objects(objects: List[object], as_list=True, **filters) -> Union[List[object], Iterable[Dict]]:
    """
    Select the objects whose attributes match every given filter.

    Args:
        objects: list of objects to iterate and filter from
        as_list: when True return a list, otherwise a lazy filter object
        **filters: attribute_name=value pairs every selected object must satisfy

    Returns:
        Matching objects as a list or a filter object

    Examples:
        >>> class A:
        ...     def __init__(self, var1, var2):
        ...         self.var1, self.var2 = var1, var2
        ...     def __repr__(self):
        ...         return f'{self.var1}-{self.var2}'
        ...
        >>> filter_objects([A('test', 1), A('test2', 2)], var1='test', var2=1)
        [test-1]
        >>> filter_objects([A('test', 1), A('test2', 2)], var1='test', var2=2)
        []
        >>> type(filter_objects([A('test', 2), A('test2', 2)], as_list=False, var2=2))
        <class 'filter'>
    """
    def _matches(candidate):
        # getattr raises AttributeError for missing attributes, as the caller expects.
        return all(getattr(candidate, name) == expected for name, expected in filters.items())

    selection = filter(_matches, objects)
    return list(selection) if as_list else selection
def find_object(objects: List[object], **filters) -> Optional[object]:
    """
    Return the first object whose attributes match all given filters.

    With no filters, the first object in the list is returned.

    Args:
        objects: list of objects to search
        **filters: attribute_name=value pairs every match must satisfy

    Returns:
        The first matching object, or None

    Examples:
        >>> class A:
        ...     def __init__(self, var1, var2, var3=False):
        ...         self.var1, self.var2, self.var3 = var1, var2, var3
        ...     def __repr__(self):
        ...         return f'{self.var1}-{self.var2}'
        ...
        >>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=1)
        test-1
        >>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=2) is None
        True
    """
    candidates = (
        obj for obj in objects
        if all(getattr(obj, name) == expected for name, expected in filters.items())
    )
    return next(candidates, None)
def exclude_keys(dictionary: Dict, keys: List[Hashable]) -> Dict:
    """
    Create a new dictionary, excluding the keys given.

    Args:
        dictionary: Source dict
        keys: list of keys which we don't want to include in the new dict

    Returns:
        New dict without the excluded keys

    Examples:
        >>> exclude_keys({'a': 1, 'b': 2}, keys=['a'])
        {'b': 2}
    """
    # Build a set once so each membership test is O(1) instead of scanning
    # the keys list for every entry of the dictionary (keys are Hashable).
    excluded = set(keys)
    return {k: v for k, v in dictionary.items() if k not in excluded}
def keep_keys(dictionary: Dict, keys: List[Hashable]) -> Dict:
    """
    Create a new dictionary, keeping only the given keys.

    Args:
        dictionary: Source dict
        keys: list of dict keys which we want to include in the new dict

    Returns:
        New dict restricted to the wanted keys

    Examples:
        >>> keep_keys({'a': 1, 'b': 2}, keys=['a'])
        {'a': 1}
    """
    # Build a set once so each membership test is O(1) instead of scanning
    # the keys list for every entry of the dictionary (keys are Hashable).
    wanted = set(keys)
    return {k: v for k, v in dictionary.items() if k in wanted}
def exclude_none_values(dictionary: Dict) -> Dict:
    """
    Build a copy of ``dictionary`` without the keys mapped to None.

    Args:
        dictionary: Source dict

    Examples:
        >>> exclude_none_values({'a': None, 'b': 1})
        {'b': 1}
    """
    cleaned = {}
    for key, value in dictionary.items():
        if value is None:
            continue
        cleaned[key] = value
    return cleaned
def get_values(dictionary: Dict, keys: List[Hashable], dtypes: Dict = None) -> Dict:
    """
    Extract the given keys from ``dictionary``, optionally converting values.

    Args:
        dictionary: Dictionary with the values
        keys: List of keys to extract (missing keys map to None)
        dtypes: Optional mapping of key -> callable used to convert that value

    Returns:
        New dict with the extracted key:value pairs

    Examples:
        >>> get_values({'a': '1', 'b': 2, 'c': '3'}, keys=['a', 'c'], dtypes={'a': int})
        {'a': 1, 'c': '3'}
    """
    extracted = {}
    for key in keys:
        value = dictionary.get(key)
        converter = dtypes.get(key) if dtypes else None
        # Convert only when a converter exists and there is a value to convert.
        if converter and value is not None:
            value = converter(value)
        extracted[key] = value
    return extracted
def get_differences(
        old_data: DictOrObject,
        new_data: Dict,
        skip_keys: List[Hashable] = None,
        number_precision: int = 6
) -> Dict:
    """
    Get a dictionary with the values that have changed between two versions of data.

    Args:
        old_data: Object or dictionary containing the old version of the data
        new_data: Dictionary containing the new version of the data
        skip_keys: Optional list of keys to skip during comparison
        number_precision: Precision used for number comparisons

    Returns:
        Dict containing the keys that have changed with the new respective values

    Raises:
        KeyError: When ``old_data`` is a dict and lacks a key present in ``new_data``
        AttributeError: When ``old_data`` is an object and lacks such an attribute
    """
    differences = {}
    if not skip_keys:
        skip_keys = []
    # Decide the access style (dict lookup vs getattr) once, not per key.
    old_data_is_dict = isinstance(old_data, dict)
    for key, new_value in new_data.items():
        if key in skip_keys:
            continue
        if old_data_is_dict:
            old_value = old_data[key]
        else:
            old_value = getattr(old_data, key)
        # Process datetime - Add UTC timezone when missing, so that naive and
        # aware datetimes can be compared without raising TypeError.
        if isinstance(old_value, datetime) and not old_value.tzinfo:
            old_value = old_value.replace(tzinfo=timezone.utc)
        if isinstance(new_value, datetime) and not new_value.tzinfo:
            new_value = new_value.replace(tzinfo=timezone.utc)
        # Process float/Decimal
        if isinstance(new_value, (float, Decimal)):
            # Compare numbers taking precision into consideration
            if (old_value is None
                    or not are_equal(old_value, new_value, precision=number_precision)):  # type: ignore
                differences[key] = new_value
        # Compare all other values with plain equality
        else:
            if old_value != new_value:
                differences[key] = new_value
    return differences
def have_equal_values(dict1: Dict, dict2: Dict, attributes: List = None) -> bool:
    """
    Tell whether the given attributes of two dictionaries are equal.

    Args:
        dict1: dict to be compared
        dict2: dict to be compared against
        attributes: keys to compare; defaults to all keys of ``dict1``

    Returns:
        True when every checked key has the same value in both dicts

    Examples:
        >>> have_equal_values({'a': 1, 'b': 3}, {'a': 1, 'b': 2}, attributes=['a'])
        True
        >>> have_equal_values({'a': 1, 'b': 3}, {'a': 1, 'b': 2})
        False
    """
    keys_to_check = attributes if attributes else dict1.keys()
    return all(dict1.get(key) == dict2.get(key) for key in keys_to_check)
def get_nested(dictionary: Dict, *attrs: Hashable) -> Optional[Any]:
    """
    Walk a chain of keys into a nested dict and return the value found.

    Args:
        dictionary: Dict to traverse
        *attrs: Keys for each nesting level, outermost first

    Returns:
        The nested value, or None when any level is missing or not dict-like
        (the lookup never raises; a non-dict intermediate yields None)

    Examples:
        >>> get_nested({'a': {'b': {'c': 'Value'}}}, 'a', 'b', 'c')
        'Value'
        >>> get_nested({}, 'a') is None
        True
        >>> get_nested({'a': {'b': {'c': 'Value'}}}, 'a', 'd', 'c') is None
        True
        >>> get_nested(1, 'a') is None  # type: ignore
        True
    """
    if not dictionary or not attrs:
        return None
    node = dictionary
    for key in attrs:
        try:
            node = node.get(key)
        except AttributeError:
            # Current level has no .get (e.g. we hit a plain value early).
            return None
    return node
def lists_intersection(list1: List, list2: List) -> List:
    """
    Return the elements common to both lists.

    The result comes from a set, so element order is not guaranteed and
    duplicates are removed.

    Args:
        list1: list to look for intersection
        list2: list to be looked against

    Returns:
        New list with the common elements

    Examples:
        >>> lists_intersection([1, 2, 3], [4, 5, 6])
        []
        >>> lists_intersection([1, 'test', 3], [4, 5, 'test'])
        ['test']
    """
    common = set(list1).intersection(list2)
    return list(common)
def chunks(lst: List, chunk: int) -> Generator[List, None, None]:
    """
    Yield successive ``chunk``-sized slices of ``lst``.

    Args:
        lst: list to slice
        chunk: maximum size of each slice

    Yields:
        Consecutive sub-lists; the last one may be shorter

    Examples:
        >>> list(chunks([1, 2, 3, 4, 5, 6], chunk=2))
        [[1, 2], [3, 4], [5, 6]]
        >>> next(chunks([1, 2, 3], chunk=2))
        [1, 2]
    """
    for start in range(0, len(lst), chunk):
        end = start + chunk
        yield lst[start:end]
def ints(values: List[str]) -> List[int]:
    """
    Convert every element of ``values`` to int.

    Args:
        values: list of elements convertible by ``int``

    Returns:
        List with the int representation of each element

    Examples:
        >>> ints(['1', '2', '3'])
        [1, 2, 3]
    """
    return list(map(int, values))
def any_not_none(iterable: Iterable) -> bool:
    """
    Return True when at least one element of ``iterable`` is not None.

    Unlike the builtin ``any``, falsy-but-present values such as 0 or ""
    still count as present: only ``None`` entries are treated as missing.

    Args:
        iterable: Iterable in which we will look for not-None values

    Returns:
        True if at least one element is not None, False otherwise

    Examples:
        >>> any_not_none([None, None, 0])
        True
        >>> any_not_none([None])
        False
        >>> any_not_none([])
        False
    """
    return any(element is not None for element in iterable)
class Finder:
    """
    Finder class to be used for imitating an ORM with dicts and objects.

    Supports Django-style lookups: ``attr__gt=5``, ``attr__in=[...]`` and,
    for objects, nested attribute paths such as ``a__b__c``. Note that the
    keyword names ``as_list`` and ``ignore_types`` are reserved by the API
    and cannot be used as filter keys.

    Examples:
        >>> records = [{'id': 1, 'foo': 'bar'}, {'id': 2, 'a': 'b'}]
        >>> Finder(records).find(id=1)
        {'id': 1, 'foo': 'bar'}
        >>> Finder(records).filter(foo='bar')
        [{'id': 1, 'foo': 'bar'}]
    """
    # Mapping of lookup suffixes to comparison callables.
    ops = {
        "gt": operator.gt,
        "gte": operator.ge,
        "lt": operator.lt,
        "lte": operator.le,
        "eq": operator.eq,
        "ne": operator.ne,
        "in": lambda item, iterable: item in iterable,
    }
    available_ops = ops.keys()

    def __init__(self, records: List["DictOrObject"]):
        self._records = records

    @classmethod
    def _compare(cls, value1: Any, op: str, value2: Any, ignore_types=False) -> bool:
        """Apply the operator named ``op``; optionally compare as strings."""
        if ignore_types:
            value1 = str(value1)
            value2 = str(value2)
        return cls.ops[op](value1, value2)

    @classmethod
    def _verify(cls, record: "DictOrObject", ignore_types=False, **checks) -> bool:
        """
        Verify that the record fulfills the given checks.

        Args:
            record: The record to be verified: dict or object
            ignore_types: If True, compare the values as strings
            **checks: Checks in the form: {"attr__gt": 5}

        Returns:
            True if the record passes *all* the checks, False otherwise
        """
        for key, value in checks.items():
            if "__" in key:
                elements = key.split("__")
                # Last element names the operator; the rest form the attribute path
                if elements[-1] in cls.available_ops:
                    attr = ".".join(elements[:-1])
                    op = elements[-1]
                # No operator suffix: the whole key is a (possibly nested) path
                else:
                    attr = ".".join(elements)
                    op = "eq"
            else:
                attr = key
                op = "eq"
            if not cls._compare(cls._get_value(record, attr), op, value, ignore_types):
                return False
        return True

    @staticmethod
    def _get_value(record: "DictOrObject", attr: str) -> Optional[Any]:
        """
        Get the value of the attribute for the given record. Used to process dicts and objects uniformly.

        Args:
            record: The record from which the value will be retrieved. Can be a dict or an object
            attr: Dotted attribute path (built by _verify from lookups like 'asset__debtor__country_id').
                For dicts the path is used as a flat key; it is not resolved into nested dicts.

        Returns:
            Value found or None
        """
        if isinstance(record, dict):
            return record.get(attr)
        else:
            try:
                while "." in attr:
                    current_attr, attr = attr.split(".", 1)
                    record = getattr(record, current_attr)
                return getattr(record, attr)
            except AttributeError:
                return None

    def filter(self, as_list=True, ignore_types=False, **filters):
        """Return every record matching the filters, as a list or a lazy filter object."""
        filtered_records = filter(lambda rec: self._verify(rec, ignore_types, **filters), self._records)
        if as_list:
            filtered_records = list(filtered_records)
        return filtered_records

    def find(self, ignore_types=False, **filters):
        """Return the first record matching the filters, or None.

        ``ignore_types`` was added (defaulting to False) for consistency with
        ``filter``; existing call sites keep the same behavior.
        """
        for record in self._records:
            if self._verify(record, ignore_types, **filters):
                return record
        return None
| import operator
from datetime import datetime, timezone
from decimal import Decimal
from typing import (
Callable, Hashable, Generator, Iterable, List, Any, Dict, Optional, Union
)
from python_utils.math import are_equal
from python_utils.types_hinting import DictOrObject
def safe_find(records: Iterable, filter_func: Callable) -> Optional[Any]:
"""
Wrapper around the filter function, to return only the first record when
there is a match, or None instead.
Args:
records: List to iterate over
filter_func: Function that will be appliead on each record
Returns:
First occurrence found or None
Examples:
>>> safe_find([{'a': 'T', 'b': 'H'}, {'a': 'F'}], lambda rec: rec.get('a') == 'F')
{'a': 'F'}
>>> safe_find([1, 2, 3], lambda rec: rec > 0)
1
>>> safe_find([{'a': 'T'}], lambda rec: rec.get('a') == 'F') is None
True
"""
try:
return next(filter(filter_func, records))
except StopIteration:
return None
def find_by(records: List[Dict], attr: Hashable, value: Any) -> Optional[Dict]:
"""
Given a list of dicts and an attribute, value, return the first occurrence
where dict[attr] == value or None if no occurrence accomplishes the condition
Args:
records: list of dicts to iterate over
attr: the attr we're trying to check for
value: value to make the check against
Returns:
Dict record if found or None
Examples:
>>> find_by([{'a': 'T', 'b': 'H'}, {'a': 'F'}], attr='b', value='H')
{'a': 'T', 'b': 'H'}
>>> find_by([{'a': 'T', 'b': 'H'}, {'a': 'F'}], attr='a', value='Hello') is None
True
"""
return safe_find(records, lambda rec: value == rec.get(attr))
def filter_dicts(records: List[Dict], as_list=True, **filters) -> Union[List[Dict], Iterable[Dict]]:
"""
Pass a list of dicts and filters as kwargs to get all filtered records as list or generator
Args:
records: list of dicts to iterate and filter from
as_list: Flag that shows if found records should be returned as list
**filters: kwargs to be used for filtering as key:value pairs
Returns:
Filtered records as filter obj or list
Examples:
>>> test_d = [{'a': 1, 'b': 2}, {'a': 0, 'b': 3}, {'a': 1, 'd': 3}, {'a': 2, 'd': 3}]
>>> filter_dicts(test_d, a=1, d=3)
[{'a': 1, 'd': 3}]
>>> filter_dicts(test_d, a=1, b=3, d=3)
[]
>>> filter_dicts(test_d)
[{'a': 1, 'b': 2}, {'a': 0, 'b': 3}, {'a': 1, 'd': 3}, {'a': 2, 'd': 3}]
>>> filter_dicts([{}])
[{}]
>>> type(filter_dicts(test_d, as_list=False, a=1))
<class 'filter'>
>>> sum(1 for _ in filter_dicts(test_d, as_list=False, a=1))
2
"""
filtered_records = filter(
lambda rec: all([rec.get(key) == value for key, value in filters.items()]), records)
if as_list:
filtered_records = list(filtered_records)
return filtered_records
def filter_objects(objects: List[object], as_list=True, **filters) -> Union[List[object], Iterable[Dict]]:
"""
Pass a list of objects and filters as kwargs to get all filtered records as list or filter obj
Args:
objects: list of objects to iterate and filter from
as_list: Flag that shows if found records should be returned as list
**filters: kwargs to be used for filtering as key:value pairs
Returns:
Filtered records as filter obj or list
Examples:
>>> class A:
... def __init__(self, var1, var2):
... self.var1, self.var2 = var1, var2
... def __repr__(self):
... return f'{self.var1}-{self.var2}'
...
>>> filter_objects([A('test', 1), A('test2', 2)], var1='test', var2=1)
[test-1]
>>> filter_objects([A('test', 1), A('test2', 2)], var1='test', var2=2)
[]
>>> filter_objects([A('test', 1), A('test2', 2)])
[test-1, test2-2]
>>> filter_objects([{}])
[{}]
>>> type(filter_objects([A('test', 2), A('test2', 2)], as_list=False, var2=2))
<class 'filter'>
>>> sum(1 for _ in filter_objects([A('test', 2), A('test2', 2)], as_list=False, var2=2))
2
"""
filtered_objects = filter(
lambda rec: all([getattr(rec, key) == value for key, value in filters.items()]), objects)
if as_list:
filtered_objects = list(filtered_objects)
return filtered_objects
def find_object(objects: List[object], **filters) -> Optional[object]:
"""
Pass a list of objects and filters as kwargs to get first occurence record. If no filters
passed return first object in the list
Args:
objects: list of objects to iterate and filter from
**filters: kwargs to be used for filtering as key:value pairs
Returns:
Found record obj or None
Examples:
>>> class A:
... def __init__(self, var1, var2, var3=False):
... self.var1, self.var2, self.var3 = var1, var2, var3
... def __repr__(self):
... return f'{self.var1}-{self.var2}'
...
>>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=1)
test-1
>>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=2) is None
True
>>> find_object([{}])
{}
"""
for rec in objects:
if all([getattr(rec, key) == value for key, value in filters.items()]):
return rec
def exclude_keys(dictionary: Dict, keys: List[Hashable]) -> Dict:
"""
Create a new dictionary, excluding the keys given.
Args:
dictionary: Source dict
keys: list of keys which we don't want to include in the new dict
Examples:
>>> exclude_keys({'a': 1, 'b': 2}, keys=['a'])
{'b': 2}
"""
return {k: v for k, v in dictionary.items() if k not in keys}
def keep_keys(dictionary: Dict, keys: List[Hashable]) -> Dict:
"""
Create a new dictionary, keeping only the given keys.
Args:
dictionary: Source dict
keys: list of dict keys which we want to include in the new dict
Examples:
>>> keep_keys({'a': 1, 'b': 2}, keys=['a'])
{'a': 1}
"""
return {k: v for k, v in dictionary.items() if k in keys}
def exclude_none_values(dictionary: Dict) -> Dict:
"""
Create a new dictionary, removing the keys whose value is None.
Args:
dictionary: Source dict
Examples:
>>> exclude_none_values({'a': None, 'b': 1})
{'b': 1}
"""
return {k: v for k, v in dictionary.items() if v is not None}
def get_values(dictionary: Dict, keys: List[Hashable], dtypes: Dict = None) -> Dict:
"""
Get values from dictionary whose keys are in the keys list
Args:
dictionary: Dictionary with the values
keys: List of keys to extract
dtypes: A mapping of fields to types, used to convert fields
Returns:
New dict with the key:value pairs
Examples:
>>> get_values({'a': '1', 'b': 2, 'c': '3'}, keys=['a', 'c'], dtypes={'a': int})
{'a': 1, 'c': '3'}
"""
data = {}
for key in keys:
value = dictionary.get(key)
# Apply type conversion
if dtypes and value is not None and dtypes.get(key):
value = dtypes[key](value)
data[key] = value
return data
def get_differences(
old_data: DictOrObject,
new_data: Dict,
skip_keys: List[Hashable] = None,
number_precision: int = 6
) -> Dict:
"""
Get a dictionary with the values that have changed between two versions of data.
Args:
old_data: Object or dictionary containing the old version of the data
new_data: Dictionary containing the new version of the data
skip_keys: Optional list of keys to skip during comparison
number_precision: Precision used for number comparisons
Returns:
Dict containing the keys that have changed with the new respective values
"""
differences = {}
if not skip_keys:
skip_keys = []
old_data_is_dict = isinstance(old_data, dict)
for key, new_value in new_data.items():
if key in skip_keys:
continue
if old_data_is_dict:
old_value = old_data[key]
else:
old_value = getattr(old_data, key)
# Process Decimal
# Process datetime - Add UTC timezone when missing
if isinstance(old_value, datetime) and not old_value.tzinfo:
old_value = old_value.replace(tzinfo=timezone.utc)
if isinstance(new_value, datetime) and not new_value.tzinfo:
new_value = new_value.replace(tzinfo=timezone.utc)
if isinstance(new_value, (float, Decimal)):
# Compare numbers taking precision into consideration
if (old_value is None
or not are_equal(old_value, new_value, precision=number_precision)): # type: ignore
differences[key] = new_value
# Compare values
else:
if old_value != new_value:
differences[key] = new_value
return differences
def have_equal_values(dict1: Dict, dict2: Dict, attributes: List = None) -> bool:
"""
Compare if the given attributes of two dictionaries are equal.
Args:
dict1: dict to be compared
dict2: dict to be compared against
attributes: list of keys that are going to compare, default None, check all keys in dict1
Returns:
True if dict1 attributes are equal with dict2, False otherwise
Examples:
>>> have_equal_values({'a': 1, 'b': 3}, {'a': 1, 'b': 2}, attributes=['a'])
True
>>> have_equal_values({'a': 1, 'b': 3}, {'a': 1, 'b': 2})
False
"""
if not attributes:
attributes = dict1.keys()
for attribute in attributes:
if dict1.get(attribute) != dict2.get(attribute):
return False
return True
def get_nested(dictionary: Dict, *attrs: Hashable) -> Optional[Any]:
"""
Access a nested value in a dict by passing the keys of all the levels.
Args:
dictionary: Dict object we want to access
*attrs: Keys we want to access
Returns:
The value we want to get or None if it can't be found
Raises:
AttributeError: When the original object or a nested one is not a dict
Examples:
>>> get_nested({'a': {'b': {'c': 'Value'}}}, 'a', 'b', 'c')
'Value'
>>> get_nested({}, 'a') is None
True
>>> get_nested({'a': 'b'}, 'c') is None
True
>>> get_nested({'a': {'b': {'c': 'Value'}}}, 'a', 'd', 'c') is None
True
>>> get_nested(1, 'a') is None # type: ignore
True
"""
if not dictionary or not attrs:
return None
current_value = dictionary
for attr in attrs:
try:
current_value = current_value.get(attr)
except AttributeError:
return None
return current_value
def lists_intersection(list1: List, list2: List) -> List:
"""
Find the common elements between 2 lists.
Args:
list1: list to look for intersection
list2: list to be looked against
Returns:
new list with the intersection of the 2 lists
Examples:
>>> lists_intersection([1, 2, 3], [2, 3, 4])
[2, 3]
>>> lists_intersection([1, 2, 3], [4, 5, 6])
[]
>>> lists_intersection([1, 'test', 3], [4, 5, 'test'])
['test']
"""
return list(set(list1).intersection(list2))
def chunks(lst: List, chunk: int) -> Generator[List, None, None]:
"""
Yield successive n-sized chunks from lst.
Args:
lst: list to yield chunks from
chunk: size of the chunk
Returns:
list of elements from generator
Examples:
>>> a = chunks([1, 2, 3, 4, 5, 6], chunk=2)
>>> print(type(a))
<class 'generator'>
>>> sum(1 for _ in a)
3
>>> next(chunks([1, 2, 3, 4, 5, 6], chunk=2))
[1, 2]
"""
for i in range(0, len(lst), chunk):
yield lst[i: i + chunk]
def ints(values: List[str]) -> List[int]:
"""
Converts elements of a list to ints
Args:
values: list of elements to be converted
Returns:
list with ints representations of elements
Examples:
>>> ints(['1', '2', '3'])
[1, 2, 3]
"""
return [int(value) for value in values]
def any_not_none(iterable: Iterable) -> bool:
"""
Verify if any of the elements of the iterable is not None.
The default behaviour of the builtin any function checks the value
with `if element` and causes the values like zero (0) to be
treated as Falsy values. This function aims to change this behaviour
to return False only when all the values are None.
Args:
iterable: Iterable in which we will look for not None values
Returns:
bool value indicating if at least one element is not None
Examples:
>>> any_not_none([None, None, 13])
True
>>> any_not_none([None, None, 0])
True
>>> any_not_none([None])
False
>>> any_not_none([])
False
"""
for element in iterable:
if element is not None:
return True
return False
class Finder:
"""
Finder class to be used for imitating an ORM with dict and objects
Examples:
>>> records = [{'id': 1, 'foo': 'bar'}, {'id': 2, 'a': 'b'}]
>>> Finder(records).find(id=1)
{'id': 1, 'foo': 'bar'}
>>> Finder(records).filter(foo='bar')
[{'id': 1, 'foo': 'bar'}]
"""
ops = {
"gt": operator.gt,
"gte": operator.ge,
"lt": operator.lt,
"lte": operator.le,
"eq": operator.eq,
"ne": operator.ne,
"in": lambda item, iterable: item in iterable,
}
available_ops = ops.keys()
def __init__(self, records: List[DictOrObject]):
self._records = records
@classmethod
def _compare(cls, value1: Any, op: str, value2: Any, ignore_types=False) -> bool:
if ignore_types:
value1 = str(value1)
value2 = str(value2)
return cls.ops[op](value1, value2)
@classmethod
def _verify(cls, record: DictOrObject, ignore_types=False, **checks) -> bool:
"""
Verify that the record fulfills the given checks.
Args:
record: The record to be verified: dict or object
ignore_types: If True, compare the values as strings
**checks: Dictionary with checks in the form: {"attr__gt": 5}
Returns:
True if the record passes *all* the checks, False otherwise
"""
for key, value in checks.items():
if "__" in key:
elements = key.split("__")
# If operator has been declared
if elements[-1] in cls.available_ops:
attr = ".".join(elements[:-1])
op = elements[-1]
# eq operator by default
else:
attr = ".".join(elements)
op = "eq"
else:
attr = key
op = "eq"
if not cls._compare(cls._get_value(record, attr), op, value, ignore_types):
return False
return True
@staticmethod
def _get_value(record: DictOrObject, attr: str) -> Optional[Any]:
"""
Get the value of the attribute for the given record. Used to process dicts and objects uniformly.
Args:
record: The record from which the value will be retrieved. Can be a dict or an object
attr: The attribute to retrieve value. For objects, supports syntax like 'asset__debtor__country_id'.
Returns:
Value found or None
"""
if isinstance(record, dict):
return record.get(attr)
else:
try:
while "." in attr:
current_attr, attr = attr.split(".", 1)
record = getattr(record, current_attr)
return getattr(record, attr)
except AttributeError:
return None
def filter(self, as_list=True, ignore_types=False, **filters):
filtered_records = filter(lambda rec: self._verify(rec, ignore_types, **filters), self._records)
if as_list:
filtered_records = list(filtered_records)
return filtered_records
def find(self, **filters):
for record in self._records:
if self._verify(record, **filters):
return record
| en | 0.624438 | Wrapper around the filter function, to return only the first record when there is a match, or None instead. Args: records: List to iterate over filter_func: Function that will be appliead on each record Returns: First occurrence found or None Examples: >>> safe_find([{'a': 'T', 'b': 'H'}, {'a': 'F'}], lambda rec: rec.get('a') == 'F') {'a': 'F'} >>> safe_find([1, 2, 3], lambda rec: rec > 0) 1 >>> safe_find([{'a': 'T'}], lambda rec: rec.get('a') == 'F') is None True Given a list of dicts and an attribute, value, return the first occurrence where dict[attr] == value or None if no occurrence accomplishes the condition Args: records: list of dicts to iterate over attr: the attr we're trying to check for value: value to make the check against Returns: Dict record if found or None Examples: >>> find_by([{'a': 'T', 'b': 'H'}, {'a': 'F'}], attr='b', value='H') {'a': 'T', 'b': 'H'} >>> find_by([{'a': 'T', 'b': 'H'}, {'a': 'F'}], attr='a', value='Hello') is None True Pass a list of dicts and filters as kwargs to get all filtered records as list or generator Args: records: list of dicts to iterate and filter from as_list: Flag that shows if found records should be returned as list **filters: kwargs to be used for filtering as key:value pairs Returns: Filtered records as filter obj or list Examples: >>> test_d = [{'a': 1, 'b': 2}, {'a': 0, 'b': 3}, {'a': 1, 'd': 3}, {'a': 2, 'd': 3}] >>> filter_dicts(test_d, a=1, d=3) [{'a': 1, 'd': 3}] >>> filter_dicts(test_d, a=1, b=3, d=3) [] >>> filter_dicts(test_d) [{'a': 1, 'b': 2}, {'a': 0, 'b': 3}, {'a': 1, 'd': 3}, {'a': 2, 'd': 3}] >>> filter_dicts([{}]) [{}] >>> type(filter_dicts(test_d, as_list=False, a=1)) <class 'filter'> >>> sum(1 for _ in filter_dicts(test_d, as_list=False, a=1)) 2 Pass a list of objects and filters as kwargs to get all filtered records as list or filter obj Args: objects: list of objects to iterate and filter from as_list: Flag that shows if found records should be returned as list **filters: 
kwargs to be used for filtering as key:value pairs Returns: Filtered records as filter obj or list Examples: >>> class A: ... def __init__(self, var1, var2): ... self.var1, self.var2 = var1, var2 ... def __repr__(self): ... return f'{self.var1}-{self.var2}' ... >>> filter_objects([A('test', 1), A('test2', 2)], var1='test', var2=1) [test-1] >>> filter_objects([A('test', 1), A('test2', 2)], var1='test', var2=2) [] >>> filter_objects([A('test', 1), A('test2', 2)]) [test-1, test2-2] >>> filter_objects([{}]) [{}] >>> type(filter_objects([A('test', 2), A('test2', 2)], as_list=False, var2=2)) <class 'filter'> >>> sum(1 for _ in filter_objects([A('test', 2), A('test2', 2)], as_list=False, var2=2)) 2 Pass a list of objects and filters as kwargs to get first occurence record. If no filters passed return first object in the list Args: objects: list of objects to iterate and filter from **filters: kwargs to be used for filtering as key:value pairs Returns: Found record obj or None Examples: >>> class A: ... def __init__(self, var1, var2, var3=False): ... self.var1, self.var2, self.var3 = var1, var2, var3 ... def __repr__(self): ... return f'{self.var1}-{self.var2}' ... >>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=1) test-1 >>> find_object([A('test', 1), A('test2', 2)], var1='test', var2=2) is None True >>> find_object([{}]) {} Create a new dictionary, excluding the keys given. Args: dictionary: Source dict keys: list of keys which we don't want to include in the new dict Examples: >>> exclude_keys({'a': 1, 'b': 2}, keys=['a']) {'b': 2} Create a new dictionary, keeping only the given keys. Args: dictionary: Source dict keys: list of dict keys which we want to include in the new dict Examples: >>> keep_keys({'a': 1, 'b': 2}, keys=['a']) {'a': 1} Create a new dictionary, removing the keys whose value is None. 
Args: dictionary: Source dict Examples: >>> exclude_none_values({'a': None, 'b': 1}) {'b': 1} Get values from dictionary whose keys are in the keys list Args: dictionary: Dictionary with the values keys: List of keys to extract dtypes: A mapping of fields to types, used to convert fields Returns: New dict with the key:value pairs Examples: >>> get_values({'a': '1', 'b': 2, 'c': '3'}, keys=['a', 'c'], dtypes={'a': int}) {'a': 1, 'c': '3'} # Apply type conversion Get a dictionary with the values that have changed between two versions of data. Args: old_data: Object or dictionary containing the old version of the data new_data: Dictionary containing the new version of the data skip_keys: Optional list of keys to skip during comparison number_precision: Precision used for number comparisons Returns: Dict containing the keys that have changed with the new respective values # Process Decimal # Process datetime - Add UTC timezone when missing # Compare numbers taking precision into consideration # type: ignore # Compare values Compare if the given attributes of two dictionaries are equal. Args: dict1: dict to be compared dict2: dict to be compared against attributes: list of keys that are going to compare, default None, check all keys in dict1 Returns: True if dict1 attributes are equal with dict2, False otherwise Examples: >>> have_equal_values({'a': 1, 'b': 3}, {'a': 1, 'b': 2}, attributes=['a']) True >>> have_equal_values({'a': 1, 'b': 3}, {'a': 1, 'b': 2}) False Access a nested value in a dict by passing the keys of all the levels. 
Args: dictionary: Dict object we want to access *attrs: Keys we want to access Returns: The value we want to get or None if it can't be found Raises: AttributeError: When the original object or a nested one is not a dict Examples: >>> get_nested({'a': {'b': {'c': 'Value'}}}, 'a', 'b', 'c') 'Value' >>> get_nested({}, 'a') is None True >>> get_nested({'a': 'b'}, 'c') is None True >>> get_nested({'a': {'b': {'c': 'Value'}}}, 'a', 'd', 'c') is None True >>> get_nested(1, 'a') is None # type: ignore True Find the common elements between 2 lists. Args: list1: list to look for intersection list2: list to be looked against Returns: new list with the intersection of the 2 lists Examples: >>> lists_intersection([1, 2, 3], [2, 3, 4]) [2, 3] >>> lists_intersection([1, 2, 3], [4, 5, 6]) [] >>> lists_intersection([1, 'test', 3], [4, 5, 'test']) ['test'] Yield successive n-sized chunks from lst. Args: lst: list to yield chunks from chunk: size of the chunk Returns: list of elements from generator Examples: >>> a = chunks([1, 2, 3, 4, 5, 6], chunk=2) >>> print(type(a)) <class 'generator'> >>> sum(1 for _ in a) 3 >>> next(chunks([1, 2, 3, 4, 5, 6], chunk=2)) [1, 2] Converts elements of a list to ints Args: values: list of elements to be converted Returns: list with ints representations of elements Examples: >>> ints(['1', '2', '3']) [1, 2, 3] Verify if any of the elements of the iterable is not None. The default behaviour of the builtin any function checks the value with `if element` and causes the values like zero (0) to be treated as Falsy values. This function aims to change this behaviour to return False only when all the values are None. 
Args: iterable: Iterable in which we will look for not None values Returns: bool value indicating if at least one element is not None Examples: >>> any_not_none([None, None, 13]) True >>> any_not_none([None, None, 0]) True >>> any_not_none([None]) False >>> any_not_none([]) False Finder class to be used for imitating an ORM with dict and objects Examples: >>> records = [{'id': 1, 'foo': 'bar'}, {'id': 2, 'a': 'b'}] >>> Finder(records).find(id=1) {'id': 1, 'foo': 'bar'} >>> Finder(records).filter(foo='bar') [{'id': 1, 'foo': 'bar'}] Verify that the record fulfills the given checks. Args: record: The record to be verified: dict or object ignore_types: If True, compare the values as strings **checks: Dictionary with checks in the form: {"attr__gt": 5} Returns: True if the record passes *all* the checks, False otherwise # If operator has been declared # eq operator by default Get the value of the attribute for the given record. Used to process dicts and objects uniformly. Args: record: The record from which the value will be retrieved. Can be a dict or an object attr: The attribute to retrieve value. For objects, supports syntax like 'asset__debtor__country_id'. Returns: Value found or None | 3.320009 | 3 |
cacreader/pyscard-2.0.2/test/test_SCardGetErrorMessage.py | kyletanyag/LL-Smartcard | 0 | 6614061 | # -*- coding: utf-8 -*-
# to execute:
# $ cd test
# $ python -m unittest
import unittest
from smartcard.scard import SCardGetErrorMessage
from smartcard.scard import SCARD_S_SUCCESS, SCARD_F_INTERNAL_ERROR
from distutils.util import get_platform
class TestError(unittest.TestCase):
    """Tests for smartcard.scard.SCardGetErrorMessage()."""

    def test_SCardGetErrorMessage(self):
        """Known PC/SC return codes map to their message strings."""
        # BUG FIX: distutils was removed in Python 3.12 (PEP 632);
        # sysconfig.get_platform() returns the same platform strings
        # ('win32', 'win-amd64', ...).  The module-level distutils import
        # is now unused and can be dropped.
        import sysconfig

        res = SCardGetErrorMessage(SCARD_S_SUCCESS)
        # do not test on Windows
        # the error messages are different and localized
        if sysconfig.get_platform() in ('win32', 'win-amd64'):
            return
        expected = "Command successful."
        self.assertEqual(res, expected)
        res = SCardGetErrorMessage(SCARD_F_INTERNAL_ERROR)
        expected = "Internal error."
        self.assertEqual(res, expected)
        res = SCardGetErrorMessage(1)
        # an out-of-range code yields a generic "unknown error" message
        expected = "Unknown error: 0x00000001"
        # macOS bug not yet fixed (the system message misspells "Unknown")
        macos_bug_expected = "Unkown error: 0x00000001"
        self.assertIn(res, [expected, macos_bug_expected])
# Allow running this test module directly (see the usage comment up top).
if __name__ == '__main__':
    unittest.main()
| # -*- coding: utf-8 -*-
# to execute:
# $ cd test
# $ python -m unittest
import unittest
from smartcard.scard import SCardGetErrorMessage
from smartcard.scard import SCARD_S_SUCCESS, SCARD_F_INTERNAL_ERROR
from distutils.util import get_platform
class TestError(unittest.TestCase):
    """Tests for smartcard.scard.SCardGetErrorMessage()."""

    def test_SCardGetErrorMessage(self):
        """Known PC/SC return codes map to their message strings."""
        res = SCardGetErrorMessage(SCARD_S_SUCCESS)
        # do not test on Windows
        # the error messages are different and localized
        # NOTE(review): distutils is removed in Python 3.12+ (PEP 632);
        # sysconfig.get_platform() is the drop-in replacement.
        if get_platform() in ('win32', 'win-amd64'):
            return
        expected = "Command successful."
        self.assertEqual(res, expected)
        res = SCardGetErrorMessage(SCARD_F_INTERNAL_ERROR)
        expected = "Internal error."
        self.assertEqual(res, expected)
        res = SCardGetErrorMessage(1)
        # an out-of-range code yields a generic "unknown error" message
        expected = "Unknown error: 0x00000001"
        # macOS bug not yet fixed (the system message misspells "Unknown")
        macos_bug_expected = "Unkown error: 0x00000001"
        self.assertIn(res, [expected, macos_bug_expected])
# Allow running this test module directly (see the usage comment up top).
if __name__ == '__main__':
    unittest.main()
| en | 0.64978 | # -*- coding: utf-8 -*- # to execute: # $ cd test # $ python -m unittest # do not test on Windows # the error messages are different and localized # macOS bug not yet fixed | 2.514887 | 3 |
src/dbxdeploy/notebook/NotebooksLocator.py | DataSentics/dbx-deploy | 0 | 6614062 | from pathlib import Path, PurePosixPath
from typing import List
from dbxdeploy.notebook.Notebook import Notebook
from dbxdeploy.notebook.RelativePathResolver import RelativePathResolver
class NotebooksLocator:
    """Finds notebook files on disk by glob pattern and wraps them in
    ``Notebook`` value objects."""

    def __init__(
        self,
        projectBaseDir: Path,
        relativeBaseDir: str,
        pathsPatterns: list,
        consumerPathsPatterns: list,
        relativePathResolver: RelativePathResolver,
    ):
        self.__projectBaseDir = projectBaseDir
        self.__relativeBaseDir = relativeBaseDir
        self.__pathsPatterns = pathsPatterns
        self.__consumerPathsPatterns = consumerPathsPatterns
        self.__relativePathResolver = relativePathResolver

    def locate(self) -> List[Notebook]:
        """Return notebooks matching the regular path patterns."""
        return self.__locate(self.__pathsPatterns)

    def locateConsumers(self):
        """Return notebooks matching the consumer path patterns."""
        return self.__locate(self.__consumerPathsPatterns)

    def __locate(self, pathsPatterns: list):
        searchRoot = self.__projectBaseDir.joinpath(self.__relativeBaseDir)

        matchedPaths = []
        for pattern in pathsPatterns:
            matchedPaths.extend(searchRoot.glob(pattern))

        return [self.__createNotebook(notebookPath) for notebookPath in matchedPaths]

    def __createNotebook(self, notebookPath: Path) -> Notebook:
        # Path relative to the project root, both as a Path and as a POSIX
        # string path for the relative path resolver.
        relativePath = notebookPath.relative_to(self.__projectBaseDir)
        posixPath = PurePosixPath(relativePath.as_posix())

        return Notebook(
            notebookPath,
            relativePath,
            self.__relativePathResolver.resolve(posixPath),
        )
| from pathlib import Path, PurePosixPath
from typing import List
from dbxdeploy.notebook.Notebook import Notebook
from dbxdeploy.notebook.RelativePathResolver import RelativePathResolver
class NotebooksLocator:
    """Finds notebook files on disk by glob pattern.

    ``locate()`` uses the regular path patterns, ``locateConsumers()`` the
    consumer patterns; both return ``Notebook`` objects carrying the path
    relative to ``projectBaseDir``.
    """

    def __init__(
        self,
        projectBaseDir: Path,
        relativeBaseDir: str,
        pathsPatterns: list,
        consumerPathsPatterns: list,
        relativePathResolver: RelativePathResolver,
    ):
        # Root of the project checkout on the local filesystem.
        self.__projectBaseDir = projectBaseDir
        # Directory under the project root that is searched for notebooks.
        self.__relativeBaseDir = relativeBaseDir
        # Glob patterns (relative to the base dir) for deployable notebooks.
        self.__pathsPatterns = pathsPatterns
        # Glob patterns for the "consumer" notebooks.
        self.__consumerPathsPatterns = consumerPathsPatterns
        # Maps a project-relative POSIX path to its target path
        # (presumably the workspace path — see RelativePathResolver).
        self.__relativePathResolver = relativePathResolver

    def locate(self) -> List[Notebook]:
        """Return all notebooks matching the regular path patterns."""
        return self.__locate(self.__pathsPatterns)

    def locateConsumers(self):
        """Return all notebooks matching the consumer path patterns."""
        return self.__locate(self.__consumerPathsPatterns)

    def __locate(self, pathsPatterns: list):
        def createNotebook(path: Path):
            # Project-relative path as POSIX, fed to the resolver.
            purePosixPath = PurePosixPath(path.relative_to(self.__projectBaseDir).as_posix())
            return Notebook(
                path,
                path.relative_to(self.__projectBaseDir),
                self.__relativePathResolver.resolve(purePosixPath)
            )

        baseDir = self.__projectBaseDir.joinpath(self.__relativeBaseDir)
        filesGrabbed = []
        for pathPattern in pathsPatterns:
            filesGrabbed.extend(baseDir.glob(pathPattern))

        # NOTE(review): the pylint suppression looks stale — createNotebook
        # does not capture a loop variable.
        return list(map(createNotebook, filesGrabbed))  # pylint: disable = cell-var-from-loop
| en | 0.639297 | # pylint: disable = cell-var-from-loop | 2.198559 | 2 |
src/views/search.py | anthony-chukwuemeka-nwachukwu/Movie-Recommender-System | 0 | 6614063 | <reponame>anthony-chukwuemeka-nwachukwu/Movie-Recommender-System<gh_stars>0
import base64
from operator import index
from flask import Blueprint, jsonify, redirect, render_template, request, url_for
from src import db
from src.models.movie import Movie, movie_schemas
from src.forms.search import SearchForm
from .index import index
from src.param import signed_in, no_of_movie_per_genre, no_imdb_genres
# Flask blueprint grouping the search endpoints (registered on the app elsewhere).
search = Blueprint('search', __name__)
@search.route('/search', methods=['POST', 'GET'])
def search_movie():
    """Handle the search form.

    On a valid submission the query is read from the form and the user is
    redirected back to the home page (post/redirect/get); on GET or an
    invalid submission the user is redirected home as well.
    """
    form = SearchForm()
    if form.validate_on_submit():
        #get_movies()
        search_term = form.query.data
        print(search_term)  # NOTE(review): replace with proper logging
        # BUG FIX: render_template() expects a template *name*; passing the
        # URL produced by url_for() raised TemplateNotFound.  Redirect to
        # the home endpoint instead, carrying the query as a request arg.
        return redirect(url_for('home.index', q=search_term))
    return redirect(url_for('home.index'))
#url_for( 'index.home', user_name=session.get('user_id'), genre=genre, id=movie[0], title=movie[1], poster=movie[2], url=movie[3], duration=movie[4], director=movie[5], description=movie[6] )}
def search_movies():
    """Build the per-genre movie catalogue.

    Returns a dict mapping each capitalized genre name to a list of
    ``[id, title, poster_address]`` entries.  A movie is listed only under
    the first genre it appears in, and each genre is capped at
    ``no_of_movie_per_genre`` entries.
    """
    # Distinct genre names, capped to the number of genres displayed.
    genres = [g[0] for g in db.session.execute("""SELECT DISTINCT genre FROM genre""").all()][:no_imdb_genres]

    # NOTE(review): these values come from our own database, but the queries
    # should still use bound parameters rather than str.format().
    def genres_query(g):
        return """SELECT DISTINCT movie_id FROM genre WHERE genre='{}'""".format(g)

    def movie_query(m):
        return """SELECT DISTINCT id, title, poster_address FROM movies WHERE movies.id='{}'""".format(m)

    movie_all = {}
    seen_ids = set()  # ids of movies already assigned to an earlier genre
    for genre in genres:
        new_movies = []
        # One query per movie id: an N+1 pattern (a single JOIN would do).
        for mid in db.session.execute(genres_query(genre)).all():
            rows = db.session.execute(movie_query(mid[0])).all()
            # Skip ids without a movie row and movies already shown.
            # (The previous per-column copy loop had two identical branches
            # left over from a removed base64 poster encoding; collapsed.)
            if rows and rows[0][0] not in seen_ids:
                seen_ids.add(rows[0][0])
                new_movies.append(list(rows[0]))
        movie_all[genre.capitalize()] = new_movies[:no_of_movie_per_genre]
    return movie_all
from operator import index
from flask import Blueprint, jsonify, redirect, render_template, request, url_for
from src import db
from src.models.movie import Movie, movie_schemas
from src.forms.search import SearchForm
from .index import index
from src.param import signed_in, no_of_movie_per_genre, no_imdb_genres
# Flask blueprint grouping the search endpoints (registered on the app elsewhere).
search = Blueprint('search', __name__)
@search.route('/search', methods=['POST', 'GET'])
def search_movie():
    """Handle the search form submitted from the home page."""
    form = SearchForm()
    if form.validate_on_submit():
        #get_movies()
        search_term = form.query.data
        print(search_term)
        # NOTE(review): render_template() expects a template *name*;
        # url_for() returns a URL path, so this raises TemplateNotFound.
        # redirect(url_for('home.index')) is probably what was intended.
        return render_template(url_for('home.index', form=form))
    return render_template(url_for('home.index', form=form))
#url_for( 'index.home', user_name=session.get('user_id'), genre=genre, id=movie[0], title=movie[1], poster=movie[2], url=movie[3], duration=movie[4], director=movie[5], description=movie[6] )}
def search_movies():
    """Build the per-genre movie catalogue.

    Builds a dict mapping each capitalized genre name to a list of
    ``[id, title, poster_address]`` entries; a movie is listed only under
    the first genre it appears in, and each genre is capped at
    ``no_of_movie_per_genre`` entries.
    """
    # get movie titles in genres
    genres = """SELECT DISTINCT genre FROM genre"""
    genres = [g[0] for g in db.session.execute(genres).all()][:no_imdb_genres]
    movie_all = {}
    # NOTE(review): interpolating values with str.format() is vulnerable to
    # SQL injection; bound parameters should be used instead.
    genres_query = lambda g: """SELECT DISTINCT movie_id FROM genre WHERE genre='{}'""".format(g)
    movie_query = lambda m: """SELECT DISTINCT id, title, poster_address FROM movies WHERE movies.id='{}'""".format(m)
    unique_titles = []  # ids of movies already assigned to an earlier genre
    for genre in genres:
        # One movie query per id: an N+1 query pattern (could be one JOIN).
        movies = [db.session.execute(movie_query(mid[0])).all() for mid in db.session.execute(genres_query(genre)).all()]
        new_movies = []
        for movie in movies:
            # Skip empty result sets and movies already shown under a genre.
            if movie and movie[0][0] not in unique_titles:
                unique_titles.append(movie[0][0])
                new_movie = []
                for i, m in enumerate(movie[0]):
                    # NOTE(review): both branches are identical; the i == 2
                    # case used to base64-encode the poster (see the
                    # commented-out line) and can now be collapsed.
                    if i == 2:
                        #new_movie.append(base64.b64encode(m).decode("utf-8"))
                        new_movie.append(m)
                    else:
                        new_movie.append(m)
                new_movies.append(new_movie)
        movie_all[genre.capitalize()] = new_movies[:no_of_movie_per_genre]
return movie_all | en | 0.312727 | #get_movies() #url_for( 'index.home', user_name=session.get('user_id'), genre=genre, id=movie[0], title=movie[1], poster=movie[2], url=movie[3], duration=movie[4], director=movie[5], description=movie[6] )} # get movie titles in genres SELECT DISTINCT genre FROM genre SELECT DISTINCT movie_id FROM genre WHERE genre='{}' SELECT DISTINCT id, title, poster_address FROM movies WHERE movies.id='{}' #new_movie.append(base64.b64encode(m).decode("utf-8")) | 2.565599 | 3 |
testPyWavelets.py | CaptainEven/PyScripts | 5 | 6614064 | <reponame>CaptainEven/PyScripts<gh_stars>1-10
# # -*- coding: utf-8 -*-
# import numpy as np
# import matplotlib.pyplot as plt
# import pywt
# import pywt.data
# # Load image
# original = pywt.data.aero()
# print('original.shape:',original.shape)
# # Wavelet transform of image, and plot approximation and details
# titles = ['Approximation', ' Horizontal detail',
# 'Vertical detail', 'Diagonal detail']
# coeffs2 = pywt.dwt2(original, 'bior1.3')
# LL, (LH, HL, HH) = coeffs2
# fig = plt.figure(figsize=(8,8))
# for i, a in enumerate([LL, LH, HL, HH]):
# ax = fig.add_subplot(2, 2, i + 1)
# ax.imshow(a, origin='image', interpolation="nearest", cmap=plt.cm.gray)
# ax.set_title(titles[i], fontsize=12)
# fig.suptitle("dwt2 coefficients", fontsize=14)
# # Now reconstruct and plot the original image
# reconstructed = pywt.idwt2(coeffs2, 'bior1.3')
# fig = plt.figure(figsize=(8 ,8))
# plt.imshow(reconstructed, interpolation="nearest", cmap=plt.cm.gray)
# # Check that reconstructed image is close to the original
# np.testing.assert_allclose(original, reconstructed, atol=1e-13, rtol=1e-13)
# # Now do the same with dwtn/idwtn, to show the difference in their signatures
# coeffsn = pywt.dwtn(original, 'bior1.3')
# fig = plt.figure(figsize = (8, 8))
# for i, key in enumerate(['aa', 'ad', 'da', 'dd']):
# ax = fig.add_subplot(2, 2, i + 1)
# ax.imshow(coeffsn[key], origin='image', interpolation="nearest",
# cmap=plt.cm.gray)
# ax.set_title(titles[i], fontsize=12)
# fig.suptitle("dwtn coefficients", fontsize=14)
# # Now reconstruct and plot the original image
# reconstructed = pywt.idwtn(coeffsn, 'bior1.3')
# fig = plt.figure(figsize = (8, 8))
# plt.imshow(reconstructed, interpolation="nearest", cmap=plt.cm.gray)
# # Check that reconstructed image is close to the original
# np.testing.assert_allclose(original, reconstructed, atol=1e-13, rtol=1e-13)
# plt.show()
# -*- coding: cp936 -*-
import pywt
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from statsmodels.robust import stand_mad
wavtag = 'db8'  # name of the Daubechies-8 wavelet used throughout this script
# #===============================================================================
# # 图1:绘出Haar小波母函数
# #===============================================================================
# # 这里不是“函数调用”,二是“对象声明和创建”
# # 创建了一个pywt.Wavelet类,用以描述小波母函数的各种性质
# w = pywt.Wavelet('Haar')
# # 调用Wavefun()成员函数,返回:
# # phi - scaling function 尺度函数
# # psi - wavelet function 母函数
# phi, psi, x = w.wavefun(level=10)
# # 注意,此处采用“面对对象”的方式使用matplotlib
# # 而不是“状态机”的方式
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.set_xlim(-0.02, 1.02)
# ax.plot(x, psi)
# ax.grid(True)
# plt.show()
# #===============================================================================
# # 图2:Debauchies小波的尺度函数和母函数
# #===============================================================================
# db8 = pywt.Wavelet(wavtag)
# scaling, wavelet, x = db8.wavefun()
# fig = plt.figure(2)
# ax1 = fig.add_subplot(121)
# ax1.plot(x, scaling)
# ax1.set_title('Scaling function,' + wavtag)
# ax1.set_ylim(-1.2, 1.2)
# ax1.grid(True)
# ax2 = fig.add_subplot(122, sharey=ax1)
# ax2.set_title('Wavelet,' + wavtag)
# ax2.plot(x, wavelet)
# ax2.tick_params(labelleft=False)
# ax2.grid(True)
# plt.tight_layout()
# plt.show()
#===============================================================================
# Figure 3: wavelet denoising demo - the original signal and the noisy signal
#===============================================================================
def Blocks(x):
    """Donoho-Johnstone 'Blocks' test signal evaluated at *x*.

    A piecewise-constant signal built from 11 shifted Heaviside steps with
    the classic jump positions *t* and heights *h*.  *x* may be a scalar or
    a 1-D array; a 1-D numpy array is returned.
    """
    def step(u):
        # Heaviside step: 0 for u < 0, 0.5 at 0, 1 for u > 0
        # (was an E731 lambda assignment)
        return (1.0 + np.sign(u)) / 2.0

    t = np.array(
        [[0.1, 0.13, 0.15, 0.23, 0.25, 0.4, 0.44, 0.65, 0.76, 0.78, 0.81]]).T
    h = np.array([[4, -5, 3, -4, 5, -4.2, 2.1, 4.3, -3.1, 2.1, -4.2]]).T
    return 3.655606 * np.sum(h * step(x - t), axis=0)
def bumps(x):
    """Donoho-Johnstone 'Bumps' test signal evaluated at *x*.

    A sum of 11 sharp positive bumps with per-bump centre *t*, height *h*
    and width *w*.  *x* may be a scalar or a 1-D array; a 1-D numpy array
    is returned.
    """
    def kernel(u):
        # Bump kernel: decays as (1 + |u|)^-4 away from the centre
        # (was an E731 lambda assignment)
        return (1.0 + np.abs(u)) ** -4.0

    t = np.array([[.1, .13, .15, .23, .25, .4, .44, .65, .76, .78, .81]]).T
    h = np.array([[4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 2.1, 4.2]]).T
    w = np.array(
        [[.005, .005, .006, .01, .01, .03, .01, .01, .005, .008, .005]]).T
    return np.sum(h * kernel((x - t) / w), axis=0)
# Build the clean reference signal: 2**15 samples of 'bumps' on [0, 1].
x = np.linspace(0, 1, 2**15)
blk = bumps(x)
print('blk:\n', blk)
# Build the noisy signal: add Gaussian noise (std 0.3, fixed seed).
np.random.seed(12345)
nblk = blk + stats.norm().rvs(2**15) * 0.3
fig = plt.figure(3)
ax31 = fig.add_subplot(211)
ax31.plot(x, blk)
ax31.grid(True)
ax31.set_title('Original Data')
ax31.tick_params(labelbottom=False)  # stacked subplots: hide upper x labels
ax32 = fig.add_subplot(212)
ax32.plot(x, nblk)
ax32.grid(True)
ax32.set_title('Noisy Data')
plt.show()
#===============================================================================
# Figures 4 & 5: wavelet analysis and coefficient display
#===============================================================================
def coef_pyramid_plot(coefs, first=0, scale='uniform', ax=None):
    '''
    Plot wavelet detail coefficients as a pyramid, one row of vertical
    lines per decomposition level.

    Parameters
    ----------
    coefs : array-like
        Wavelet Coefficients. Expects an iterable in order Cdn, Cdn-1, ...,
        Cd1, Cd0.
    first : int, optional
        The first level to plot.
    scale : str {'uniform', 'level'}, optional
        Scale the coefficients using the same scale or independently by
        level.
    ax : Axes, optional
        Matplotlib Axes instance
    Returns
    -------
    Figure : Matplotlib figure instance
        Either the parent figure of `ax` or a new pyplot.Figure instance if
        `ax` is None.
    '''
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure()
        # BUG FIX: 'axisbg' was renamed to 'facecolor' in matplotlib 2.0
        # and removed in 2.2; the old keyword raises on modern versions.
        ax = fig.add_subplot(111, facecolor='lightgrey')
    else:
        fig = ax.figure

    n_levels = len(coefs)
    n = 2**(n_levels - 1)  # assumes periodic

    if scale == 'uniform':
        # One common scale across all levels.
        biggest = [np.max(np.abs(np.hstack(coefs)))] * n_levels
    else:
        # multiply by 2 so the highest bars only take up .5
        biggest = [np.max(np.abs(i)) * 2 for i in coefs]

    for i in range(first, n_levels):
        # Spread the 2**i coefficients of level i evenly across the x axis.
        x = np.linspace(2**(n_levels - 2 - i), n - 2**(n_levels - 2 - i), 2**i)
        ymin = n_levels - i - 1 + first
        yheight = coefs[i] / biggest[i]
        ymax = yheight + ymin
        ax.vlines(x, ymin, ymax, linewidth=1.1)

    ax.set_xlim(0, n)
    ax.set_ylim(first - 1, n_levels)
    ax.yaxis.set_ticks(np.arange(n_levels - 1, first - 1, -1))
    ax.yaxis.set_ticklabels(np.arange(first, n_levels))
    ax.tick_params(top=False, right=False, direction='out', pad=6)
    ax.set_ylabel("Levels", fontsize=14)
    ax.grid(True, alpha=.85, color='white', axis='y', linestyle='-')
    ax.set_title('Wavelet Detail Coefficients',
                 fontsize=16, position=(.5, 1.05))
    fig.subplots_adjust(top=.89)
    return fig
# Figures 4 and 5: axes for the coefficient pyramids of the clean and the
# noisy signal.  BUG FIX: 'axisbg' was removed in matplotlib 2.2 —
# 'facecolor' is the replacement keyword.
fig = plt.figure(4)
ax4 = fig.add_subplot(111, facecolor='lightgrey')
fig = plt.figure(5)
ax5 = fig.add_subplot(111, facecolor='lightgrey')
# Run the discrete wavelet transform on both signals.
# 'mode' selects the signal-extension (padding) scheme;
# 'per' means periodic extension.
true_coefs = pywt.wavedec(blk, wavtag, level=11, mode='per')
noisy_coefs = pywt.wavedec(nblk, wavtag, level=11, mode='per')
# Draw the 'coefficient pyramid'.
# Note: only the detail coefficients are shown; the approximation
# coefficients are not plotted (they live in true_coefs[0]).
fig1 = coef_pyramid_plot(true_coefs[1:], scale='level', ax=ax4)
fig1.axes[0].set_title('Original Wavelet Detail Coefficients')
fig2 = coef_pyramid_plot(noisy_coefs[1:], scale='level', ax=ax5)
fig2.axes[0].set_title('Noisy Wavelet Detail Coefficients')
plt.show()
#===============================================================================
# Figure 6: denoising with a universal (global) threshold
# Figure 7: reconstructed data, compared with the original
#===============================================================================
# Noise level estimate: MAD of the finest-level detail coefficients.
# NOTE(review): statsmodels removed stand_mad in later releases;
# scipy.stats.median_abs_deviation is the modern replacement.
sigma = stand_mad(noisy_coefs[-1])
# Universal threshold: sigma * sqrt(2 * log(N)).
uthresh = sigma * np.sqrt(2.0 * np.log(len(nblk)))
denoised_coefs = noisy_coefs[:]
# Soft-threshold every detail level.  BUG FIX: use the public
# pywt.threshold() API instead of the private pywt._thresholding module,
# which is not part of PyWavelets' stable interface.
denoised_coefs[1:] = (pywt.threshold(data, value=uthresh, mode='soft')
                      for data in denoised_coefs[1:])
fig = plt.figure(6)
# 'axisbg' was removed in matplotlib 2.2; 'facecolor' is the replacement.
ax6 = fig.add_subplot(111, facecolor='lightgrey')
fig3 = coef_pyramid_plot(denoised_coefs[1:], scale='level', ax=ax6)
fig3.axes[0].set_title('Denoised Wavelet Detail Coefficients')
# Inverse transform: rebuild the time-domain signal from the
# thresholded coefficients.
signal = pywt.waverec(denoised_coefs, wavtag, mode='per')
fig = plt.figure(7)
ax71 = fig.add_subplot(211)
ax71.plot(x, nblk)
ax71.grid(True)
ax71.set_title('Noisy Data')
ax71.tick_params(labelbottom=False)
ax72 = fig.add_subplot(212)
ax72.plot(x, signal, label='Denoised')
ax72.plot(x, blk, color='red', lw=0.5, label='Original')
ax72.grid(True)
ax72.set_title('Denoised Data')
ax72.legend()
plt.show()
# Install opencv-python from: http://www.lfd.uci.edu/~gohlke/pythonlibs/
| # # -*- coding: utf-8 -*-
# import numpy as np
# import matplotlib.pyplot as plt
# import pywt
# import pywt.data
# # Load image
# original = pywt.data.aero()
# print('original.shape:',original.shape)
# # Wavelet transform of image, and plot approximation and details
# titles = ['Approximation', ' Horizontal detail',
# 'Vertical detail', 'Diagonal detail']
# coeffs2 = pywt.dwt2(original, 'bior1.3')
# LL, (LH, HL, HH) = coeffs2
# fig = plt.figure(figsize=(8,8))
# for i, a in enumerate([LL, LH, HL, HH]):
# ax = fig.add_subplot(2, 2, i + 1)
# ax.imshow(a, origin='image', interpolation="nearest", cmap=plt.cm.gray)
# ax.set_title(titles[i], fontsize=12)
# fig.suptitle("dwt2 coefficients", fontsize=14)
# # Now reconstruct and plot the original image
# reconstructed = pywt.idwt2(coeffs2, 'bior1.3')
# fig = plt.figure(figsize=(8 ,8))
# plt.imshow(reconstructed, interpolation="nearest", cmap=plt.cm.gray)
# # Check that reconstructed image is close to the original
# np.testing.assert_allclose(original, reconstructed, atol=1e-13, rtol=1e-13)
# # Now do the same with dwtn/idwtn, to show the difference in their signatures
# coeffsn = pywt.dwtn(original, 'bior1.3')
# fig = plt.figure(figsize = (8, 8))
# for i, key in enumerate(['aa', 'ad', 'da', 'dd']):
# ax = fig.add_subplot(2, 2, i + 1)
# ax.imshow(coeffsn[key], origin='image', interpolation="nearest",
# cmap=plt.cm.gray)
# ax.set_title(titles[i], fontsize=12)
# fig.suptitle("dwtn coefficients", fontsize=14)
# # Now reconstruct and plot the original image
# reconstructed = pywt.idwtn(coeffsn, 'bior1.3')
# fig = plt.figure(figsize = (8, 8))
# plt.imshow(reconstructed, interpolation="nearest", cmap=plt.cm.gray)
# # Check that reconstructed image is close to the original
# np.testing.assert_allclose(original, reconstructed, atol=1e-13, rtol=1e-13)
# plt.show()
# -*- coding: cp936 -*-
import pywt
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from statsmodels.robust import stand_mad
wavtag = 'db8'  # name of the Daubechies-8 wavelet used throughout this script
# #===============================================================================
# # 图1:绘出Haar小波母函数
# #===============================================================================
# # 这里不是“函数调用”,二是“对象声明和创建”
# # 创建了一个pywt.Wavelet类,用以描述小波母函数的各种性质
# w = pywt.Wavelet('Haar')
# # 调用Wavefun()成员函数,返回:
# # phi - scaling function 尺度函数
# # psi - wavelet function 母函数
# phi, psi, x = w.wavefun(level=10)
# # 注意,此处采用“面对对象”的方式使用matplotlib
# # 而不是“状态机”的方式
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.set_xlim(-0.02, 1.02)
# ax.plot(x, psi)
# ax.grid(True)
# plt.show()
# #===============================================================================
# # 图2:Debauchies小波的尺度函数和母函数
# #===============================================================================
# db8 = pywt.Wavelet(wavtag)
# scaling, wavelet, x = db8.wavefun()
# fig = plt.figure(2)
# ax1 = fig.add_subplot(121)
# ax1.plot(x, scaling)
# ax1.set_title('Scaling function,' + wavtag)
# ax1.set_ylim(-1.2, 1.2)
# ax1.grid(True)
# ax2 = fig.add_subplot(122, sharey=ax1)
# ax2.set_title('Wavelet,' + wavtag)
# ax2.plot(x, wavelet)
# ax2.tick_params(labelleft=False)
# ax2.grid(True)
# plt.tight_layout()
# plt.show()
#===============================================================================
# Figure 3: wavelet denoising demo - the original signal and the noisy signal
#===============================================================================
def Blocks(x):
    """Evaluate the piecewise-constant 'Blocks' test signal at *x*."""
    positions = np.array(
        [0.1, 0.13, 0.15, 0.23, 0.25, 0.4, 0.44, 0.65, 0.76, 0.78, 0.81])[:, None]
    heights = np.array(
        [4, -5, 3, -4, 5, -4.2, 2.1, 4.3, -3.1, 2.1, -4.2])[:, None]
    # Heaviside step of (x - position): 0 before the jump, 1 after it.
    steps = (1.0 + np.sign(x - positions)) / 2.0
    return 3.655606 * (heights * steps).sum(axis=0)
def bumps(x):
    """Evaluate the spiky 'Bumps' test signal at *x*."""
    centres = np.array(
        [.1, .13, .15, .23, .25, .4, .44, .65, .76, .78, .81])[:, None]
    heights = np.array(
        [4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 2.1, 4.2])[:, None]
    widths = np.array(
        [.005, .005, .006, .01, .01, .03, .01, .01, .005, .008, .005])[:, None]
    # Each bump decays as (1 + |x - centre| / width)^-4 away from its centre.
    scaled = np.abs((x - centres) / widths)
    return (heights * (1.0 + scaled) ** -4.0).sum(axis=0)
# Build the clean reference signal: 2**15 samples of 'bumps' on [0, 1].
x = np.linspace(0, 1, 2**15)
blk = bumps(x)
print('blk:\n', blk)
# Build the noisy signal: add Gaussian noise (std 0.3, fixed seed).
np.random.seed(12345)
nblk = blk + stats.norm().rvs(2**15) * 0.3
fig = plt.figure(3)
ax31 = fig.add_subplot(211)
ax31.plot(x, blk)
ax31.grid(True)
ax31.set_title('Original Data')
ax31.tick_params(labelbottom=False)  # stacked subplots: hide upper x labels
ax32 = fig.add_subplot(212)
ax32.plot(x, nblk)
ax32.grid(True)
ax32.set_title('Noisy Data')
plt.show()
#===============================================================================
# Figures 4 & 5: wavelet analysis and coefficient display
#===============================================================================
def coef_pyramid_plot(coefs, first=0, scale='uniform', ax=None):
    '''
    Plot wavelet detail coefficients as a pyramid, one row of vertical
    lines per decomposition level.

    Parameters
    ----------
    coefs : array-like
        Wavelet Coefficients. Expects an iterable in order Cdn, Cdn-1, ...,
        Cd1, Cd0.
    first : int, optional
        The first level to plot.
    scale : str {'uniform', 'level'}, optional
        Scale the coefficients using the same scale or independently by
        level.
    ax : Axes, optional
        Matplotlib Axes instance
    Returns
    -------
    Figure : Matplotlib figure instance
        Either the parent figure of `ax` or a new pyplot.Figure instance if
        `ax` is None.
    '''
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure()
        # NOTE(review): 'axisbg' was renamed to 'facecolor' in matplotlib
        # 2.0 and removed in 2.2; this call fails on modern matplotlib.
        ax = fig.add_subplot(111, axisbg='lightgrey')
    else:
        fig = ax.figure

    n_levels = len(coefs)
    n = 2**(n_levels - 1)  # assumes periodic

    if scale == 'uniform':
        # One common scale across all levels.
        biggest = [np.max(np.abs(np.hstack(coefs)))] * n_levels
    else:
        # multiply by 2 so the highest bars only take up .5
        biggest = [np.max(np.abs(i)) * 2 for i in coefs]

    for i in range(first, n_levels):
        # Spread the 2**i coefficients of level i evenly across the x axis.
        x = np.linspace(2**(n_levels - 2 - i), n - 2**(n_levels - 2 - i), 2**i)
        ymin = n_levels - i - 1 + first
        yheight = coefs[i] / biggest[i]
        ymax = yheight + ymin
        ax.vlines(x, ymin, ymax, linewidth=1.1)

    ax.set_xlim(0, n)
    ax.set_ylim(first - 1, n_levels)
    ax.yaxis.set_ticks(np.arange(n_levels - 1, first - 1, -1))
    ax.yaxis.set_ticklabels(np.arange(first, n_levels))
    ax.tick_params(top=False, right=False, direction='out', pad=6)
    ax.set_ylabel("Levels", fontsize=14)
    ax.grid(True, alpha=.85, color='white', axis='y', linestyle='-')
    ax.set_title('Wavelet Detail Coefficients',
                 fontsize=16, position=(.5, 1.05))
    fig.subplots_adjust(top=.89)
    return fig
# Figures 4 and 5: axes for the coefficient pyramids.
# NOTE(review): 'axisbg' was removed in matplotlib 2.2 - use facecolor=.
fig = plt.figure(4)
ax4 = fig.add_subplot(111, axisbg='lightgrey')
fig = plt.figure(5)
ax5 = fig.add_subplot(111, axisbg='lightgrey')
# Run the discrete wavelet transform on both signals;
# 'mode' selects the signal-extension (padding) scheme,
# 'per' means periodic extension.
true_coefs = pywt.wavedec(blk, wavtag, level=11, mode='per')
noisy_coefs = pywt.wavedec(nblk, wavtag, level=11, mode='per')
# Draw the 'coefficient pyramid'.
# Note: only the detail coefficients are plotted; the approximation
# coefficients are not shown (they live in true_coefs[0]).
fig1 = coef_pyramid_plot(true_coefs[1:], scale='level', ax=ax4)
fig1.axes[0].set_title('Original Wavelet Detail Coefficients')
fig2 = coef_pyramid_plot(noisy_coefs[1:], scale='level', ax=ax5)
fig2.axes[0].set_title('Noisy Wavelet Detail Coefficients')
plt.show()
#===============================================================================
# Figure 6: denoising with a universal (global) threshold
# Figure 7: reconstructed data, compared with the original
#===============================================================================
# Noise level estimate: MAD of the finest-level detail coefficients.
sigma = stand_mad(noisy_coefs[-1])
# Universal threshold: sigma * sqrt(2 * log(N)).
uthresh = sigma * np.sqrt(2.0 * np.log(len(nblk)))
denoised_coefs = noisy_coefs[:]
# Soft-threshold every detail level.
# NOTE(review): pywt._thresholding is a private module; the public
# pywt.threshold(data, value, mode='soft') should be used instead.
denoised_coefs[1:] = (pywt._thresholding.soft(data, value=uthresh)
                      for data in denoised_coefs[1:])
fig = plt.figure(6)
# NOTE(review): 'axisbg' was removed in matplotlib 2.2 - use facecolor=.
ax6 = fig.add_subplot(111, axisbg='lightgrey')
fig3 = coef_pyramid_plot(denoised_coefs[1:], scale='level', ax=ax6)
fig3.axes[0].set_title('Denoised Wavelet Detail Coefficients')
# Inverse transform: rebuild the signal from the thresholded coefficients.
signal = pywt.waverec(denoised_coefs, wavtag, mode='per')
fig = plt.figure(7)
ax71 = fig.add_subplot(211)
ax71.plot(x, nblk)
ax71.grid(True)
ax71.set_title('Noisy Data')
ax71.tick_params(labelbottom=False)
ax72 = fig.add_subplot(212)
ax72.plot(x, signal, label='Denoised')
ax72.plot(x, blk, color='red', lw=0.5, label='Original')
ax72.grid(True)
ax72.set_title('Denoised Data')
ax72.legend()
plt.show()
# 安装opencv-python: http://www.lfd.uci.edu/~gohlke/pythonlibs/ | en | 0.286008 | # # -*- coding: utf-8 -*- # import numpy as np # import matplotlib.pyplot as plt # import pywt # import pywt.data # # Load image # original = pywt.data.aero() # print('original.shape:',original.shape) # # Wavelet transform of image, and plot approximation and details # titles = ['Approximation', ' Horizontal detail', # 'Vertical detail', 'Diagonal detail'] # coeffs2 = pywt.dwt2(original, 'bior1.3') # LL, (LH, HL, HH) = coeffs2 # fig = plt.figure(figsize=(8,8)) # for i, a in enumerate([LL, LH, HL, HH]): # ax = fig.add_subplot(2, 2, i + 1) # ax.imshow(a, origin='image', interpolation="nearest", cmap=plt.cm.gray) # ax.set_title(titles[i], fontsize=12) # fig.suptitle("dwt2 coefficients", fontsize=14) # # Now reconstruct and plot the original image # reconstructed = pywt.idwt2(coeffs2, 'bior1.3') # fig = plt.figure(figsize=(8 ,8)) # plt.imshow(reconstructed, interpolation="nearest", cmap=plt.cm.gray) # # Check that reconstructed image is close to the original # np.testing.assert_allclose(original, reconstructed, atol=1e-13, rtol=1e-13) # # Now do the same with dwtn/idwtn, to show the difference in their signatures # coeffsn = pywt.dwtn(original, 'bior1.3') # fig = plt.figure(figsize = (8, 8)) # for i, key in enumerate(['aa', 'ad', 'da', 'dd']): # ax = fig.add_subplot(2, 2, i + 1) # ax.imshow(coeffsn[key], origin='image', interpolation="nearest", # cmap=plt.cm.gray) # ax.set_title(titles[i], fontsize=12) # fig.suptitle("dwtn coefficients", fontsize=14) # # Now reconstruct and plot the original image # reconstructed = pywt.idwtn(coeffsn, 'bior1.3') # fig = plt.figure(figsize = (8, 8)) # plt.imshow(reconstructed, interpolation="nearest", cmap=plt.cm.gray) # # Check that reconstructed image is close to the original # np.testing.assert_allclose(original, reconstructed, atol=1e-13, rtol=1e-13) # plt.show() # -*- coding: cp936 -*- # 
#=============================================================================== # # 图1:绘出Haar小波母函数 # #=============================================================================== # # 这里不是“函数调用”,二是“对象声明和创建” # # 创建了一个pywt.Wavelet类,用以描述小波母函数的各种性质 # w = pywt.Wavelet('Haar') # # 调用Wavefun()成员函数,返回: # # phi - scaling function 尺度函数 # # psi - wavelet function 母函数 # phi, psi, x = w.wavefun(level=10) # # 注意,此处采用“面对对象”的方式使用matplotlib # # 而不是“状态机”的方式 # fig = plt.figure() # ax = fig.add_subplot(111) # ax.set_xlim(-0.02, 1.02) # ax.plot(x, psi) # ax.grid(True) # plt.show() # #=============================================================================== # # 图2:Debauchies小波的尺度函数和母函数 # #=============================================================================== # db8 = pywt.Wavelet(wavtag) # scaling, wavelet, x = db8.wavefun() # fig = plt.figure(2) # ax1 = fig.add_subplot(121) # ax1.plot(x, scaling) # ax1.set_title('Scaling function,' + wavtag) # ax1.set_ylim(-1.2, 1.2) # ax1.grid(True) # ax2 = fig.add_subplot(122, sharey=ax1) # ax2.set_title('Wavelet,' + wavtag) # ax2.plot(x, wavelet) # ax2.tick_params(labelleft=False) # ax2.grid(True) # plt.tight_layout() # plt.show() #=============================================================================== # 图3:小波去噪模拟,原始信号和混合噪声的信号 #=============================================================================== # 构造原始数据 # 构造含噪声的数据 #=============================================================================== # 图4,5:小波分析,及数据展示 #=============================================================================== Parameters ---------- coefs : array-like Wavelet Coefficients. Expects an iterable in order Cdn, Cdn-1, ..., Cd1, Cd0. first : int, optional The first level to plot. scale : str {'uniform', 'level'}, optional Scale the coefficients using the same scale or independently by level. 
ax : Axes, optional Matplotlib Axes instance Returns ------- Figure : Matplotlib figure instance Either the parent figure of `ax` or a new pyplot.Figure instance if `ax` is None. # assumes periodic # multiply by 2 so the highest bars only take up .5 # 调用wavedec()函数对数据进行小波变换 # mode指定了数据补齐的方式 #‘per’指周期延拓数据 # 绘出‘coefficient pyramid’ # 注意,这里只绘出了detail coefficients # 而没有展示approximation coefficient(s),该数据存在true_coefs[0]中 #=============================================================================== # 图6:降噪——全局阈值 # 图7:重构数据——对比效果 #=============================================================================== # 安装opencv-python: http://www.lfd.uci.edu/~gohlke/pythonlibs/ | 2.530137 | 3 |
cohen/properties/models.py | rodrigobraga/cohen | 0 | 6614065 | <reponame>rodrigobraga/cohen<filename>cohen/properties/models.py
# coding: utf-8
"""Models to Property"""
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.contrib.gis.db.models import PointField
from .tasks import update_coordinates
# (stored value, human-readable label) choices for Property.property_type.
PROPERTY_TYPE = (
    ('house', 'House'),
    ('apartment', 'Apartment/Flat'),
    ('townhouse', 'Townhouse'),
    ('commercial', 'Commercial/Industrial')
)
class Property(models.Model):
    """A real-estate listing (house, apartment, townhouse or commercial)."""

    title = models.CharField(max_length=255)
    description = models.TextField(blank=True)
    # Unique URL fragment identifying the listing.
    slug = models.SlugField(unique=True)
    address = models.CharField(max_length=255)
    # Geographic location (srid 4326 = WGS84); nullable because it is
    # filled in after save by the post_save handler below.
    point = PointField(null=True, blank=True, srid=4326)
    image = models.ImageField(upload_to='properties')
    is_available = models.BooleanField(default=True)
    price = models.DecimalField(max_digits=8, decimal_places=2)
    property_type = models.CharField(
        max_length=24, choices=PROPERTY_TYPE, default='house')
    created_by = models.ForeignKey(User, on_delete=models.CASCADE)
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ['-created_date', '-updated_date', 'title']
        verbose_name_plural = 'properties'

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """Return the canonical URL of this listing.

        BUG FIX: the ``@models.permalink`` decorator was deprecated in
        Django 1.11 and removed in 2.1; ``reverse()`` is the supported
        way to build the URL and returns the same string.
        """
        from django.urls import reverse  # local import: module imports unchanged
        return reverse('property-update', kwargs={'slug': self.slug})
@receiver(post_save, sender=Property)
def get_coordinates(sender, instance, **kwargs):
    """Queue the update_coordinates task with the saved listing's id and
    address (presumably geocoding into ``point`` - confirm in tasks.py)."""
    # NOTE(review): fires on every save (create and update), even when the
    # address did not change.
    update_coordinates.delay(instance.id, instance.address)
| # coding: utf-8
"""Models to Property"""
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.contrib.gis.db.models import PointField
from .tasks import update_coordinates
# (stored value, human-readable label) choices for Property.property_type.
PROPERTY_TYPE = (
    ('house', 'House'),
    ('apartment', 'Apartment/Flat'),
    ('townhouse', 'Townhouse'),
    ('commercial', 'Commercial/Industrial')
)
class Property(models.Model):
    """A real-estate listing (house, apartment, townhouse or commercial)."""

    title = models.CharField(max_length=255)
    description = models.TextField(blank=True)
    # Unique URL fragment identifying the listing.
    slug = models.SlugField(unique=True)
    address = models.CharField(max_length=255)
    # Geographic location (srid 4326 = WGS84); nullable because it is
    # filled in after save by the post_save handler below.
    point = PointField(null=True, blank=True, srid=4326)
    image = models.ImageField(upload_to='properties')
    is_available = models.BooleanField(default=True)
    price = models.DecimalField(max_digits=8, decimal_places=2)
    property_type = models.CharField(
        max_length=24, choices=PROPERTY_TYPE, default='house')
    created_by = models.ForeignKey(User, on_delete=models.CASCADE)
    created_date = models.DateTimeField(auto_now_add=True)
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ['-created_date', '-updated_date', 'title']
        verbose_name_plural = 'properties'

    def __str__(self):
        return self.title

    # NOTE(review): @models.permalink was removed in Django 2.1; this
    # should be rewritten with django.urls.reverse().
    @models.permalink
    def get_absolute_url(self):
        return ('property-update', (), {'slug': self.slug})
@receiver(post_save, sender=Property)
def get_coordinates(sender, instance, **kwargs):
update_coordinates.delay(instance.id, instance.address) | en | 0.796673 | # coding: utf-8 Models to Property | 1.86698 | 2 |
getIP.py | youngphero/pynet | 0 | 6614066 | #!Python2.7
"""Extract IPv4-looking addresses from OptumIP.txt and print them."""
import re

# One octet per group.  The original pattern used ``[1-9]{1,3}`` for the
# last octet, so addresses whose last octet contained a 0 (e.g. ``.20`` or
# ``.0``) were missed or silently truncated.
IP_PATTERN = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')


def extract_ips(lines):
    """Return all IPv4-looking substrings found in the iterable *lines*."""
    found = []
    for line in lines:
        found.extend(IP_PATTERN.findall(line.rstrip()))
    return found


def main():
    # ``with`` guarantees the handle is closed (the original leaked it).
    with open('OptumIP.txt') as hand:
        for ip in extract_ips(hand):
            print(ip)


if __name__ == '__main__':
    main()
| #!Python2.7
# Extract IPv4-looking strings from OptumIP.txt and print them.
# NOTE(review): Python 2 only (uses the ``print`` statement).
import re
# Step 1: open the input file.  The handle is never closed; it relies on
# interpreter exit to release it.
hand=open('OptumIP.txt')
# Step 2: the IPv4 regex.
# NOTE(review): the last octet uses ``[1-9]``, so octets containing a 0
# (e.g. ``.20`` or ``.0``) are missed or truncated — looks like a bug.
#GetIP=re.complie(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[1-9]{1,3}')
# Step 3: scan every line for matches and print any found (as a list).
for line in hand:
    line=line.rstrip()
    ip=re.findall('[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[1-9]{1,3}',line)
    if len(ip)>0:
        print ip
# Step 4: writing the results to a file was planned but never implemented.
| en | 0.654215 | #!Python2.7 #This script to get an IP address from a text file and wrrite it to another textfile #Step1: Get the opent the text file and get the text #Step2: create the regex to extract the IP #GetIP=re.complie(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[1-9]{1,3}') #Step3: find all the matches in the text file #step4: Write the results to a text file | 3.847646 | 4 |
kafka_rpc/client.py | zylo117/krpc | 11 | 6614067 | # Copyright (c) 2017-2020, <NAME>
# All rights reserved.
"""
Update Log:
1.0.1: init
1.0.2: change project name from krpc to kafka_rpc
1.0.4: allow increasing message_max_bytes
1.0.5: support subscribing to multiple topics
1.0.6: stop using a global packer or unpacker, to ensure thread safety.
1.0.8: use gevent instead of built-in threading to speed up about 40%
1.0.9: support message compression
1.0.10: add argument max_queue_len to control the length of QueueDict
1.0.11: change redis backend behavior
"""
import logging
import pickle
import time
import uuid
import zlib
from collections import deque
from typing import Callable, Union
import zstd
logger = logging.getLogger(__name__)
from confluent_kafka import Producer, Consumer, KafkaError
import msgpack
import msgpack_numpy
msgpack_numpy.patch() # add numpy array support for msgpack
from kafka_rpc.aes import AESEncryption
class KRPCClient:
def __init__(self, *addresses, topic_name: Union[str, list],
             max_polling_timeout: float = 0.001, **kwargs):
    """Init Kafka RPCClient.

    Unlike most RPC protocols, only one KRPCClient should run on a single
    Kafka topic.  If you insist on using multiple KRPCClient instances,
    redis must be used: pass ``use_redis=True``.

    Args:
        addresses: kafka broker ``host:port`` entries, e.g. '192.168.1.117:9092'.
        topic_name: kafka topic name(s); an existing topic is reused,
            otherwise a new topic is created.
        max_polling_timeout: maximum time (seconds) to block waiting for
            a message, event or callback.

    Keyword Args:
        encrypt: if not None, AES-encrypt messages with this password
            (slows down performance).
        verify: if True, verify each message against its checksum header
            (crc32 by default, see ``verification``).
        use_redis: if True, use redis as the result cache; a built-in
            QueueDict otherwise.
        ack: if True, wait for broker delivery confirmation.  Disabling
            ack doubles the speed, but is not exactly safe.
        use_gevent: if True (default), use gevent instead of asyncio.
        compression_codec: kafka-level codec, see librdkafka
            CONFIGURATION.md ``compression.codec`` ('none' by default).
        use_compression: if True, additionally zstd-compress payloads.
        message_max_bytes / queue_buffering_max_kbytes /
            queue_buffering_max_messages: librdkafka producer limits.
        max_queue_len: QueueDict capacity when ``use_redis`` is False;
            keep as low as possible, otherwise OOM.
        expire_time: cache entry lifetime in seconds (default 600).
        handshake: if True (default), ping the servers at startup so the
            first rpc is not slow.
    """
    bootstrap_servers = ','.join(addresses)

    assert isinstance(topic_name, str) or isinstance(topic_name, list)
    self.topic_names = [topic_name] if isinstance(topic_name, str) else topic_name
    self.client_topics = ['krpc_{}_client'.format(t) for t in self.topic_names]

    assert max_polling_timeout > 0, 'max_polling_timeout must be greater than 0'
    self.max_polling_timeout = max_polling_timeout

    self.consumer = Consumer({
        'bootstrap.servers': bootstrap_servers,
        'group.id': 'krpc',
        'auto.offset.reset': 'earliest',
        'auto.commit.interval.ms': 1000,
        'compression.codec': kwargs.get('compression_codec', 'none')
    })

    # ``kwargs.get`` replaces the former try/except-KeyError blocks (the
    # commented-out kwargs.get lines had trailing commas that would have
    # produced tuples, which is presumably why they were abandoned).
    message_max_bytes = kwargs.get('message_max_bytes', 1048576)
    queue_buffering_max_kbytes = kwargs.get('queue_buffering_max_kbytes', 1048576)
    queue_buffering_max_messages = kwargs.get('queue_buffering_max_messages', 100000)

    if message_max_bytes > 1048576:
        logger.warning('message_max_bytes is greater than 1048576, '
                       'message.max.bytes and replica.fetch.max.bytes of '
                       'brokers\' config should be greater than this')

    self.producer = Producer({
        'bootstrap.servers': bootstrap_servers,
        'on_delivery': self.delivery_report,
        # custom parameters
        'message.max.bytes': message_max_bytes,
        'queue.buffering.max.kbytes': queue_buffering_max_kbytes,
        'queue.buffering.max.messages': queue_buffering_max_messages,
        'compression.codec': kwargs.get('compression_codec', 'none')
    })

    # Result cache for returned data: redis (shareable between clients)
    # or an in-process QueueDict.
    self.use_redis = kwargs.get('use_redis', False)
    self.expire_time = kwargs.get('expire_time', 600)
    if self.use_redis:
        import redis
        redis_host = kwargs.get('redis_host', 'localhost')
        redis_port = kwargs.get('redis_port', 6379)
        redis_db = kwargs.get('redis_db', 0)
        redis_password = kwargs.get('redis_password', None)
        self.cache = redis.Redis(redis_host, redis_port, redis_db, redis_password)
        self.cache_channel = self.cache.pubsub()
    else:
        self.cache = QueueDict(maxlen=kwargs.get('max_queue_len', 1024),
                               expire=self.expire_time)

    # Message verification: crc32 by default, or any user-supplied callable.
    self.verify = kwargs.get('verify', False)
    self.verification_method = kwargs.get('verification', 'crc32')
    if self.verification_method == 'crc32':
        self.verification_method = lambda x: hex(zlib.crc32(x)).encode()
    elif not isinstance(self.verification_method, Callable):
        # (the original also contained a redundant self-assignment for the
        # Callable case, dropped here)
        raise AssertionError('not supported verification function.')

    self.encrypt = kwargs.get('encrypt', None)
    if self.encrypt is not None:
        self.encrypt = AESEncryption(self.encrypt, encrypt_length=16)

    self.use_compression = kwargs.get('use_compression', False)

    self.is_closed = False

    # The background consumer loop runs in a single-worker pool.
    use_gevent = kwargs.get('use_gevent', True)
    if use_gevent:
        from gevent.threadpool import ThreadPoolExecutor as gThreadPoolExecutor
        self.pool = gThreadPoolExecutor(1)
    else:
        from aplex import ThreadAsyncPoolExecutor
        self.pool = ThreadAsyncPoolExecutor(pool_size=1)
    self.pool.submit(self.wait_forever)

    # handshake, it's ok not to handshake, but the first rpc would be slow.
    if kwargs.get('handshake', True):
        self.handshaked = {}
        self.subscribe(*self.topic_names)

    # acknowledge, disable ack will double the speed, but not exactly safe.
    self.ack = kwargs.get('ack', False)
def subscribe(self, *topic_names):
    """Subscribe to additional RPC topics and handshake with their servers.

    ``__init__`` calls this with topics that are already registered in
    ``self.topic_names``, so registration is made idempotent here: a
    topic (and its derived client topic) is appended only if missing.
    The original appended unconditionally, duplicating every initial
    topic in both lists.
    """
    if not topic_names:
        return

    for topic_name in topic_names:
        client_topic = 'krpc_{}_client'.format(topic_name)
        if topic_name not in self.topic_names:
            self.topic_names.append(topic_name)
        if client_topic not in self.client_topics:
            self.client_topics.append(client_topic)

    self.consumer.subscribe(self.client_topics)
    logger.info('adding consumer subscription of: {}'.format(topic_names))

    # Handshake with every server topic; wait_forever() flips the
    # corresponding self.handshaked entry when the reply arrives.
    if hasattr(self, 'handshaked'):
        for topic_name in topic_names:
            self.handshaked[topic_name] = False
            server_topic = 'krpc_{}_server'.format(topic_name)
            self.producer.produce(server_topic, b'handshake', b'handshake',
                                  headers={
                                      'checksum': None
                                  })
            self.producer.poll(0.0)
            logger.info('sending handshake to {}'.format(server_topic))

            # Poll up to ~30 s (15 x 2 s) for the server's reply.
            for _ in range(15):
                if self.handshaked[topic_name]:
                    logger.info('handshake of {} succeeded.'.format(topic_name))
                    break
                time.sleep(2)
            else:
                logger.error('failed to handshake with {}'.format(server_topic))
@staticmethod
def delivery_report(err, msg):
    """Producer delivery callback: log the outcome of one produced request."""
    if err is None:
        logger.info('request sent to {} [{}]'.format(msg.topic(), msg.partition()))
    else:
        logger.error('request failed: {}'.format(err))
@staticmethod
def parse_response(msg_value):
    """Deserialize a msgpack payload; return None if it cannot be decoded."""
    try:
        return msgpack.unpackb(msg_value, use_list=False, raw=False)
    except Exception as exc:
        logger.exception(exc)
        return None
def call(self, method_name, *args, **kwargs):
    """Perform a blocking RPC: pack the request, produce it to the server
    topic and wait for the matching response.

    Returns a dict with the remote return value (``ret``) plus timing
    diagnostics and the responding ``server_id``.

    Raises:
        TimeoutError: propagated from poll_result_from_cache when no
            response arrives within ``timeout`` seconds.
    """
    # rpc call timeout
    # WARNING: if the rpc method has an argument named timeout, it will be not be passed.
    timeout = kwargs.pop('timeout', 10)

    # get topic_name; defaults to the first subscribed topic
    topic_name = kwargs.pop('topic_name', self.topic_names[0])
    server_topic = 'krpc_{}_server'.format(topic_name)

    start_time = time.time()

    # pack the request; optional zstd compression then AES encryption are
    # applied in this order (the server must reverse them)
    req = {
        'method_name': method_name,
        'args': args,
        'kwargs': kwargs
    }
    req = msgpack.packb(req, use_bin_type=True)

    if self.use_compression:
        req = zstd.compress(req)

    if self.encrypt:
        req = self.encrypt.encrypt(req)

    if self.verify:
        checksum = self.verification_method(req)
    else:
        checksum = None

    # task_id is the only id pairing this request with its response
    task_id = uuid.uuid4().hex
    self.producer.produce(server_topic, req, task_id,
                          headers={
                              'checksum': checksum
                          })

    # waiting for response from server sync/async
    res, flight_time_response = self.poll_result_from_cache(task_id, timeout)

    if self.ack:
        self.producer.poll(0.0)

    # unpack the server's response envelope
    ret = res['ret']
    tact_time_server = res['tact_time']
    flight_time_request = res['flight_time_request']
    server_id = res['server_id']
    exception = res['exception']
    tb = res['traceback']

    # a remote exception arrives pickled; it is logged together with its
    # traceback, but the call still returns normally
    if exception is not None:
        exception = pickle.loads(exception)
        logger.exception(exception)
    if tb is not None:
        logger.error(tb)

    end_time = time.time()

    return {
        'ret': ret,
        'tact_time': end_time - start_time,
        'tact_time_server': tact_time_server,
        'server_id': server_id,
        'flight_time_request': flight_time_request,
        'flight_time_response': flight_time_response
    }
def wait_forever(self):
    """Background consumer loop (submitted to self.pool by __init__).

    Polls the subscribed client topics, verifies each message and stores
    the still-encoded payload in the cache keyed by task_id, where
    poll_result_from_cache() picks it up.  Exits when close() sets
    self.is_closed.
    """
    while True:
        if self.is_closed:
            logger.info('user exit')
            break

        try:
            msg = self.consumer.poll(self.max_polling_timeout)
            if msg is None:
                continue
            if msg.error():
                logger.error("consumer error: {}".format(msg.error()))
                continue

            task_id = msg.key()  # an uuid, the only id that pairs the request and the response
            topic_name = msg.topic()

            # handshake replies are flagged by their key, not the payload
            if task_id == b'handshake':
                try:
                    # topic is 'krpc_<name>_client'; recover <name>
                    real_topic_name = '_'.join(topic_name.split('_')[1:-1])
                except:
                    logger.error('invalid topic name {}'.format(topic_name))
                    continue
                self.handshaked[real_topic_name] = True
                continue

            res = msg.value()
            headers = msg.headers()
            timestamp = msg.timestamp()

            # headers[0] is the ('checksum', value) pair set by the server
            checksum = headers[0][1]
            if self.verify:
                signature = self.verification_method(res)
                if checksum != signature:
                    logger.error('checksum mismatch of task {}'.format(task_id))
                    continue

            # store the raw payload; decryption/decompression/unpacking
            # happen in poll_result_from_cache on the caller's thread.
            # timestamp[1] is milliseconds since epoch.
            if self.use_redis:
                self.cache.hset(task_id, b'result', res)
                self.cache.hset(task_id, b'flight_time_response', time.time() - timestamp[1] / 1000)
                self.cache.expire(task_id, self.expire_time)
            else:
                self.cache[task_id] = res, time.time() - timestamp[1] / 1000

            # send signal for polling to search for result
            ...
        except Exception as e:
            logger.exception(e)
def poll_result_from_cache(self, task_id, timeout=10):
    """Block until the response for *task_id* appears in the cache, then
    decrypt/decompress/unpack it.

    Args:
        task_id: hex uuid of the request (str).
        timeout: maximum seconds to wait.

    Returns:
        (response dict, flight_time_response seconds)

    Raises:
        TimeoutError: if no response arrives within *timeout* seconds.
    """
    loop_times = int(timeout / self.max_polling_timeout)
    task_id = task_id.encode()

    if self.use_redis:
        for _ in range(loop_times):
            if not self.cache.hexists(task_id, 'result'):
                # The original spun here without sleeping, so the redis
                # branch burned CPU and exhausted loop_times almost
                # instantly, making the effective timeout ~0.
                time.sleep(self.max_polling_timeout)
                continue
            res = self.cache.hget(task_id, b'result')
            flight_time_response = self.cache.hget(task_id, b'flight_time_response')
            break
        else:
            raise TimeoutError
    else:
        for _ in range(loop_times):
            try:
                res, flight_time_response = self.cache[task_id]
                break
            except (KeyError, ValueError, IndexError):
                # QueueDict raises when the key is not cached yet; the
                # original bare ``except`` also swallowed KeyboardInterrupt.
                time.sleep(self.max_polling_timeout)
        else:
            raise TimeoutError

    # Reverse the encoding applied in call(): decrypt, decompress, unpack.
    if self.encrypt:
        res = self.encrypt.decrypt(res)
    if self.use_compression:
        res = zstd.decompress(res)
    res = self.parse_response(res)

    return res, flight_time_response
def __getattr__(self, method_name):
return lambda *args, **kwargs: self.call(method_name, *args, **kwargs)
def close(self):
    """Shut down the client: stop the background loop, release cache and
    Kafka resources, and drain outstanding produce requests."""
    self.is_closed = True  # signals wait_forever() to exit its loop
    if self.use_redis:
        self.cache_channel.close()
        self.cache.close()
    self.consumer.close()
    self.producer.flush()  # block until all queued messages are delivered
    self.pool.shutdown()
class QueueDict:
    """A FIFO, size- and age-bounded key/value cache.

    Semantics (matching the original deque-based implementation):

    * ``q[key] = val`` records the value with the current timestamp, then
      evicts the oldest entries while the cache holds more than ``maxlen``
      items or the oldest entry is older than ``expire`` seconds.
    * ``q[key]`` *removes* the entry and returns its value (single-shot
      read); a missing key raises ``KeyError`` (the original raised
      ``ValueError`` via ``deque.index``).

    The original kept three parallel deques and deleted entries with
    ``deque.remove(value)``, which removes the first *equal* value and so
    corrupted the key/value alignment whenever two keys stored equal
    values.  A single insertion-ordered dict cannot mis-align.
    """

    def __init__(self, maxlen=0, expire=128):
        assert isinstance(maxlen, int) and maxlen >= 0
        assert isinstance(expire, int) and expire >= 0
        self.maxlen = maxlen
        self.expire = expire
        # key -> (value, insertion timestamp); dicts preserve insertion
        # order, so the first item is always the oldest.
        self._entries = {}

    def __setitem__(self, key, val):
        self._entries[key] = (val, time.time())
        self.remove_oldest()

    def __getitem__(self, key):
        # Single-shot read: the entry is consumed on success.
        val, _ = self._entries.pop(key)  # raises KeyError if absent
        return val

    def remove_oldest(self):
        """Evict entries beyond ``maxlen`` and entries older than ``expire``."""
        if self.maxlen is not None:
            while len(self._entries) > self.maxlen:
                self._entries.pop(next(iter(self._entries)))
        if self.expire is not None:
            now = time.time()
            for key, (_, ts) in list(self._entries.items()):
                if now - ts > self.expire:
                    del self._entries[key]
                else:
                    # Insertion order: everything after this is newer.
                    break
| # Copyright (c) 2017-2020, <NAME>
# All rights reserved.
"""
Update Log:
1.0.1: init
1.0.2: change project name from krpc to kafka_rpc
1.0.4: allow increasing message_max_bytes
1.0.5: support subscribing to multiple topics
1.0.6: stop using a global packer or unpacker, to ensure thread safety.
1.0.8: use gevent instead of built-in threading to speed up about 40%
1.0.9: support message compression
1.0.10: add argument max_queue_len to control the length of QueueDict
1.0.11: change redis backend behavior
"""
import logging
import pickle
import time
import uuid
import zlib
from collections import deque
from typing import Callable, Union
import zstd
logger = logging.getLogger(__name__)
from confluent_kafka import Producer, Consumer, KafkaError
import msgpack
import msgpack_numpy
msgpack_numpy.patch() # add numpy array support for msgpack
from kafka_rpc.aes import AESEncryption
class KRPCClient:
def __init__(self, *addresses, topic_name: Union[str, list],
max_polling_timeout: float = 0.001, **kwargs):
"""
Init Kafka RPCClient.
Not like the most of the RPC protocols,
Only one KRPCClient can run on a single Kafka topic.
If you insist using multiple KRPCClient instances,
redis must be used, pass argument use_redis=True.
Args:
addresses: kafka broker host, port, for examples: '192.168.1.117:9092'
topic_name: kafka topic_name(s), if topic exists,
the existing topic will be used,
create a new topic otherwise.
max_polling_timeout: maximum time(seconds) to block waiting for message, event or callback.
encrypt: default None, if not None, will encrypt the message with the given password. It will slow down performance.
verify: default False, if True, will verify the message with the given sha3 checksum from the headers.
use_redis: default False, if True, use redis as cache, built-in QueueDict otherwise.
ack: default False, if True, server will confirm the message status. Disable ack will double the speed, but not exactly safe.
use_gevent: default True, if True, use gevent instead of asyncio. If gevent version is lower than 1.5, krpc will not run on windows.
compression: default 'none', check https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md compression.codec. 'zstd' is bugged. Check https://github.com/confluentinc/confluent-kafka-python/issues/589
use_compression: default False, custom compression using zstd.
max_queue_len: int, default 1024, if use_redis is False, a QueueDict will cache results with the length of max_queue_len. This should be as low as it can be, otherwise OOM.
"""
bootstrap_servers = ','.join(addresses)
assert isinstance(topic_name, str) or isinstance(topic_name, list)
self.topic_names = [topic_name] if isinstance(topic_name, str) else topic_name
# self.server_topics = ['krpc_{}_server'.format(topic_name) for topic_name in self.topic_names]
self.client_topics = ['krpc_{}_client'.format(topic_name) for topic_name in self.topic_names]
# set max_polling_timeout
assert max_polling_timeout > 0, 'max_polling_timeout must be greater than 0'
self.max_polling_timeout = max_polling_timeout
self.consumer = Consumer({
'bootstrap.servers': bootstrap_servers,
'group.id': 'krpc',
'auto.offset.reset': 'earliest',
'auto.commit.interval.ms': 1000,
'compression.codec': kwargs.get('compression_codec', 'none')
})
# message_max_bytes = kwargs.get('message_max_bytes', 1048576),
# queue_buffering_max_kbytes = kwargs.get('queue_buffering_max_kbytes', 1048576),
# queue_buffering_max_messages = kwargs.get('queue_buffering_max_messages', 100000),
try:
message_max_bytes = kwargs['message_max_bytes']
except KeyError:
message_max_bytes = 1048576
try:
queue_buffering_max_kbytes = kwargs['queue_buffering_max_kbytes']
except KeyError:
queue_buffering_max_kbytes = 1048576
try:
queue_buffering_max_messages = kwargs['queue_buffering_max_messages']
except KeyError:
queue_buffering_max_messages = 100000
if message_max_bytes > 1048576:
logger.warning('message_max_bytes is greater than 1048576, '
'message.max.bytes and replica.fetch.max.bytes of '
'brokers\' config should be greater than this')
self.producer = Producer({
'bootstrap.servers': bootstrap_servers,
'on_delivery': self.delivery_report,
# custom parameters
'message.max.bytes': message_max_bytes,
'queue.buffering.max.kbytes': queue_buffering_max_kbytes,
'queue.buffering.max.messages': queue_buffering_max_messages,
'compression.codec': kwargs.get('compression_codec', 'none')
})
# add redis cache, for temporarily storage of returned data
self.use_redis = kwargs.get('use_redis', False)
self.expire_time = kwargs.get('expire_time', 600)
if self.use_redis:
import redis
redis_host = kwargs.get('redis_host', 'localhost')
redis_port = kwargs.get('redis_port', 6379)
redis_db = kwargs.get('redis_db', 0)
redis_password = kwargs.get('redis_password', None)
self.cache = redis.Redis(redis_host, redis_port, redis_db, redis_password)
self.cache_channel = self.cache.pubsub()
else:
self.cache = QueueDict(maxlen=kwargs.get('max_queue_len', 1024), expire=self.expire_time)
# set msgpack packer & unpacker, stop using a global packer or unpacker, to ensure thread safety.
# self.packer = msgpack.Packer(use_bin_type=True)
# self.unpacker = msgpack.Unpacker(use_list=False, raw=False)
self.verify = kwargs.get('verify', False)
self.verification_method = kwargs.get('verification', 'crc32')
if self.verification_method == 'crc32':
self.verification_method = lambda x: hex(zlib.crc32(x)).encode()
elif isinstance(self.verification_method, Callable):
self.verification_method = self.verification_method
else:
raise AssertionError('not supported verification function.')
self.encrypt = kwargs.get('encrypt', None)
if self.encrypt is not None:
self.encrypt = AESEncryption(self.encrypt, encrypt_length=16)
self.use_compression = kwargs.get('use_compression', False)
self.is_closed = False
# coroutine pool
use_gevent = kwargs.get('use_gevent', True)
if use_gevent:
from gevent.threadpool import ThreadPoolExecutor as gThreadPoolExecutor
self.pool = gThreadPoolExecutor(1)
else:
from aplex import ThreadAsyncPoolExecutor
self.pool = ThreadAsyncPoolExecutor(pool_size=1)
self.pool.submit(self.wait_forever)
# handshake, if's ok not to handshake, but the first rpc would be slow.
if kwargs.get('handshake', True):
self.handshaked = {}
self.subscribe(*self.topic_names)
# acknowledge, disable ack will double the speed, but not exactly safe.
self.ack = kwargs.get('ack', False)
def subscribe(self, *topic_names):
    """Subscribe to additional RPC topics and handshake with their servers.

    ``__init__`` calls this with topics that are already registered in
    ``self.topic_names``, so registration is made idempotent here: a
    topic (and its derived client topic) is appended only if missing.
    The original appended unconditionally, duplicating every initial
    topic in both lists.
    """
    if not topic_names:
        return

    for topic_name in topic_names:
        client_topic = 'krpc_{}_client'.format(topic_name)
        if topic_name not in self.topic_names:
            self.topic_names.append(topic_name)
        if client_topic not in self.client_topics:
            self.client_topics.append(client_topic)

    self.consumer.subscribe(self.client_topics)
    logger.info('adding consumer subscription of: {}'.format(topic_names))

    # Handshake with every server topic; wait_forever() flips the
    # corresponding self.handshaked entry when the reply arrives.
    if hasattr(self, 'handshaked'):
        for topic_name in topic_names:
            self.handshaked[topic_name] = False
            server_topic = 'krpc_{}_server'.format(topic_name)
            self.producer.produce(server_topic, b'handshake', b'handshake',
                                  headers={
                                      'checksum': None
                                  })
            self.producer.poll(0.0)
            logger.info('sending handshake to {}'.format(server_topic))

            # Poll up to ~30 s (15 x 2 s) for the server's reply.
            for _ in range(15):
                if self.handshaked[topic_name]:
                    logger.info('handshake of {} succeeded.'.format(topic_name))
                    break
                time.sleep(2)
            else:
                logger.error('failed to handshake with {}'.format(server_topic))
@staticmethod
def delivery_report(err, msg):
if err is not None:
logger.error('request failed: {}'.format(err))
else:
logger.info('request sent to {} [{}]'.format(msg.topic(), msg.partition()))
@staticmethod
def parse_response(msg_value):
try:
res = msgpack.unpackb(msg_value, use_list=False, raw=False)
except Exception as e:
logger.exception(e)
res = None
return res
def call(self, method_name, *args, **kwargs):
# rpc call timeout
# WARNING: if the rpc method has an argument named timeout, it will be not be passed.
timeout = kwargs.pop('timeout', 10)
# get topic_name
topic_name = kwargs.pop('topic_name', self.topic_names[0])
server_topic = 'krpc_{}_server'.format(topic_name)
start_time = time.time()
# send request back to server
req = {
'method_name': method_name,
'args': args,
'kwargs': kwargs
}
req = msgpack.packb(req, use_bin_type=True)
if self.use_compression:
req = zstd.compress(req)
if self.encrypt:
req = self.encrypt.encrypt(req)
if self.verify:
checksum = self.verification_method(req)
else:
checksum = None
task_id = uuid.uuid4().hex
self.producer.produce(server_topic, req, task_id,
headers={
'checksum': checksum
})
# waiting for response from server sync/async
res, flight_time_response = self.poll_result_from_cache(task_id, timeout)
if self.ack:
self.producer.poll(0.0)
# do something to the response
ret = res['ret']
tact_time_server = res['tact_time']
flight_time_request = res['flight_time_request']
server_id = res['server_id']
exception = res['exception']
tb = res['traceback']
if exception is not None:
exception = pickle.loads(exception)
logger.exception(exception)
if tb is not None:
logger.error(tb)
end_time = time.time()
return {
'ret': ret,
'tact_time': end_time - start_time,
'tact_time_server': tact_time_server,
'server_id': server_id,
'flight_time_request': flight_time_request,
'flight_time_response': flight_time_response
}
def wait_forever(self):
while True:
if self.is_closed:
logger.info('user exit')
break
try:
msg = self.consumer.poll(self.max_polling_timeout)
if msg is None:
continue
if msg.error():
logger.error("consumer error: {}".format(msg.error()))
continue
task_id = msg.key() # an uuid, the only id that pairs the request and the response
topic_name = msg.topic()
if task_id == b'handshake':
try:
real_topic_name = '_'.join(topic_name.split('_')[1:-1])
except:
logger.error('invalid topic name {}'.format(topic_name))
continue
self.handshaked[real_topic_name] = True
continue
res = msg.value()
headers = msg.headers()
timestamp = msg.timestamp()
checksum = headers[0][1]
if self.verify:
signature = self.verification_method(res)
if checksum != signature:
logger.error('checksum mismatch of task {}'.format(task_id))
continue
if self.use_redis:
self.cache.hset(task_id, b'result', res)
self.cache.hset(task_id, b'flight_time_response', time.time() - timestamp[1] / 1000)
self.cache.expire(task_id, self.expire_time)
else:
self.cache[task_id] = res, time.time() - timestamp[1] / 1000
# send signal for polling to search for result
...
except Exception as e:
logger.exception(e)
def poll_result_from_cache(self, task_id, timeout=10):
    """Block until the response for *task_id* appears in the cache, then
    decrypt/decompress/unpack it.

    Args:
        task_id: hex uuid of the request (str).
        timeout: maximum seconds to wait.

    Returns:
        (response dict, flight_time_response seconds)

    Raises:
        TimeoutError: if no response arrives within *timeout* seconds.
    """
    loop_times = int(timeout / self.max_polling_timeout)
    task_id = task_id.encode()

    if self.use_redis:
        for _ in range(loop_times):
            if not self.cache.hexists(task_id, 'result'):
                # The original spun here without sleeping, so the redis
                # branch burned CPU and exhausted loop_times almost
                # instantly, making the effective timeout ~0.
                time.sleep(self.max_polling_timeout)
                continue
            res = self.cache.hget(task_id, b'result')
            flight_time_response = self.cache.hget(task_id, b'flight_time_response')
            break
        else:
            raise TimeoutError
    else:
        for _ in range(loop_times):
            try:
                res, flight_time_response = self.cache[task_id]
                break
            except (KeyError, ValueError, IndexError):
                # QueueDict raises when the key is not cached yet; the
                # original bare ``except`` also swallowed KeyboardInterrupt.
                time.sleep(self.max_polling_timeout)
        else:
            raise TimeoutError

    # Reverse the encoding applied in call(): decrypt, decompress, unpack.
    if self.encrypt:
        res = self.encrypt.decrypt(res)
    if self.use_compression:
        res = zstd.decompress(res)
    res = self.parse_response(res)

    return res, flight_time_response
def __getattr__(self, method_name):
return lambda *args, **kwargs: self.call(method_name, *args, **kwargs)
def close(self):
self.is_closed = True
if self.use_redis:
self.cache_channel.close()
self.cache.close()
self.consumer.close()
self.producer.flush()
self.pool.shutdown()
class QueueDict:
    """A FIFO, size- and age-bounded key/value cache.

    Semantics (matching the original deque-based implementation):

    * ``q[key] = val`` records the value with the current timestamp, then
      evicts the oldest entries while the cache holds more than ``maxlen``
      items or the oldest entry is older than ``expire`` seconds.
    * ``q[key]`` *removes* the entry and returns its value (single-shot
      read); a missing key raises ``KeyError`` (the original raised
      ``ValueError`` via ``deque.index``).

    The original kept three parallel deques and deleted entries with
    ``deque.remove(value)``, which removes the first *equal* value and so
    corrupted the key/value alignment whenever two keys stored equal
    values.  A single insertion-ordered dict cannot mis-align.
    """

    def __init__(self, maxlen=0, expire=128):
        assert isinstance(maxlen, int) and maxlen >= 0
        assert isinstance(expire, int) and expire >= 0
        self.maxlen = maxlen
        self.expire = expire
        # key -> (value, insertion timestamp); dicts preserve insertion
        # order, so the first item is always the oldest.
        self._entries = {}

    def __setitem__(self, key, val):
        self._entries[key] = (val, time.time())
        self.remove_oldest()

    def __getitem__(self, key):
        # Single-shot read: the entry is consumed on success.
        val, _ = self._entries.pop(key)  # raises KeyError if absent
        return val

    def remove_oldest(self):
        """Evict entries beyond ``maxlen`` and entries older than ``expire``."""
        if self.maxlen is not None:
            while len(self._entries) > self.maxlen:
                self._entries.pop(next(iter(self._entries)))
        if self.expire is not None:
            now = time.time()
            for key, (_, ts) in list(self._entries.items()):
                if now - ts > self.expire:
                    del self._entries[key]
                else:
                    # Insertion order: everything after this is newer.
                    break
| en | 0.62217 | # Copyright (c) 2017-2020, <NAME> # All rights reserved. Update Log: 1.0.1: init 1.0.2: change project name from krpc to kafka_rpc 1.0.4: allow increasing message_max_bytes 1.0.5: support subscribing to multiple topics 1.0.6: stop using a global packer or unpacker, to ensure thread safety. 1.0.8: use gevent instead of built-in threading to speed up about 40% 1.0.9: support message compression 1.0.10: add argument max_queue_len to control the length of QueueDict 1.0.11: change redis backend behavior # add numpy array support for msgpack Init Kafka RPCClient. Not like the most of the RPC protocols, Only one KRPCClient can run on a single Kafka topic. If you insist using multiple KRPCClient instances, redis must be used, pass argument use_redis=True. Args: addresses: kafka broker host, port, for examples: '192.168.1.117:9092' topic_name: kafka topic_name(s), if topic exists, the existing topic will be used, create a new topic otherwise. max_polling_timeout: maximum time(seconds) to block waiting for message, event or callback. encrypt: default None, if not None, will encrypt the message with the given password. It will slow down performance. verify: default False, if True, will verify the message with the given sha3 checksum from the headers. use_redis: default False, if True, use redis as cache, built-in QueueDict otherwise. ack: default False, if True, server will confirm the message status. Disable ack will double the speed, but not exactly safe. use_gevent: default True, if True, use gevent instead of asyncio. If gevent version is lower than 1.5, krpc will not run on windows. compression: default 'none', check https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md compression.codec. 'zstd' is bugged. Check https://github.com/confluentinc/confluent-kafka-python/issues/589 use_compression: default False, custom compression using zstd. 
max_queue_len: int, default 1024, if use_redis is False, a QueueDict will cache results with the length of max_queue_len. This should be as low as it can be, otherwise OOM. # self.server_topics = ['krpc_{}_server'.format(topic_name) for topic_name in self.topic_names] # set max_polling_timeout # message_max_bytes = kwargs.get('message_max_bytes', 1048576), # queue_buffering_max_kbytes = kwargs.get('queue_buffering_max_kbytes', 1048576), # queue_buffering_max_messages = kwargs.get('queue_buffering_max_messages', 100000), # custom parameters # add redis cache, for temporarily storage of returned data # set msgpack packer & unpacker, stop using a global packer or unpacker, to ensure thread safety. # self.packer = msgpack.Packer(use_bin_type=True) # self.unpacker = msgpack.Unpacker(use_list=False, raw=False) # coroutine pool # handshake, if's ok not to handshake, but the first rpc would be slow. # acknowledge, disable ack will double the speed, but not exactly safe. # rpc call timeout # WARNING: if the rpc method has an argument named timeout, it will be not be passed. # get topic_name # send request back to server # waiting for response from server sync/async # do something to the response # an uuid, the only id that pairs the request and the response # send signal for polling to search for result poll_result_from_cache after receiving a signal from waiting Args: task_id: timeout: Returns: # if still no response yet, continue polling | 2.152518 | 2 |
main.py | sskrishnan/ayyanar | 0 | 6614068 | <filename>main.py
import sched
import subprocess
import time
class ProcessData:
    """Record of a monitored child process and how it was launched."""

    def __init__(self, process, name, path, monitorstatus=None, cycles=None):
        """Capture the Popen handle and launch parameters for restarts."""
        for attr, value in (('process', process), ('name', name),
                            ('path', path), ('monitorstatus', monitorstatus),
                            ('cycles', cycles)):
            setattr(self, attr, value)
class UrlData:
    """Record of an endpoint to probe for TCP reachability."""

    def __init__(self, url, port, timeout, monitorstatus=None, cycles=None):
        """Capture the probe target and per-attempt timeout."""
        for attr, value in (('url', url), ('port', port),
                            ('timeout', timeout),
                            ('monitorstatus', monitorstatus),
                            ('cycles', cycles)):
            setattr(self, attr, value)
s = sched.scheduler(time.time, time.sleep)
processes=[]
connectivity=[]
def addprocess(path):
    """Launch *path* via /bin/sh, register it for monitoring, return its PID.

    The original captured stdout and called ``p.communicate()``, which
    blocks until the child exits — so a long-running script froze the
    monitor instead of being watched.  The child now inherits the
    parent's stdout/stderr and the call returns immediately.
    """
    executable = "/bin/sh"
    p = subprocess.Popen([executable, '-c', path], shell=False)
    processes.append(ProcessData(p, executable, path))
    print("Starting program " + path + " with PID :" + str(p.pid))
    return p.pid
def addmonitorurl(url,port,timeout):
    # Register an endpoint for periodic TCP reachability checks
    # (consumed by monitorurl()).
    connectivity.append(UrlData(url,port,timeout))
def check_pids():
    """Restart any registered process that has exited.

    Iterates over a snapshot of ``processes`` because dead entries are
    removed (and replacements appended) during the scan; the original
    mutated the list while iterating it, which skips elements.
    """
    if not processes:
        return
    for p in list(processes):
        if p.process.poll() is None:
            # poll() returning None means the child is still running
            print("Program " + p.path + " with PID " + str(p.process.pid) + " is running")
        else:
            print(str(p.process.pid) + " is dead or Z")
            processes.remove(p)
            addprocess(p.path)
def makeconnection(url, port, timeout):
    """Return True if a TCP connection to (url, port) succeeds within *timeout*.

    Fixes over the original:
    * ``send`` requires bytes — the original passed a str, a TypeError on
      Python 3 that the broad except then reported as a connection error.
    * ``e.message`` does not exist on Python 3 exceptions; use ``str(e)``.
    * the socket is closed in ``finally`` so it is released on every path.
    """
    import socket
    serv = socket.socket()
    try:
        serv.settimeout(timeout)
        serv.connect((url, port))
        serv.send(b"hello\n")
        return True
    except Exception as e:
        print("Error during connection to " + url + ":" + str(port) + " " + str(e))
        return False
    finally:
        serv.close()
def monitorurl():
if connectivity is not None and len(connectivity)>0:
for c in connectivity :
response=makeconnection(c.url,c.port,c.timeout)
if(response):
print("Connection to "+c.url+":"+str(c.port)+" success ")
else:
print("Connection to "+c.url+":"+str(c.port)+" failed ")
def addconnectivitycheck(url):
connectivity.append(url)
pass
def main():
addprocess("keeprnning.sh")
addprocess("keeprunning1.sh")
addmonitorurl("google.com",443,1)
addmonitorurl("yahoo.com",443,1)
while True:
check_pids()
monitorurl()
time.sleep(5)
s.run()
main()
| <filename>main.py
import sched
import subprocess
import time
class ProcessData:
def __init__(self, process, name, path,monitorstatus=None,cycles=None):
self.process = process
self.name = name
self.path = path
self.monitorstatus = monitorstatus
self.cycles = cycles
class UrlData:
def __init__(self, url, port, timeout,monitorstatus=None,cycles=None):
self.url = url
self.port = port
self.timeout = timeout
self.monitorstatus = monitorstatus
self.cycles = cycles
s = sched.scheduler(time.time, time.sleep)
processes=[]
connectivity=[]
def addprocess(path):
from subprocess import STDOUT
# Start process
executable="/bin/sh"
p = subprocess.Popen([executable,'-c', path],shell=False,stdout=subprocess.PIPE,stderr=STDOUT)
out, err = p.communicate()
print(out.decode())
processes.append(ProcessData(p,executable,path))
print("Starting program "+ path +" with PID :"+str(p.pid))
return p.pid
def addmonitorurl(url,port,timeout):
connectivity.append(UrlData(url,port,timeout))
def check_pids():
if processes is not None and len(processes)>0:
for p in processes :
poll = p.process.poll()
if poll is None:
print("Program "+ p.path +" with PID "+str(p.process.pid) +" is running")
continue
else:
print(str(p.process.pid) +" is dead or Z")
processes.remove(p)
addprocess(p.path)
def makeconnection(url,port,timeout):
import socket
try:
serv = socket.socket()
serv.settimeout(timeout)
serv.connect((url,port))
serv.send("hello\n")
serv.close()
return True
except Exception as e:
print ("Error during connection to "+url+":"+str(port)+" "+e.message)
serv.close()
return False
def monitorurl():
if connectivity is not None and len(connectivity)>0:
for c in connectivity :
response=makeconnection(c.url,c.port,c.timeout)
if(response):
print("Connection to "+c.url+":"+str(c.port)+" success ")
else:
print("Connection to "+c.url+":"+str(c.port)+" failed ")
def addconnectivitycheck(url):
connectivity.append(url)
pass
def main():
addprocess("keeprnning.sh")
addprocess("keeprunning1.sh")
addmonitorurl("google.com",443,1)
addmonitorurl("yahoo.com",443,1)
while True:
check_pids()
monitorurl()
time.sleep(5)
s.run()
main()
| en | 0.767927 | # Start process | 2.898351 | 3 |
lineofservice/tests/factory.py | resourceidea/resourceideaapi | 1 | 6614069 | <gh_stars>1-10
import factory
from faker import Faker
from faker.providers import lorem # type: ignore
faker = Faker()
faker.add_provider(lorem)
class LineOfServiceFactory(factory.django.DjangoModelFactory):
"""Line of service factory"""
class Meta:
model = 'lineofservice.LineOfService'
name = faker.word()[:99]
organization = factory.SubFactory('organization.tests.factory.OrganizationFactory')
| import factory
from faker import Faker
from faker.providers import lorem # type: ignore
faker = Faker()
faker.add_provider(lorem)
class LineOfServiceFactory(factory.django.DjangoModelFactory):
"""Line of service factory"""
class Meta:
model = 'lineofservice.LineOfService'
name = faker.word()[:99]
organization = factory.SubFactory('organization.tests.factory.OrganizationFactory') | en | 0.762445 | # type: ignore Line of service factory | 1.966237 | 2 |
kenning/core/compiler.py | antmicro/edge-ai-tester | 20 | 6614070 | <reponame>antmicro/edge-ai-tester<filename>kenning/core/compiler.py
"""
Provides an API for model compilers.
"""
import argparse
from pathlib import Path
from typing import Dict, Tuple
from kenning.core.dataset import Dataset
class CompilationError(Exception):
pass
class ModelCompiler(object):
"""
Compiles the given model to a different format or runtime.
"""
def __init__(
self,
dataset: Dataset,
compiled_model_path: str,
dataset_percentage: float = 1.0):
"""
Prepares the ModelCompiler object.
Parameters
----------
dataset : Dataset
Dataset used to train the model - may be used for quantization
during compilation stage
compiled_model_path : str
Path to file where the compiled model should be saved
dataset_percentage : float
If the dataset is used for optimization (quantization), the
dataset_percentage determines how much of data samples is going
to be used
"""
self.dataset = dataset
self.compiled_model_path = compiled_model_path
self.dataset_percentage = dataset_percentage
@classmethod
def form_argparse(cls, quantizes_model: bool = False):
"""
Creates argparse parser for the ModelCompiler object.
Parameters
----------
quantizes_model : bool
Tells if the compiler quantizes model - if so, flags for
calibration dataset are enabled
Returns
-------
(ArgumentParser, ArgumentGroup) :
tuple with the argument parser object that can act as parent for
program's argument parser, and the corresponding arguments' group
pointer
"""
parser = argparse.ArgumentParser(add_help=False)
group = parser.add_argument_group(title='Compiler arguments')
group.add_argument(
'--compiled-model-path',
help='The path to the compiled model output',
type=Path,
required=True
)
if quantizes_model:
group.add_argument(
'--dataset-percentage',
help='Tells how much data from dataset (from 0.0 to 1.0) ' +
'will be used for calibration dataset',
type=float,
default=0.25
)
return parser, group
@classmethod
def from_argparse(cls, dataset: Dataset, args):
"""
Constructor wrapper that takes the parameters from argparse args.
Parameters
----------
dataset : Dataset
The dataset object that is optionally used for optimization
args : Dict
arguments from ArgumentParser object
Returns
-------
ModelCompiler : object of class ModelCompiler
"""
if hasattr(args, 'dataset_percentage'):
return cls(
dataset,
args.compiled_model_path,
args.dataset_percentage
)
else:
return cls(
dataset,
args.compiled_model_path
)
def compile(
self,
inputmodelpath: Path,
inputshapes: Dict[str, Tuple[int, ...]],
dtype: str = 'float32'):
"""
Compiles the given model to a target format.
The function compiles the model and saves it to the output file.
The model can be compiled to a binary, a different framework or a
different programming language.
The additional compilation parameters that are not derivable from
the input and output format should be passed in the constructor or via
argument parsing.
The compiled model is saved to compiled_model_path
Parameters
----------
inputmodelpath : Path
Path to the input model
inputshapes : Dict[str, Tuple[int, ...]]
The dictionary with mapping (input name) -> (input shape)
dtype : str
The type of input tensors
"""
raise NotImplementedError
def get_framework_and_version(self) -> Tuple[str, str]:
"""
Returns name of the framework and its version in a form of a tuple.
"""
raise NotImplementedError
| """
Provides an API for model compilers.
"""
import argparse
from pathlib import Path
from typing import Dict, Tuple
from kenning.core.dataset import Dataset
class CompilationError(Exception):
pass
class ModelCompiler(object):
"""
Compiles the given model to a different format or runtime.
"""
def __init__(
self,
dataset: Dataset,
compiled_model_path: str,
dataset_percentage: float = 1.0):
"""
Prepares the ModelCompiler object.
Parameters
----------
dataset : Dataset
Dataset used to train the model - may be used for quantization
during compilation stage
compiled_model_path : str
Path to file where the compiled model should be saved
dataset_percentage : float
If the dataset is used for optimization (quantization), the
dataset_percentage determines how much of data samples is going
to be used
"""
self.dataset = dataset
self.compiled_model_path = compiled_model_path
self.dataset_percentage = dataset_percentage
@classmethod
def form_argparse(cls, quantizes_model: bool = False):
"""
Creates argparse parser for the ModelCompiler object.
Parameters
----------
quantizes_model : bool
Tells if the compiler quantizes model - if so, flags for
calibration dataset are enabled
Returns
-------
(ArgumentParser, ArgumentGroup) :
tuple with the argument parser object that can act as parent for
program's argument parser, and the corresponding arguments' group
pointer
"""
parser = argparse.ArgumentParser(add_help=False)
group = parser.add_argument_group(title='Compiler arguments')
group.add_argument(
'--compiled-model-path',
help='The path to the compiled model output',
type=Path,
required=True
)
if quantizes_model:
group.add_argument(
'--dataset-percentage',
help='Tells how much data from dataset (from 0.0 to 1.0) ' +
'will be used for calibration dataset',
type=float,
default=0.25
)
return parser, group
@classmethod
def from_argparse(cls, dataset: Dataset, args):
"""
Constructor wrapper that takes the parameters from argparse args.
Parameters
----------
dataset : Dataset
The dataset object that is optionally used for optimization
args : Dict
arguments from ArgumentParser object
Returns
-------
ModelCompiler : object of class ModelCompiler
"""
if hasattr(args, 'dataset_percentage'):
return cls(
dataset,
args.compiled_model_path,
args.dataset_percentage
)
else:
return cls(
dataset,
args.compiled_model_path
)
def compile(
self,
inputmodelpath: Path,
inputshapes: Dict[str, Tuple[int, ...]],
dtype: str = 'float32'):
"""
Compiles the given model to a target format.
The function compiles the model and saves it to the output file.
The model can be compiled to a binary, a different framework or a
different programming language.
The additional compilation parameters that are not derivable from
the input and output format should be passed in the constructor or via
argument parsing.
The compiled model is saved to compiled_model_path
Parameters
----------
inputmodelpath : Path
Path to the input model
inputshapes : Dict[str, Tuple[int, ...]]
The dictionary with mapping (input name) -> (input shape)
dtype : str
The type of input tensors
"""
raise NotImplementedError
def get_framework_and_version(self) -> Tuple[str, str]:
"""
Returns name of the framework and its version in a form of a tuple.
"""
raise NotImplementedError | en | 0.632308 | Provides an API for model compilers. Compiles the given model to a different format or runtime. Prepares the ModelCompiler object. Parameters ---------- dataset : Dataset Dataset used to train the model - may be used for quantization during compilation stage compiled_model_path : str Path to file where the compiled model should be saved dataset_percentage : float If the dataset is used for optimization (quantization), the dataset_percentage determines how much of data samples is going to be used Creates argparse parser for the ModelCompiler object. Parameters ---------- quantizes_model : bool Tells if the compiler quantizes model - if so, flags for calibration dataset are enabled Returns ------- (ArgumentParser, ArgumentGroup) : tuple with the argument parser object that can act as parent for program's argument parser, and the corresponding arguments' group pointer Constructor wrapper that takes the parameters from argparse args. Parameters ---------- dataset : Dataset The dataset object that is optionally used for optimization args : Dict arguments from ArgumentParser object Returns ------- ModelCompiler : object of class ModelCompiler Compiles the given model to a target format. The function compiles the model and saves it to the output file. The model can be compiled to a binary, a different framework or a different programming language. The additional compilation parameters that are not derivable from the input and output format should be passed in the constructor or via argument parsing. The compiled model is saved to compiled_model_path Parameters ---------- inputmodelpath : Path Path to the input model inputshapes : Dict[str, Tuple[int, ...]] The dictionary with mapping (input name) -> (input shape) dtype : str The type of input tensors Returns name of the framework and its version in a form of a tuple. | 3.264647 | 3 |
core/urls.py | dchouzer/gamify-lyfe | 0 | 6614071 | <reponame>dchouzer/gamify-lyfe
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
if settings.DEBUG:
urlpatterns = patterns('core.views',
url(r'^$', 'home'),
url(r'^logout$', 'logout'),
url(r'^register$', 'register'),
url(r'^dashboard$', 'dashboard'),
url(r'^rewards$', 'rewards'),
url(r'^search$', 'search'),
url(r'^avatar$', 'avatar'),
url(r'^share_settings/(?P<goalgroup>[^/]+)$', 'share_settings'),
url(r'^edit_sharee/(?P<goalgroup>[^/]+)$', 'edit_sharee'),
url(r'^add_share_setting/(?P<goalgroup>[^/]+)$', 'add_share_setting'),
url(r'^add_all_sharesettings/(?P<goalgroup>[^/]+)$', 'add_all_sharesettings'),
url(r'^delete_share_setting/(?P<sharesetting>[^/]+)$', 'delete_share_setting'),
url(r'^profile/(?P<username>[^/]+)$', 'profile'),
url(r'^addfriend/(?P<username>[^/]+)$', 'addfriend'),
url(r'^unfriend/(?P<username>[^/]+)$', 'unfriend'),
url(r'^group/(?P<group>[^/]+)$', 'group'),
url(r'^edit_group/(?P<group>[^/]+)$', 'edit_group'),
url(r'^delete_group/(?P<group>[^/]+)$', 'delete_group'),
url(r'^new_group_logo/(?P<group>[^/]+)$', 'new_group_logo'),
url(r'^add_membership/(?P<group>[^/]+)$', 'add_membership'),
url(r'^deny_membership/(?P<membership>[^/]+)$', 'deny_membership'),
url(r'^approve_membership/(?P<membership>[^/]+)$', 'approve_membership'),
url(r'^add_group$', 'add_group'),
url(r'^post_update/(?P<goal>[^/]+)$', 'post_update'),
url(r'^add_actionitem/(?P<goalgroup>[^/]+)$', 'add_actionitem'),
url(r'^add_goal$', 'add_goal'),
url(r'^add_reward$', 'add_reward'),
url(r'^buy_reward/(?P<reward>[^/]+)$', 'buy_reward'),
url(r'^retire_reward/(?P<reward>[^/]+)$', 'retire_reward'),
url(r'^delete_goal/(?P<goal>[^/]+)$', 'delete_goal'),
url(r'^delete_goal/(?P<goal>[^/]+)/(?P<neworder_num>[^/]+)$', 'flip_goals'),
url(r'^add_friendpoint/(?P<goal>[^/]+)$', 'add_friendpoint'),
url(r'^add_comment/(?P<update>[^/]+)$', 'add_comment'),
url(r'^delete_goalgroup/(?P<goalgroup>[^/]+)$', 'delete_goalgroup'),
)
| from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
if settings.DEBUG:
urlpatterns = patterns('core.views',
url(r'^$', 'home'),
url(r'^logout$', 'logout'),
url(r'^register$', 'register'),
url(r'^dashboard$', 'dashboard'),
url(r'^rewards$', 'rewards'),
url(r'^search$', 'search'),
url(r'^avatar$', 'avatar'),
url(r'^share_settings/(?P<goalgroup>[^/]+)$', 'share_settings'),
url(r'^edit_sharee/(?P<goalgroup>[^/]+)$', 'edit_sharee'),
url(r'^add_share_setting/(?P<goalgroup>[^/]+)$', 'add_share_setting'),
url(r'^add_all_sharesettings/(?P<goalgroup>[^/]+)$', 'add_all_sharesettings'),
url(r'^delete_share_setting/(?P<sharesetting>[^/]+)$', 'delete_share_setting'),
url(r'^profile/(?P<username>[^/]+)$', 'profile'),
url(r'^addfriend/(?P<username>[^/]+)$', 'addfriend'),
url(r'^unfriend/(?P<username>[^/]+)$', 'unfriend'),
url(r'^group/(?P<group>[^/]+)$', 'group'),
url(r'^edit_group/(?P<group>[^/]+)$', 'edit_group'),
url(r'^delete_group/(?P<group>[^/]+)$', 'delete_group'),
url(r'^new_group_logo/(?P<group>[^/]+)$', 'new_group_logo'),
url(r'^add_membership/(?P<group>[^/]+)$', 'add_membership'),
url(r'^deny_membership/(?P<membership>[^/]+)$', 'deny_membership'),
url(r'^approve_membership/(?P<membership>[^/]+)$', 'approve_membership'),
url(r'^add_group$', 'add_group'),
url(r'^post_update/(?P<goal>[^/]+)$', 'post_update'),
url(r'^add_actionitem/(?P<goalgroup>[^/]+)$', 'add_actionitem'),
url(r'^add_goal$', 'add_goal'),
url(r'^add_reward$', 'add_reward'),
url(r'^buy_reward/(?P<reward>[^/]+)$', 'buy_reward'),
url(r'^retire_reward/(?P<reward>[^/]+)$', 'retire_reward'),
url(r'^delete_goal/(?P<goal>[^/]+)$', 'delete_goal'),
url(r'^delete_goal/(?P<goal>[^/]+)/(?P<neworder_num>[^/]+)$', 'flip_goals'),
url(r'^add_friendpoint/(?P<goal>[^/]+)$', 'add_friendpoint'),
url(r'^add_comment/(?P<update>[^/]+)$', 'add_comment'),
url(r'^delete_goalgroup/(?P<goalgroup>[^/]+)$', 'delete_goalgroup'),
) | none | 1 | 1.910482 | 2 | |
corCTF2021/crypto-babypad/solve.py | willwam845/ctf | 2 | 6614072 | from pwn import *
from Crypto.Util.Padding import unpad
import time
s = remote(sys.argv[1],int(sys.argv[2])) #, level='debug')
ct = bytes.fromhex(s.recvline().decode())
t = time.time()
def bxor(ba1,ba2):
return bytes([_a ^ _b for _a, _b in zip(ba1, ba2)])
def query(msg):
s.sendlineafter(b">", msg.hex())
if int(s.recvline().decode()):
return 1
else:
return 0
def brute(b, iv, p1, p2, block, ct):
if query(iv + p1[:-1] + bytes([b]) + p2) and ((block+16) < len(ct) or b != ct[-1]):
return True
return False
assert query(ct)
assert not query(b'\x00'*32)
flag = b''
for block in range(16, len(ct), 16):
iv = ct[:block]
p1 = ct[block:block+16]
p2 = b""
i = 1
for i in range (1, 16):
b1, b2 = 0, 128
if p1[-1] >= 128:
b1, b2 = 128, 256
for byte in range(b1, b2):
print(i, byte)
if brute(byte, iv, p1, p2, block, ct):
p2 = bytes([byte]) + p2
p2 = bxor(p2, bytes([i]) * i)
p2 = bxor(p2, bytes([i+1]) * i)
p1 = p1[:-1]
print(i, byte)
break
for byte in range(256):
if query(iv + bytes([byte]) + p2):
p2 = bytes([byte]) + p2
break
flag += (bxor(bxor(p2, b'\x10' * 16), ct[block:block+16]))
print(flag)
print(unpad(flag, 16).decode())
print(f"Solved in {time.time() - t} seconds")
| from pwn import *
from Crypto.Util.Padding import unpad
import time
s = remote(sys.argv[1],int(sys.argv[2])) #, level='debug')
ct = bytes.fromhex(s.recvline().decode())
t = time.time()
def bxor(ba1,ba2):
return bytes([_a ^ _b for _a, _b in zip(ba1, ba2)])
def query(msg):
s.sendlineafter(b">", msg.hex())
if int(s.recvline().decode()):
return 1
else:
return 0
def brute(b, iv, p1, p2, block, ct):
if query(iv + p1[:-1] + bytes([b]) + p2) and ((block+16) < len(ct) or b != ct[-1]):
return True
return False
assert query(ct)
assert not query(b'\x00'*32)
flag = b''
for block in range(16, len(ct), 16):
iv = ct[:block]
p1 = ct[block:block+16]
p2 = b""
i = 1
for i in range (1, 16):
b1, b2 = 0, 128
if p1[-1] >= 128:
b1, b2 = 128, 256
for byte in range(b1, b2):
print(i, byte)
if brute(byte, iv, p1, p2, block, ct):
p2 = bytes([byte]) + p2
p2 = bxor(p2, bytes([i]) * i)
p2 = bxor(p2, bytes([i+1]) * i)
p1 = p1[:-1]
print(i, byte)
break
for byte in range(256):
if query(iv + bytes([byte]) + p2):
p2 = bytes([byte]) + p2
break
flag += (bxor(bxor(p2, b'\x10' * 16), ct[block:block+16]))
print(flag)
print(unpad(flag, 16).decode())
print(f"Solved in {time.time() - t} seconds")
| ko | 0.132211 | #, level='debug') | 2.176331 | 2 |
HttpApiManager/views.py | QiChangYin/MultipleInterfaceManager | 0 | 6614073 | order_data_list = ('1','2')
assert isinstance(order_data_list, list), 'order data must be a list' | order_data_list = ('1','2')
assert isinstance(order_data_list, list), 'order data must be a list' | none | 1 | 1.966206 | 2 | |
tests/__init__.py | ondrolexa/heat | 2 | 6614074 | <gh_stars>1-10
"""Unit test package for heatlib."""
| """Unit test package for heatlib.""" | en | 0.592771 | Unit test package for heatlib. | 0.899538 | 1 |
tests/bdd/features/qm/corfunctions/spectraldensities_steps.py | slamavl/quantarhei | 14 | 6614075 | # -*- coding: utf-8 -*-
from aloe import step
from aloe import world
import numpy
import matplotlib.pyplot as plt
from quantarhei import energy_units
from quantarhei import Manager
from quantarhei import CorrelationFunction
from quantarhei import SpectralDensity
from ...stepslib import read_n_columns
@step(r'correlation function parameters')
def correlation_function_parameters(self):
for row in self.hashes:
ftype = row['cf_type']
temp = float(row['temp'])
T_units = row['T_units']
reorg = float(row['reorg'])
e_units = row['e_units']
ctime = float(row['ctime'])
t_units = row['t_units']
mats = int(row['mats'])
world.e_units = e_units
world.temp_units = T_units
world.time_units = t_units
#world.params = params
world.ctype = ftype
world.reorg = reorg
world.ctime = ctime
world.temp = temp
world.mats = mats
@step(r'spectral density parameters')
def spectral_density_parameters(self):
for row in self.hashes:
ftype = row['cf_type']
temp = float(row['temp'])
T_units = row['T_units']
reorg = float(row['reorg'])
e_units = row['e_units']
ctime = float(row['ctime'])
t_units = row['t_units']
mats = int(row['mats'])
world.e_units = e_units
world.temp_units = T_units
world.time_units = t_units
world.ctype = ftype
world.reorg = reorg
world.ctime = ctime
world.temp = temp
world.mats = mats
@step(r'I calculate the ([^"]*) spectral density')
def spectral_density_of_type(self, ctype):
print("spectral density type ", ctype)
world.ctype = ctype
params = {"ftype": world.ctype,
"reorg": world.reorg,
"cortime": world.ctime,
"T": world.temp,
"matsubara":world.mats}
# FIXME: also time_units, temperature_units
with energy_units(world.e_units):
sd = SpectralDensity(world.ta, params)
world.sd = sd
@step(r'spectral density is created from correlation function')
def spectral_dens_from_corrfce(self):
#params = world.params
params = {"ftype": world.ctype,
"reorg": world.reorg,
"cortime": world.ctime,
"T": world.temp,
"matsubara":world.mats}
ta = world.ta
with energy_units(world.e_units):
cf = CorrelationFunction(ta,params)
world.sd = cf.get_SpectralDensity()
@step(r'spectral density corresponds to file ([^"]*) in internal units')
def compare_data_with_file(self, file):
print("comparing with file ", file)
sd_data = read_n_columns(__package__,file,2)
i = 0
data = numpy.zeros((world.sd.axis.data.shape[0],2))
for t in world.sd.axis.data:
data[i,0] = t
data[i,1] = numpy.real(world.sd.data[i])
#data[i,2] = numpy.imag(world.cf.data[i])
i += 1
#plt.plot(world.sd.axis.data,world.sd.data)
#plt.plot(world.sd.axis.data,sd_data[:,1],"--r")
#plt.show()
numpy.testing.assert_allclose(sd_data,data,rtol=1.0e-3,atol=1.0e-3)
@step(r'spectral density corresponds to analytical result for ([^"]*) in internal units')
def compare_spectral_dens_to_analytical(self, fctype):
m = Manager()
i = 0
sd_data = numpy.zeros((world.sd.axis.data.shape[0],2))
wa = world.ta.get_FrequencyAxis()
with energy_units("int"):
sd_data[:,0] = wa.data
omega = wa.data
with energy_units(world.e_units):
lamb = m.convert_energy_2_internal_u(world.reorg)
ctime = world.ctime
if fctype == "OverdampedBrownian":
# Analytical for for the overdamped Brownian spectral density
sd_data[:,1] = (2.0*lamb/ctime)*omega/(omega**2 + (1.0/ctime)**2)
else:
raise Exception()
data = numpy.zeros((world.sd.axis.data.shape[0],2))
for t in world.sd.axis.data:
data[i,0] = t
data[i,1] = numpy.real(world.sd.data[i])
#data[i,2] = numpy.imag(world.cf.data[i])
i += 1
diff = numpy.amax(numpy.abs(sd_data[:,1]-data[:,1]))
maxv = numpy.amax(numpy.abs(sd_data[:,1]))
print("Difference: ", diff, " on ", maxv)
numpy.testing.assert_allclose(sd_data,data,rtol=1.0e-3,atol=1.0e-3)
@step(r'I calculate odd FT of the correlation function')
def calculate_oddFT_from_corfce(self):
cf = world.cf
oddft = cf.get_OddFTCorrelationFunction()
world.oddft = oddft
@step(r'odd FT correlation function corresponds to spectral density')
def compare_oddFT_with_spectral_density(self):
sd = world.sd
oddft = world.oddft
numpy.testing.assert_allclose(oddft.axis.data,sd.axis.data,rtol=1.0e-7)
mx = numpy.max(numpy.abs(sd.data))
print("Maximum of the spectral density: ", mx)
df = numpy.max(numpy.abs(sd.data-oddft.data))
print("Maximum of the differnece between spectral density and odd FT: ",df)
print("Ratio of the two: ", df/mx)
Ndiv = 50.0
atol = mx/Ndiv
print("Checking with absolute tolerance mx /",Ndiv,"= ", atol)
numpy.testing.assert_allclose(oddft.data,sd.data,atol=atol) #1.0e-4) #df)
| # -*- coding: utf-8 -*-
from aloe import step
from aloe import world
import numpy
import matplotlib.pyplot as plt
from quantarhei import energy_units
from quantarhei import Manager
from quantarhei import CorrelationFunction
from quantarhei import SpectralDensity
from ...stepslib import read_n_columns
@step(r'correlation function parameters')
def correlation_function_parameters(self):
for row in self.hashes:
ftype = row['cf_type']
temp = float(row['temp'])
T_units = row['T_units']
reorg = float(row['reorg'])
e_units = row['e_units']
ctime = float(row['ctime'])
t_units = row['t_units']
mats = int(row['mats'])
world.e_units = e_units
world.temp_units = T_units
world.time_units = t_units
#world.params = params
world.ctype = ftype
world.reorg = reorg
world.ctime = ctime
world.temp = temp
world.mats = mats
@step(r'spectral density parameters')
def spectral_density_parameters(self):
for row in self.hashes:
ftype = row['cf_type']
temp = float(row['temp'])
T_units = row['T_units']
reorg = float(row['reorg'])
e_units = row['e_units']
ctime = float(row['ctime'])
t_units = row['t_units']
mats = int(row['mats'])
world.e_units = e_units
world.temp_units = T_units
world.time_units = t_units
world.ctype = ftype
world.reorg = reorg
world.ctime = ctime
world.temp = temp
world.mats = mats
@step(r'I calculate the ([^"]*) spectral density')
def spectral_density_of_type(self, ctype):
print("spectral density type ", ctype)
world.ctype = ctype
params = {"ftype": world.ctype,
"reorg": world.reorg,
"cortime": world.ctime,
"T": world.temp,
"matsubara":world.mats}
# FIXME: also time_units, temperature_units
with energy_units(world.e_units):
sd = SpectralDensity(world.ta, params)
world.sd = sd
@step(r'spectral density is created from correlation function')
def spectral_dens_from_corrfce(self):
#params = world.params
params = {"ftype": world.ctype,
"reorg": world.reorg,
"cortime": world.ctime,
"T": world.temp,
"matsubara":world.mats}
ta = world.ta
with energy_units(world.e_units):
cf = CorrelationFunction(ta,params)
world.sd = cf.get_SpectralDensity()
@step(r'spectral density corresponds to file ([^"]*) in internal units')
def compare_data_with_file(self, file):
print("comparing with file ", file)
sd_data = read_n_columns(__package__,file,2)
i = 0
data = numpy.zeros((world.sd.axis.data.shape[0],2))
for t in world.sd.axis.data:
data[i,0] = t
data[i,1] = numpy.real(world.sd.data[i])
#data[i,2] = numpy.imag(world.cf.data[i])
i += 1
#plt.plot(world.sd.axis.data,world.sd.data)
#plt.plot(world.sd.axis.data,sd_data[:,1],"--r")
#plt.show()
numpy.testing.assert_allclose(sd_data,data,rtol=1.0e-3,atol=1.0e-3)
@step(r'spectral density corresponds to analytical result for ([^"]*) in internal units')
def compare_spectral_dens_to_analytical(self, fctype):
m = Manager()
i = 0
sd_data = numpy.zeros((world.sd.axis.data.shape[0],2))
wa = world.ta.get_FrequencyAxis()
with energy_units("int"):
sd_data[:,0] = wa.data
omega = wa.data
with energy_units(world.e_units):
lamb = m.convert_energy_2_internal_u(world.reorg)
ctime = world.ctime
if fctype == "OverdampedBrownian":
# Analytical for for the overdamped Brownian spectral density
sd_data[:,1] = (2.0*lamb/ctime)*omega/(omega**2 + (1.0/ctime)**2)
else:
raise Exception()
data = numpy.zeros((world.sd.axis.data.shape[0],2))
for t in world.sd.axis.data:
data[i,0] = t
data[i,1] = numpy.real(world.sd.data[i])
#data[i,2] = numpy.imag(world.cf.data[i])
i += 1
diff = numpy.amax(numpy.abs(sd_data[:,1]-data[:,1]))
maxv = numpy.amax(numpy.abs(sd_data[:,1]))
print("Difference: ", diff, " on ", maxv)
numpy.testing.assert_allclose(sd_data,data,rtol=1.0e-3,atol=1.0e-3)
@step(r'I calculate odd FT of the correlation function')
def calculate_oddFT_from_corfce(self):
cf = world.cf
oddft = cf.get_OddFTCorrelationFunction()
world.oddft = oddft
@step(r'odd FT correlation function corresponds to spectral density')
def compare_oddFT_with_spectral_density(self):
sd = world.sd
oddft = world.oddft
numpy.testing.assert_allclose(oddft.axis.data,sd.axis.data,rtol=1.0e-7)
mx = numpy.max(numpy.abs(sd.data))
print("Maximum of the spectral density: ", mx)
df = numpy.max(numpy.abs(sd.data-oddft.data))
print("Maximum of the differnece between spectral density and odd FT: ",df)
print("Ratio of the two: ", df/mx)
Ndiv = 50.0
atol = mx/Ndiv
print("Checking with absolute tolerance mx /",Ndiv,"= ", atol)
numpy.testing.assert_allclose(oddft.data,sd.data,atol=atol) #1.0e-4) #df)
| en | 0.276768 | # -*- coding: utf-8 -*- #world.params = params # FIXME: also time_units, temperature_units #params = world.params #data[i,2] = numpy.imag(world.cf.data[i]) #plt.plot(world.sd.axis.data,world.sd.data) #plt.plot(world.sd.axis.data,sd_data[:,1],"--r") #plt.show() # Analytical for for the overdamped Brownian spectral density #data[i,2] = numpy.imag(world.cf.data[i]) #1.0e-4) #df) | 2.395646 | 2 |
arena5/core/proxy_env.py | anhddo/ai-arena | 0 | 6614076 |
# ©2020 Johns Hopkins University Applied Physics Laboratory LLC.
import numpy as np
from arena5.core.utils import mpi_print
'''
This file contains magic arena sauce.
A environment proxy conforms to either the standard gym interface or the arena
multiagent version depending on how many entity indexes it is passed.
Algorithms will interact with this proxy within their own process, and the proxy
will use MPI to shuttle the data back and forth to a different process running the environment.
For the environment side, see env_process.py
'''
def make_proxy_env(entity_idxs, obs_spaces, act_spaces, match_comm, match_root_rank):
    """Build the proxy environment appropriate for the given entities.

    A single entity index yields a standard gym-style proxy; multiple
    indexes yield the arena multiagent variant.
    """
    if len(entity_idxs) > 1:
        return ma_proxy_env(entity_idxs, obs_spaces, act_spaces, match_comm, match_root_rank)
    return gym_proxy_env(entity_idxs[0], obs_spaces[0], act_spaces[0], match_comm, match_root_rank)
class gym_proxy_env():
    """Single-agent proxy that mimics the standard gym interface.

    Runs in the policy's process; ``reset()`` and ``step()`` relay data to
    the process that owns the real environment over the match MPI
    communicator (broadcasts/gathers rooted at ``match_root_rank``).
    """

    def __init__(self, entity_idx, obs_space, act_space, match_comm, match_root_rank):
        # Spaces describing this entity's observations and actions.
        self.observation_space = obs_space
        self.action_space = act_space
        # MPI communicator shared with the environment process, and the
        # rank of that process within the communicator.
        self.comm = match_comm
        self.match_root_rank = match_root_rank
        # Index of the entity this proxy controls in the real environment.
        self.entity_idx = entity_idx
        self.is_multiagent = False

    def seed(self, sd):
        # Seeding is handled by the environment process; nothing to do here.
        pass

    def reset(self):
        """Wait for the environment to reset and return this entity's state."""
        # The environment process broadcasts the initial states for all
        # entities; pick out the one belonging to this proxy.
        states = self.comm.bcast(None, root=self.match_root_rank)
        return states[self.entity_idx]

    def step(self, action):
        """Send `action` to the remote env, return (state, reward, done, info)."""
        # Normalize the action to a plain 1D python list so it can be
        # pickled and sent over MPI.  Squeeze first, then restore a 1D
        # shape: squeezing a (1, 1) array yields a 0-d array, which would
        # otherwise serialize as a bare scalar instead of a list.
        action = np.atleast_1d(np.squeeze(np.asarray([action]))).tolist()

        # Send this entity's action to the environment process ...
        action_packet = [[self.entity_idx], [action]]
        self.comm.gather(action_packet, root=self.match_root_rank)

        # ... and wait for the broadcast step result, then pick out the
        # entries belonging to this entity.
        nss, rs, done, infos = self.comm.bcast(None, root=self.match_root_rank)
        return nss[self.entity_idx], rs[self.entity_idx], done, infos[self.entity_idx]
class ma_proxy_env():
    """Multiagent proxy controlling several entities in a remote environment.

    Same role as ``gym_proxy_env``, but ``reset()``/``step()`` take and
    return lists with one entry per controlled entity, in the order given
    by ``entity_idxs``.
    """

    def __init__(self, entity_idxs, obs_spaces, act_spaces, match_comm, match_root_rank):
        # Per-entity observation/action spaces (parallel to entity_idxs).
        self.observation_spaces = obs_spaces
        self.action_spaces = act_spaces
        # MPI communicator shared with the environment process, and the
        # rank of that process within the communicator.
        self.comm = match_comm
        self.match_root_rank = match_root_rank
        # Indexes of the entities this proxy controls in the real environment.
        self.entity_idxs = entity_idxs
        self.is_multiagent = True

    def seed(self, sd):
        # Seeding is handled by the environment process; nothing to do here.
        pass

    def reset(self, **kwargs):
        """Wait for the environment to reset; return this proxy's states."""
        # The environment process broadcasts states for all entities;
        # select the subset controlled by this proxy, in index order.
        states = self.comm.bcast(None, root=self.match_root_rank)
        return [states[idx] for idx in self.entity_idxs]

    def step(self, actions):
        """Send one action per entity; return (states, rewards, done, infos) lists."""
        # Normalize each action to a plain 1D python list so the packet can
        # be pickled and sent over MPI.  Iterating directly (instead of
        # converting the whole batch with np.asarray) also supports ragged
        # per-entity action lists.  Squeeze first, then restore a 1D shape:
        # squeezing a (1, 1) array yields a 0-d array, which would
        # otherwise serialize as a bare scalar instead of a list.
        fmtactions = [np.atleast_1d(np.squeeze(np.asarray([a]))).tolist()
                      for a in actions]

        # Send the actions for all controlled entities ...
        action_packet = [self.entity_idxs, fmtactions]
        self.comm.gather(action_packet, root=self.match_root_rank)

        # ... and wait for the broadcast step result, then pick out the
        # entries belonging to this proxy's entities.
        nss, rs, done, infs = self.comm.bcast(None, root=self.match_root_rank)
        next_states = [nss[idx] for idx in self.entity_idxs]
        rewards = [rs[idx] for idx in self.entity_idxs]
        infos = [infs[idx] for idx in self.entity_idxs]
        return next_states, rewards, done, infos
# ©2020 Johns Hopkins University Applied Physics Laboratory LLC.
import numpy as np
from arena5.core.utils import mpi_print
'''
This file contains magic arena sauce.
A environment proxy conforms to either the standard gym interface or the arena
multiagent version depending on how many entity indexes it is passed.
Algorithms will interact with this proxy within their own process, and the proxy
will use MPI to shuttle the data back and forth to a different process running the environment.
For the environment side, see env_process.py
'''
def make_proxy_env(entity_idxs, obs_spaces, act_spaces, match_comm, match_root_rank):
    """Build the environment proxy appropriate for the number of entities.

    A single entity index yields a standard-gym-style proxy; multiple
    indexes yield the arena multiagent proxy.
    """
    if len(entity_idxs) != 1:
        return ma_proxy_env(entity_idxs, obs_spaces, act_spaces,
                            match_comm, match_root_rank)
    return gym_proxy_env(entity_idxs[0], obs_spaces[0], act_spaces[0],
                         match_comm, match_root_rank)
class gym_proxy_env():
    """Single-agent environment proxy conforming to the standard gym interface.

    Runs inside the algorithm's process and shuttles observations/actions to
    the process that owns the real environment via MPI collectives rooted at
    ``match_root_rank``.  See env_process.py for the environment side.
    """

    def __init__(self, entity_idx, obs_space, act_space, match_comm, match_root_rank):
        self.observation_space = obs_space
        self.action_space = act_space
        self.comm = match_comm              # MPI communicator shared with the env process
        self.match_root_rank = match_root_rank
        self.entity_idx = entity_idx        # which entity in the match this proxy controls
        self.is_multiagent = False

    def seed(self, sd):
        # Seeding is handled by the environment process itself; nothing to do.
        pass

    def reset(self, **kwargs):
        # **kwargs accepted (and ignored) for consistency with
        # ma_proxy_env.reset and with gym environments that take reset options.
        states = self.comm.bcast(None, root=self.match_root_rank)
        return states[self.entity_idx]

    def _format_action(self, action):
        """Normalize ``action`` into a flat python list for MPI transport."""
        action = np.asarray([action])
        # make sure the action is a 1D array before converting to a list
        while len(action.shape) < 1:
            action = np.expand_dims(action, -1)
        while len(action.shape) > 1:
            action = np.squeeze(action)
        return action.tolist()

    def step(self, action):
        # send this entity's action to the main env proc
        action_packet = [[self.entity_idx], [self._format_action(action)]]
        self.comm.gather(action_packet, root=self.match_root_rank)
        # receive the per-entity transition results and select ours
        result = self.comm.bcast(None, root=self.match_root_rank)
        nss, rs, done, infos = result
        ns, r, info = nss[self.entity_idx], rs[self.entity_idx], infos[self.entity_idx]
        return ns, r, done, info
class ma_proxy_env():
    """Multiagent environment proxy (arena interface).

    Controls several entity indexes at once; lists of observations, rewards
    and infos are exchanged with the environment process over MPI collectives
    rooted at ``match_root_rank``.  See env_process.py for the other side.
    """

    def __init__(self, entity_idxs, obs_spaces, act_spaces, match_comm, match_root_rank):
        self.observation_spaces = obs_spaces
        self.action_spaces = act_spaces
        self.comm = match_comm              # MPI communicator shared with the env process
        self.match_root_rank = match_root_rank
        self.entity_idxs = entity_idxs      # indexes of the entities this proxy controls
        self.is_multiagent = True

    def seed(self, sd):
        # Seeding is handled by the environment process itself; nothing to do.
        pass

    def reset(self, **kwargs):
        states = self.comm.bcast(None, root=self.match_root_rank)
        return [states[idx] for idx in self.entity_idxs]

    def _format_action(self, a):
        """Normalize a single entity's action into a flat python list."""
        action = np.asarray([a])
        # make sure the action is a 1D array before converting to a list
        while len(action.shape) < 1:
            action = np.expand_dims(action, -1)
        while len(action.shape) > 1:
            action = np.squeeze(action)
        return action.tolist()

    def step(self, actions):
        # assume `actions` holds one properly formatted action per entity;
        # convert to a list in case it is a numpy array
        actions = np.asarray(actions).tolist()
        fmtactions = [self._format_action(a) for a in actions]
        # send actions to the main env proc
        action_packet = [self.entity_idxs, fmtactions]
        self.comm.gather(action_packet, root=self.match_root_rank)
        # get the resulting transition info and select our entities' slices
        result = self.comm.bcast(None, root=self.match_root_rank)
        nss, rs, done, infs = result
        next_states = [nss[idx] for idx in self.entity_idxs]
        rewards = [rs[idx] for idx in self.entity_idxs]
        infos = [infs[idx] for idx in self.entity_idxs]
        return next_states, rewards, done, infos
scripts/slave/recipe_modules/gatekeeper/test_api.py | bopopescu/chromium-build | 0 | 6614077 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
from recipe_engine import recipe_test_api
# Path to the production trees file. We need to use system os.path to make this
# available as test data whereever the simulation is run.
PROD_TREES_FILE = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', '..', 'gatekeeper_trees.json'))
class GatekeeperTestApi(recipe_test_api.RecipeTestApi):
def fake_test_data(self, data=None):
if not data:
data = self.fake_test_json()
return self.m.json.output(data)
def fake_test_json(self):
return {
'blink': {
'build-db': 'blink_build_db.json',
'masters': {
'https://build.chromium.org/p/chromium.webkit': ["*"],
},
'filter-domain': 'google.com',
'open-tree': True,
'password-file': <PASSWORD>',
'revision-properties': 'got_revision,got_webkit_revision',
'set-status': True,
'sheriff-url': 'https://build.chromium.org/p/chromium/%s.js',
'status-url': 'https://blink-status.appspot.com',
'status-user': '<EMAIL>',
'track-revisions': True,
'use-project-email-address': True,
},
'chromium': {},
}
def production_data(self):
with open(PROD_TREES_FILE) as f:
return self.m.json.output(json.load(f))
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
from recipe_engine import recipe_test_api
# Path to the production trees file. We need to use system os.path to make this
# available as test data whereever the simulation is run.
PROD_TREES_FILE = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', '..', 'gatekeeper_trees.json'))
class GatekeeperTestApi(recipe_test_api.RecipeTestApi):
def fake_test_data(self, data=None):
if not data:
data = self.fake_test_json()
return self.m.json.output(data)
def fake_test_json(self):
return {
'blink': {
'build-db': 'blink_build_db.json',
'masters': {
'https://build.chromium.org/p/chromium.webkit': ["*"],
},
'filter-domain': 'google.com',
'open-tree': True,
'password-file': <PASSWORD>',
'revision-properties': 'got_revision,got_webkit_revision',
'set-status': True,
'sheriff-url': 'https://build.chromium.org/p/chromium/%s.js',
'status-url': 'https://blink-status.appspot.com',
'status-user': '<EMAIL>',
'track-revisions': True,
'use-project-email-address': True,
},
'chromium': {},
}
def production_data(self):
with open(PROD_TREES_FILE) as f:
return self.m.json.output(json.load(f))
| en | 0.918446 | # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Path to the production trees file. We need to use system os.path to make this # available as test data whereever the simulation is run. | 2.003725 | 2 |
app/urls.py | encarcelado/ultra_eruz | 0 | 6614078 | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.urls import path, re_path
from app import views
from rest_framework import routers
# router = routers.DefaultRouter()
# router.register('users', views.AuthUserView)
urlpatterns = [
# Matches any html file
re_path(r'^.*\.html', views.pages, name='pages'),
# The home page
path('', views.index, name='home'),
]
#
# urlpatterns = [
# path('admin/', admin.site.urls),
# path("", include("authentication.urls")), # add this
# path("", include("app.urls")) # add this
# ] | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.urls import path, re_path
from app import views
from rest_framework import routers
# router = routers.DefaultRouter()
# router.register('users', views.AuthUserView)
urlpatterns = [
# Matches any html file
re_path(r'^.*\.html', views.pages, name='pages'),
# The home page
path('', views.index, name='home'),
]
#
# urlpatterns = [
# path('admin/', admin.site.urls),
# path("", include("authentication.urls")), # add this
# path("", include("app.urls")) # add this
# ] | en | 0.550624 | # -*- encoding: utf-8 -*- License: MIT Copyright (c) 2019 - present AppSeed.us # router = routers.DefaultRouter() # router.register('users', views.AuthUserView) # Matches any html file # The home page # # urlpatterns = [ # path('admin/', admin.site.urls), # path("", include("authentication.urls")), # add this # path("", include("app.urls")) # add this # ] | 1.80681 | 2 |
startup/08-accelerator.py | jwlodek/profile_collection | 0 | 6614079 | print(__file__)
from ophyd import (EpicsMotor, Device, Component as Cpt,
EpicsSignal)
#import numpy as np
class Accelerator(Device):
beam_current = Cpt(EpicsSignal, ':OPS-BI{DCCT:1}I:Real-I')
life_time = Cpt(EpicsSignal, ':OPS-BI{DCCT:1}Lifetime-I')
status = Cpt(EpicsSignal,'-OPS{}Mode-Sts')
nsls_ii=Accelerator('SR', name='nsls_ii')
| print(__file__)
from ophyd import (EpicsMotor, Device, Component as Cpt,
EpicsSignal)
#import numpy as np
class Accelerator(Device):
beam_current = Cpt(EpicsSignal, ':OPS-BI{DCCT:1}I:Real-I')
life_time = Cpt(EpicsSignal, ':OPS-BI{DCCT:1}Lifetime-I')
status = Cpt(EpicsSignal,'-OPS{}Mode-Sts')
nsls_ii=Accelerator('SR', name='nsls_ii')
| en | 0.779721 | #import numpy as np | 2.361414 | 2 |
lib/gaussian_process/gaussian_process.py | thangbk2209/mfea_autoscaling | 0 | 6614080 | <reponame>thangbk2209/mfea_autoscaling
# example of bayesian optimization for a 1d function from scratch
import math
from math import pi
from operator import indexOf
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy import arange
from numpy import vstack
from numpy import argmax, argmin
from numpy import asarray
from numpy.core.fromnumeric import argmin
from numpy.random import random
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import ExpSineSquared, WhiteKernel, ConstantKernel
from warnings import catch_warnings
from warnings import simplefilter
from matplotlib import pyplot
#from skopt import gp_minimize
from lib.includes.utility import *
from config import *
# example of bayesian optimization for a 1d function from scratch
from math import sin
from math import pi
from operator import indexOf
import numpy as np
from numpy import arange
from numpy import vstack
from numpy import argmax,argmin
from mpl_toolkits.mplot3d import Axes3D
from lib.includes.utility import *
from config import *
from numpy import asarray
from numpy.core.fromnumeric import argmin
from numpy.random import normal
from numpy.random import random
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import ExpSineSquared, WhiteKernel, ConstantKernel
from warnings import catch_warnings
from warnings import simplefilter
from matplotlib import pyplot
from lib.includes.utility import *
from config import *
class SingleGaussProcess:
def __init__(self,objective):
self.objective = objective
self.cloud_metrics = {
'train_data_type': 'mem',
'predict_data': 'mem'
}
self.x = []
self.y = []
self.name = []
self.max_iteration = Config.MAX_ITER
self.estimate=[0]
if Config.GOOGLE_TRACE_DATA_CONFIG['train_data_type'] == "mem":
self.cloud_metrics = {
'train_data_type': 'cpu',
'predict_data': 'cpu'
}
self._parse_domain()
def _parse_domain(self):
domain = Config.LSTM_CONFIG['domain']
names = []
type_attr = []
max_val = []
min_val = []
range_val = []
for attr in domain:
names.append(attr['name'])
type_attr.append(attr['type'])
if attr['type'] == 'discrete':
min_val.append(attr['domain'][0])
max_val.append(attr['domain'][len(attr['domain']) - 1])
elif attr['type'] == 'continuous':
min_val.append(attr['domain'][0])
max_val.append(attr['domain'][1])
range_val.append(attr['domain'])
Xsample=[]
for index,value in enumerate(type_attr):
if value == 'discrete':
_x = (np.random.choice(range_val[index])-min_val[index])/(max_val[index]-min_val[index])
#print(_x)
Xsample.append(_x)
if value == 'continuous':
_x = (np.random.rand() * (max_val[index] - min_val[index]))/(max_val[index]-min_val[index])
Xsample.append(_x)
self.name = names
self.type_attr = type_attr
self.max_val = np.array(max_val)
self.min_val = np.array(min_val)
self.range_val = range_val
self.x.append(Xsample)
#print(self.convert_sample(Xsample))
self.y.append(self.objective(self.decode_sample(self.convert_sample(Xsample)), cloud_metrics=self.cloud_metrics)[0])
def convert_sample(self,sample):
x = []
for i in range(len(sample)):
if i in [0,1]:
x.append(int(int(sample[i]*(self.max_val[i]-self.min_val[i]))+self.min_val[i]))
elif i in [2,3,4]:
x.append(int(int(sample[int(i)]*(self.max_val[int(i)]-self.min_val[int(i)]))+self.min_val[int(i)]))
elif i in [5,6]:
x.append(sample[i]*(self.max_val[i]-self.min_val[i])+self.min_val[i])
else:
x.append(int(sample[i]*(self.max_val[i]-self.min_val[i])+self.min_val[i]))
return x #x_mem
def decode_sample(self, position):
result = {}
for i,name in enumerate(self.name):
result[name] = position[i]
return result
# surrogate or approximation for the objective function
def surrogate(self,model, X):
# catch any warning generated when making a prediction
with catch_warnings():
# ignore generated warnings
simplefilter("ignore")
return model.predict(X, return_std=True)
# probability of improvement acquisition function
def acquisition(self, X, Xsamples, model):
# calculate the best surrogate score found so far
yhat, _ = self.surrogate(model, X)
best = min(yhat)
# calculate mean and stdev via surrogate function
mu, std = self.surrogate(model, Xsamples)
try:
mu = mu[:, 0]
except:
mu=mu
# calculate the probability of improvement
probs = norm.cdf((mu - best) / (std+1E-9))
return probs
def opt_acquisition(self, X, y, model):
# random search, generate random samples
Xsamples = []
for j in range(100):
x_sample = []
for index,value in enumerate(self.type_attr):
if value == 'discrete':
_x = (np.random.choice(self.range_val[index])-self.min_val[index])/(self.max_val[index]-self.min_val[index])
x_sample.append(_x)
if value == 'continuous':
_x = (np.random.rand() * (self.max_val[index] - self.min_val[index]))/(self.max_val[index]-self.min_val[index])
x_sample.append(_x)
Xsamples.append(x_sample)
# calculate the acquisition function for each sample
scores = self.acquisition(X, Xsamples, model)
ix = argmin(scores)
print(ix)
return Xsamples[ix]
def optimize(self):
model = GaussianProcessRegressor()
for i in range(self.max_iteration):
# select the next point to sample
x = self.opt_acquisition(self.x,self.y, model)
# sample the point
print(self.convert_sample(x))
print(x)
actual = self.objective(self.decode_sample(self.convert_sample(x)),cloud_metrics=self.cloud_metrics)[0]
# summarize the finding
est, _ = self.surrogate(model, [x])
#self.estimate=[0]
#print(self.x)
print('>x1={},x2={}, f()={}, actual={}'.format(x[0],x[1], est, actual))
# add the data to the dataset
if not math.isnan(actual):
self.x = vstack((self.x, [x]))
self.y = vstack((self.y, [actual]))
self.estimate.append(est)
#self.estimate.append(est)
#self.x = vstack((self.x, [x]))
#self.y = vstack((self.y, [actual]))
# update the model
model.fit(self.x, self.y)
ix = argmin(self.y)
print('Best Result: x1=%.3f,x2=%3f, y=%.3f' % (self.x[ix][0],self.x[ix][1], self.y[ix]))
return self.x[ix]
if Config.GOOGLE_TRACE_DATA_CONFIG['train_data_type'] == "mem":
files = open("gaussprocess_singletask_mem_result.csv","w")
elif Config.GOOGLE_TRACE_DATA_CONFIG['train_data_type'] == "cpu":
files = open("gaussprocess_singletask_cpu_result.csv","w")
files.write("x;y_estimate;y_mem_actual\n")
for i in range(len(self.y)):
#print(i)
files.write("{};{};{}\n".format(self.decode_sample(self.x[i]),self.y_estimate[i], self.y[i]))
return self.x[optimal_sample_idx]
files.close()
class GaussProcess:
def __init__(self, objective_function):
self.objective_function = objective_function
self.x = [] # Chromosome that has fitness value
self.y = [] # Fit value of chromosome in X
self.name = []
self.estimate=[0]
self.cloud_metrics = {
'train_data_type': 'cpu',
'predict_data': 'cpu'
}
self.alpha = Config.ALPHA
self.population_size = Config.POPULATION_SIZE
self.max_iteration = Config.MAX_ITER
self.x_cpu , self.x_mem, self.y_cpu_actual, self.y_mem_actual = [], [] , [], []
self._parse_domain()
def gen_sample(self):
x_sample = []
for index, value in enumerate(self.type_attr):
if value == 'discrete':
_x = (np.random.choice(self.range_val[index])-self.min_val[index])/(self.max_val[index]-self.min_val[index])
#print(_x)
x_sample.append(_x)
#x_sample_memory.append(_x)
if value == 'continuous':
# _old_x = self.min_val + (self.max_val - self.min_val) * np.random.rand(len(self.type_attr))
# _x = np.round(np.random.rand() * (self.max_val[index] - self.min_val[index]) + self.min_val[index], 5)
_x = (np.random.rand() * (self.max_val[index] - self.min_val[index]))/(self.max_val[index]-self.min_val[index])
x_sample.append(_x)
#x_sample_memory.append(_x)
if self.name[index] in ["sliding","network_size","layer_size"]:
if value == 'discrete':
_x = (np.random.choice(self.range_val[index])-self.min_val[index])/(self.max_val[index]-self.min_val[index])
x_sample.append(_x)
if value == 'continuous':
# _old_x = self.min_val + (self.max_val - self.min_val) * np.random.rand(len(self.type_attr))
# _x = np.round(np.random.rand() * (self.max_val[index] - self.min_val[index]) + self.min_val[index], 5)
_x = (np.random.rand() * (self.max_val[index] - self.min_val[index]))/(self.max_val[index]-self.min_val[index])
x_sample.append(_x)
#print(x_sample)
return x_sample
def _parse_domain(self):
domain = Config.LSTM_CONFIG['domain']
names = []
type_attr = []
max_val = []
min_val = []
range_val = []
for attr in domain:
names.append(attr['name'])
type_attr.append(attr['type'])
if attr['type'] == 'discrete':
min_val.append(attr['domain'][0])
max_val.append(attr['domain'][len(attr['domain']) - 1])
elif attr['type'] == 'continuous':
min_val.append(attr['domain'][0])
max_val.append(attr['domain'][1])
range_val.append(attr['domain'])
self.name = names
self.type_attr = type_attr
self.max_val = np.array(max_val)
self.min_val = np.array(min_val)
self.range_val = range_val
x_sample = self.gen_sample()
print(x_sample)
self.x.append(x_sample)
x_cpu,x_mem = self.split_sample(x_sample)
self.x_cpu.append(self.decode_sample(x_cpu))
self.x_mem.append(self.decode_sample(x_mem))
y_cpu = self.objective_function(self.decode_sample(x_cpu),cloud_metrics=self.cloud_metrics)[0]
y_mem = self.objective_function(self.decode_sample(x_mem))[0] # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function
self.y_cpu_actual.append(y_cpu)
self.y_mem_actual.append(y_mem)
self.y.append(self.alpha*y_cpu + (1-self.alpha)*y_mem)
def split_sample(self,sample):
x_cpu = []
x_mem = []
#print(sample)
for i in range(len(sample)):
if i in [0,1]:
x_cpu.append(int(sample[i]*(self.max_val[i]-self.min_val[i]))+self.min_val[i])
x_mem.append(int(sample[i]*(self.max_val[i]-self.min_val[i]))+self.min_val[i])
elif i in [2,4,6]:
x_cpu.append(int(sample[int(i-(i-2)/2)]*(self.max_val[int(i-(i-2)/2)]-self.min_val[int(i-(i-2)/2)]))+self.min_val[int(i-(i-2)/2)])
elif i in [3,5,7]:
x_mem.append(int(sample[int(i-1-(i-3)/2)]*(self.max_val[int(i-1-(i-3)/2)]-self.min_val[int(i-1-(i-3)/2)]))+self.min_val[int(i-1-(i-3)/2)])
elif i in [8,9]:
x_cpu.append(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])+self.min_val[i-3])
x_mem.append(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])+self.min_val[i-3])
else:
x_cpu.append(int(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])))
x_mem.append(int(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])))
#print(x_cpu,x_mem)
return x_cpu, x_mem
def decode_sample(self, sample):
result = {}
for i, name in enumerate(self.name):
if name in ["learning_rate","dropout"]:
result[name] = sample[i]
else:
result[name]=int(sample[i])
return result
# surrogate or approximation for the objective function
def surrogate(self, x):
# catch any warning generated when making a prediction
with catch_warnings():
# ignore generated warnings
simplefilter('ignore')
return self.gaussian_process_model.predict(x, return_std=True)
# probability of improvement acquisition function
def acquisition(self, x, x_samples):
# calculate the best surrogate score found so far
yhat, _ = self.surrogate(x)
best = min(yhat)
# calculate mean and stdev via surrogate function
mu, std = self.surrogate(x_samples)
try:
mu = mu[:, 0]
except:
mu = mu
# calculate the probability of improvement
probs = norm.cdf((mu - best) / (std + 1E-9))
return probs
def opt_acquisition(self, x):
# random search, generate random samples
x_samples = []
for j in range(self.population_size):
x_sample = self.gen_sample()
x_samples.append(x_sample)
#print(x[:,0])
#print("_____________________________")
#print(x_samples[:,0])
# calculate the acquisition function for each sample
scores = self.acquisition(x, x_samples)
min_sample_idx = argmin(scores)
#min_sample_idx2 = argmin(scores)
return x_samples[min_sample_idx]
def optimize(self):
self.gaussian_process_model = GaussianProcessRegressor()
#self.gaussian_process_model_mem = GaussianProcessRegressor()
for i in range(self.max_iteration):
# select the next point to sample
x = self.opt_acquisition(self.x)
# sample the point
x_cpu, x_mem = self.split_sample(x)
y_cpu_actual = self.objective_function(self.decode_sample(x_cpu),cloud_metrics=self.cloud_metrics)[0]
y_mem_actual = self.objective_function(self.decode_sample(x_mem))[0] # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function
actual = self.alpha*y_cpu_actual + (1-self.alpha)*y_mem_actual
# summarize the finding
est, _ = self.surrogate([x])
#est1, _1 = self.surrogate([x[0]],type="cpu")
#print(est)
print('>x={}, f()={}, actual={}'.format(x, est, actual))
#print('>x1={},c f()={}, actual={}'.format(x[1], est1, actual))
# add the data to the dataset
if not math.isnan(actual):
self.x_cpu.append(self.decode_sample(x_cpu))
self.x_mem.append(self.decode_sample(x_mem))
y_cpu = self.objective_function(self.decode_sample(x_cpu),cloud_metrics=self.cloud_metrics)[0]
#y_mem = self.objective_function(self.decode_sample(x_mem))[0] # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function
self.y_cpu_actual.append(y_cpu_actual)
self.y_mem_actual.append(y_mem_actual)
self.x = vstack((self.x, [x]))
self.y = vstack((self.y, [actual]))
self.estimate.append(est)
# update the gausian model
self.gaussian_process_model.fit(self.x, self.y)
#self.gaussian_process_model_mem.fit(self.x[:,0], self.y[:,0])
optimal_sample_idx = argmin(self.y)
print(f'Best Result: x1={self.x[optimal_sample_idx][0]},x2={self.x[optimal_sample_idx][1]}, y={self.y[optimal_sample_idx]}')
files = open("gaussprocess_mutitask_result.csv","w")
files.write("x_cpu;x_mem,y;y_cpu_actual;y_mem_actual\n")
print(len(self.x))
print(len(self.y))
print(len(self.estimate))
for i in range(len(self.y)):
print(i)
files.write("{};{};{};{};{}\n".format(self.x_cpu[i],self.x_mem[i] , self.estimate[i], self.y_cpu_actual[i], self.y_mem_actual[i]))
return self.x[optimal_sample_idx]
| # example of bayesian optimization for a 1d function from scratch
import math
from math import pi
from operator import indexOf
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy import arange
from numpy import vstack
from numpy import argmax, argmin
from numpy import asarray
from numpy.core.fromnumeric import argmin
from numpy.random import random
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import ExpSineSquared, WhiteKernel, ConstantKernel
from warnings import catch_warnings
from warnings import simplefilter
from matplotlib import pyplot
#from skopt import gp_minimize
from lib.includes.utility import *
from config import *
# example of bayesian optimization for a 1d function from scratch
from math import sin
from math import pi
from operator import indexOf
import numpy as np
from numpy import arange
from numpy import vstack
from numpy import argmax,argmin
from mpl_toolkits.mplot3d import Axes3D
from lib.includes.utility import *
from config import *
from numpy import asarray
from numpy.core.fromnumeric import argmin
from numpy.random import normal
from numpy.random import random
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import ExpSineSquared, WhiteKernel, ConstantKernel
from warnings import catch_warnings
from warnings import simplefilter
from matplotlib import pyplot
from lib.includes.utility import *
from config import *
class SingleGaussProcess:
def __init__(self,objective):
self.objective = objective
self.cloud_metrics = {
'train_data_type': 'mem',
'predict_data': 'mem'
}
self.x = []
self.y = []
self.name = []
self.max_iteration = Config.MAX_ITER
self.estimate=[0]
if Config.GOOGLE_TRACE_DATA_CONFIG['train_data_type'] == "mem":
self.cloud_metrics = {
'train_data_type': 'cpu',
'predict_data': 'cpu'
}
self._parse_domain()
def _parse_domain(self):
domain = Config.LSTM_CONFIG['domain']
names = []
type_attr = []
max_val = []
min_val = []
range_val = []
for attr in domain:
names.append(attr['name'])
type_attr.append(attr['type'])
if attr['type'] == 'discrete':
min_val.append(attr['domain'][0])
max_val.append(attr['domain'][len(attr['domain']) - 1])
elif attr['type'] == 'continuous':
min_val.append(attr['domain'][0])
max_val.append(attr['domain'][1])
range_val.append(attr['domain'])
Xsample=[]
for index,value in enumerate(type_attr):
if value == 'discrete':
_x = (np.random.choice(range_val[index])-min_val[index])/(max_val[index]-min_val[index])
#print(_x)
Xsample.append(_x)
if value == 'continuous':
_x = (np.random.rand() * (max_val[index] - min_val[index]))/(max_val[index]-min_val[index])
Xsample.append(_x)
self.name = names
self.type_attr = type_attr
self.max_val = np.array(max_val)
self.min_val = np.array(min_val)
self.range_val = range_val
self.x.append(Xsample)
#print(self.convert_sample(Xsample))
self.y.append(self.objective(self.decode_sample(self.convert_sample(Xsample)), cloud_metrics=self.cloud_metrics)[0])
def convert_sample(self,sample):
x = []
for i in range(len(sample)):
if i in [0,1]:
x.append(int(int(sample[i]*(self.max_val[i]-self.min_val[i]))+self.min_val[i]))
elif i in [2,3,4]:
x.append(int(int(sample[int(i)]*(self.max_val[int(i)]-self.min_val[int(i)]))+self.min_val[int(i)]))
elif i in [5,6]:
x.append(sample[i]*(self.max_val[i]-self.min_val[i])+self.min_val[i])
else:
x.append(int(sample[i]*(self.max_val[i]-self.min_val[i])+self.min_val[i]))
return x #x_mem
def decode_sample(self, position):
result = {}
for i,name in enumerate(self.name):
result[name] = position[i]
return result
# surrogate or approximation for the objective function
def surrogate(self,model, X):
# catch any warning generated when making a prediction
with catch_warnings():
# ignore generated warnings
simplefilter("ignore")
return model.predict(X, return_std=True)
# probability of improvement acquisition function
def acquisition(self, X, Xsamples, model):
# calculate the best surrogate score found so far
yhat, _ = self.surrogate(model, X)
best = min(yhat)
# calculate mean and stdev via surrogate function
mu, std = self.surrogate(model, Xsamples)
try:
mu = mu[:, 0]
except:
mu=mu
# calculate the probability of improvement
probs = norm.cdf((mu - best) / (std+1E-9))
return probs
def opt_acquisition(self, X, y, model):
# random search, generate random samples
Xsamples = []
for j in range(100):
x_sample = []
for index,value in enumerate(self.type_attr):
if value == 'discrete':
_x = (np.random.choice(self.range_val[index])-self.min_val[index])/(self.max_val[index]-self.min_val[index])
x_sample.append(_x)
if value == 'continuous':
_x = (np.random.rand() * (self.max_val[index] - self.min_val[index]))/(self.max_val[index]-self.min_val[index])
x_sample.append(_x)
Xsamples.append(x_sample)
# calculate the acquisition function for each sample
scores = self.acquisition(X, Xsamples, model)
ix = argmin(scores)
print(ix)
return Xsamples[ix]
def optimize(self):
model = GaussianProcessRegressor()
for i in range(self.max_iteration):
# select the next point to sample
x = self.opt_acquisition(self.x,self.y, model)
# sample the point
print(self.convert_sample(x))
print(x)
actual = self.objective(self.decode_sample(self.convert_sample(x)),cloud_metrics=self.cloud_metrics)[0]
# summarize the finding
est, _ = self.surrogate(model, [x])
#self.estimate=[0]
#print(self.x)
print('>x1={},x2={}, f()={}, actual={}'.format(x[0],x[1], est, actual))
# add the data to the dataset
if not math.isnan(actual):
self.x = vstack((self.x, [x]))
self.y = vstack((self.y, [actual]))
self.estimate.append(est)
#self.estimate.append(est)
#self.x = vstack((self.x, [x]))
#self.y = vstack((self.y, [actual]))
# update the model
model.fit(self.x, self.y)
ix = argmin(self.y)
print('Best Result: x1=%.3f,x2=%3f, y=%.3f' % (self.x[ix][0],self.x[ix][1], self.y[ix]))
return self.x[ix]
if Config.GOOGLE_TRACE_DATA_CONFIG['train_data_type'] == "mem":
files = open("gaussprocess_singletask_mem_result.csv","w")
elif Config.GOOGLE_TRACE_DATA_CONFIG['train_data_type'] == "cpu":
files = open("gaussprocess_singletask_cpu_result.csv","w")
files.write("x;y_estimate;y_mem_actual\n")
for i in range(len(self.y)):
#print(i)
files.write("{};{};{}\n".format(self.decode_sample(self.x[i]),self.y_estimate[i], self.y[i]))
return self.x[optimal_sample_idx]
files.close()
class GaussProcess:
    """Bayesian optimization with a Gaussian-process surrogate.

    One encoded sample (each attribute scaled into [0, 1]) is decoded and
    split into a CPU-oriented and a memory-oriented configuration; the
    scalar objective being minimized is ``alpha*y_cpu + (1-alpha)*y_mem``.
    """
    def __init__(self, objective_function):
        # objective_function(decoded_sample, ...) -> sequence whose [0] is fitness
        self.objective_function = objective_function
        self.x = [] # Chromosome that has fitness value
        self.y = [] # Fit value of chromosome in X
        self.name = []
        self.estimate=[0]
        # fixed metrics forwarded to the objective for the CPU evaluation
        self.cloud_metrics = {
            'train_data_type': 'cpu',
            'predict_data': 'cpu'
        }
        self.alpha = Config.ALPHA
        self.population_size = Config.POPULATION_SIZE
        self.max_iteration = Config.MAX_ITER
        self.x_cpu , self.x_mem, self.y_cpu_actual, self.y_mem_actual = [], [] , [], []
        # reads Config.LSTM_CONFIG['domain'] and evaluates one seed sample
        self._parse_domain()
    def gen_sample(self):
        """Draw one random sample with every attribute scaled into [0, 1]."""
        x_sample = []
        for index, value in enumerate(self.type_attr):
            if value == 'discrete':
                _x = (np.random.choice(self.range_val[index])-self.min_val[index])/(self.max_val[index]-self.min_val[index])
                #print(_x)
                x_sample.append(_x)
                #x_sample_memory.append(_x)
            if value == 'continuous':
                # _old_x = self.min_val + (self.max_val - self.min_val) * np.random.rand(len(self.type_attr))
                # _x = np.round(np.random.rand() * (self.max_val[index] - self.min_val[index]) + self.min_val[index], 5)
                _x = (np.random.rand() * (self.max_val[index] - self.min_val[index]))/(self.max_val[index]-self.min_val[index])
                x_sample.append(_x)
                #x_sample_memory.append(_x)
            # these attributes are sampled twice: once for the CPU config and
            # once for the memory config (see split_sample's index layout)
            if self.name[index] in ["sliding","network_size","layer_size"]:
                if value == 'discrete':
                    _x = (np.random.choice(self.range_val[index])-self.min_val[index])/(self.max_val[index]-self.min_val[index])
                    x_sample.append(_x)
                if value == 'continuous':
                    # _old_x = self.min_val + (self.max_val - self.min_val) * np.random.rand(len(self.type_attr))
                    # _x = np.round(np.random.rand() * (self.max_val[index] - self.min_val[index]) + self.min_val[index], 5)
                    _x = (np.random.rand() * (self.max_val[index] - self.min_val[index]))/(self.max_val[index]-self.min_val[index])
                    x_sample.append(_x)
        #print(x_sample)
        return x_sample
    def _parse_domain(self):
        """Parse the search domain and seed the optimizer with one sample."""
        domain = Config.LSTM_CONFIG['domain']
        names = []
        type_attr = []
        max_val = []
        min_val = []
        range_val = []
        for attr in domain:
            names.append(attr['name'])
            type_attr.append(attr['type'])
            if attr['type'] == 'discrete':
                # discrete domains list every admissible value in order
                min_val.append(attr['domain'][0])
                max_val.append(attr['domain'][len(attr['domain']) - 1])
            elif attr['type'] == 'continuous':
                # continuous domains are (low, high) bounds
                min_val.append(attr['domain'][0])
                max_val.append(attr['domain'][1])
            range_val.append(attr['domain'])
        self.name = names
        self.type_attr = type_attr
        self.max_val = np.array(max_val)
        self.min_val = np.array(min_val)
        self.range_val = range_val
        # seed the optimizer with one random, fully evaluated sample
        x_sample = self.gen_sample()
        print(x_sample)
        self.x.append(x_sample)
        x_cpu,x_mem = self.split_sample(x_sample)
        self.x_cpu.append(self.decode_sample(x_cpu))
        self.x_mem.append(self.decode_sample(x_mem))
        y_cpu = self.objective_function(self.decode_sample(x_cpu),cloud_metrics=self.cloud_metrics)[0]
        y_mem = self.objective_function(self.decode_sample(x_mem))[0] # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function
        self.y_cpu_actual.append(y_cpu)
        self.y_mem_actual.append(y_mem)
        # scalarize the two objectives into the single value the GP models
        self.y.append(self.alpha*y_cpu + (1-self.alpha)*y_mem)
    def split_sample(self,sample):
        """Split one scaled sample into raw-valued CPU and memory configs.

        NOTE(review): the index arithmetic hard-codes the attribute layout
        (slots 0-1 shared discrete, 2-7 alternating cpu/mem duplicates,
        8-9 shared continuous, remainder shared discrete) — confirm this
        matches Config.LSTM_CONFIG['domain'] before changing anything.
        """
        x_cpu = []
        x_mem = []
        #print(sample)
        for i in range(len(sample)):
            if i in [0,1]:
                # shared discrete attributes, rescaled back to raw values
                x_cpu.append(int(sample[i]*(self.max_val[i]-self.min_val[i]))+self.min_val[i])
                x_mem.append(int(sample[i]*(self.max_val[i]-self.min_val[i]))+self.min_val[i])
            elif i in [2,4,6]:
                # duplicated attributes: even slots belong to the CPU config
                x_cpu.append(int(sample[int(i-(i-2)/2)]*(self.max_val[int(i-(i-2)/2)]-self.min_val[int(i-(i-2)/2)]))+self.min_val[int(i-(i-2)/2)])
            elif i in [3,5,7]:
                # duplicated attributes: odd slots belong to the memory config
                x_mem.append(int(sample[int(i-1-(i-3)/2)]*(self.max_val[int(i-1-(i-3)/2)]-self.min_val[int(i-1-(i-3)/2)]))+self.min_val[int(i-1-(i-3)/2)])
            elif i in [8,9]:
                # shared continuous attributes (kept as floats)
                x_cpu.append(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])+self.min_val[i-3])
                x_mem.append(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])+self.min_val[i-3])
            else:
                # remaining shared discrete attributes
                # NOTE(review): min_val is NOT added back here, unlike the
                # branches above — confirm this asymmetry is intentional
                x_cpu.append(int(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])))
                x_mem.append(int(sample[i-3]*(self.max_val[i-3]-self.min_val[i-3])))
        #print(x_cpu,x_mem)
        return x_cpu, x_mem
    def decode_sample(self, sample):
        """Map raw values onto attribute names (ints except learning_rate/dropout)."""
        result = {}
        for i, name in enumerate(self.name):
            if name in ["learning_rate","dropout"]:
                result[name] = sample[i]
            else:
                result[name]=int(sample[i])
        return result
    # surrogate or approximation for the objective function
    def surrogate(self, x):
        """Predict (mean, std) for x with the fitted GP, warnings suppressed."""
        # catch any warning generated when making a prediction
        with catch_warnings():
            # ignore generated warnings
            simplefilter('ignore')
            return self.gaussian_process_model.predict(x, return_std=True)
    # probability of improvement acquisition function
    def acquisition(self, x, x_samples):
        """Score x_samples by probability of improvement over the known x."""
        # calculate the best surrogate score found so far
        yhat, _ = self.surrogate(x)
        best = min(yhat)
        # calculate mean and stdev via surrogate function
        mu, std = self.surrogate(x_samples)
        try:
            # flatten a (n, 1) prediction to 1-D
            mu = mu[:, 0]
        except:
            # NOTE(review): bare except — mu is presumably already 1-D here,
            # but this would also hide unrelated errors; consider IndexError/TypeError
            mu = mu
        # calculate the probability of improvement
        # NOTE(review): (mu - best) rewards LARGER mu although opt_acquisition
        # then takes argmin of these scores — confirm the sign convention
        probs = norm.cdf((mu - best) / (std + 1E-9))
        return probs
    def opt_acquisition(self, x):
        """Propose the next point: best of `population_size` random draws."""
        # random search, generate random samples
        x_samples = []
        for j in range(self.population_size):
            x_sample = self.gen_sample()
            x_samples.append(x_sample)
        #print(x[:,0])
        #print("_____________________________")
        #print(x_samples[:,0])
        # calculate the acquisition function for each sample
        scores = self.acquisition(x, x_samples)
        min_sample_idx = argmin(scores)
        #min_sample_idx2 = argmin(scores)
        return x_samples[min_sample_idx]
    def optimize(self):
        """Run the BO loop, dump results to CSV, return the best sample."""
        self.gaussian_process_model = GaussianProcessRegressor()
        #self.gaussian_process_model_mem = GaussianProcessRegressor()
        for i in range(self.max_iteration):
            # select the next point to sample
            x = self.opt_acquisition(self.x)
            # sample the point
            x_cpu, x_mem = self.split_sample(x)
            y_cpu_actual = self.objective_function(self.decode_sample(x_cpu),cloud_metrics=self.cloud_metrics)[0]
            y_mem_actual = self.objective_function(self.decode_sample(x_mem))[0] # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function
            actual = self.alpha*y_cpu_actual + (1-self.alpha)*y_mem_actual
            # summarize the finding
            est, _ = self.surrogate([x])
            #est1, _1 = self.surrogate([x[0]],type="cpu")
            #print(est)
            print('>x={}, f()={}, actual={}'.format(x, est, actual))
            #print('>x1={},c f()={}, actual={}'.format(x[1], est1, actual))
            # add the data to the dataset (skip NaN objective values)
            if not math.isnan(actual):
                self.x_cpu.append(self.decode_sample(x_cpu))
                self.x_mem.append(self.decode_sample(x_mem))
                # NOTE(review): this re-evaluates the CPU objective but the
                # result (y_cpu) is discarded — y_cpu_actual is appended below.
                # Confirm whether the extra evaluation is needed at all.
                y_cpu = self.objective_function(self.decode_sample(x_cpu),cloud_metrics=self.cloud_metrics)[0]
                #y_mem = self.objective_function(self.decode_sample(x_mem))[0] # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function
                self.y_cpu_actual.append(y_cpu_actual)
                self.y_mem_actual.append(y_mem_actual)
                self.x = vstack((self.x, [x]))
                self.y = vstack((self.y, [actual]))
                self.estimate.append(est)
            # update the gausian model
            self.gaussian_process_model.fit(self.x, self.y)
            #self.gaussian_process_model_mem.fit(self.x[:,0], self.y[:,0])
        optimal_sample_idx = argmin(self.y)
        print(f'Best Result: x1={self.x[optimal_sample_idx][0]},x2={self.x[optimal_sample_idx][1]}, y={self.y[optimal_sample_idx]}')
        files = open("gaussprocess_mutitask_result.csv","w")
        # NOTE(review): header mixes ';' and ',' separators ("x_cpu;x_mem,y;...")
        # and `files` is never closed — consider a with-block and a ';' header
        files.write("x_cpu;x_mem,y;y_cpu_actual;y_mem_actual\n")
        print(len(self.x))
        print(len(self.y))
        print(len(self.estimate))
        for i in range(len(self.y)):
            print(i)
            files.write("{};{};{};{};{}\n".format(self.x_cpu[i],self.x_mem[i] , self.estimate[i], self.y_cpu_actual[i], self.y_mem_actual[i]))
return self.x[optimal_sample_idx] | en | 0.512077 | # example of bayesian optimization for a 1d function from scratch #from skopt import gp_minimize # example of bayesian optimization for a 1d function from scratch #print(_x) #print(self.convert_sample(Xsample)) #x_mem # surrogate or approximation for the objective function # catch any warning generated when making a prediction # ignore generated warnings # probability of improvement acquisition function # calculate the best surrogate score found so far # calculate mean and stdev via surrogate function # calculate the probability of improvement # random search, generate random samples # calculate the acquisition function for each sample # select the next point to sample # sample the point # summarize the finding #self.estimate=[0] #print(self.x) # add the data to the dataset #self.estimate.append(est) #self.x = vstack((self.x, [x])) #self.y = vstack((self.y, [actual])) # update the model #print(i) # Chromosome that has fitness value # Fit value of chromosome in X #print(_x) #x_sample_memory.append(_x) # _old_x = self.min_val + (self.max_val - self.min_val) * np.random.rand(len(self.type_attr)) # _x = np.round(np.random.rand() * (self.max_val[index] - self.min_val[index]) + self.min_val[index], 5) #x_sample_memory.append(_x) # _old_x = self.min_val + (self.max_val - self.min_val) * np.random.rand(len(self.type_attr)) # _x = np.round(np.random.rand() * (self.max_val[index] - self.min_val[index]) + self.min_val[index], 5) #print(x_sample) # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function #print(sample) #print(x_cpu,x_mem) # surrogate or approximation for the objective function # catch any warning generated when making a prediction # ignore generated warnings # probability of improvement acquisition function # calculate the best surrogate score found so far # calculate mean and stdev via surrogate function # calculate the probability of improvement # random search, generate random 
samples #print(x[:,0]) #print("_____________________________") #print(x_samples[:,0]) # calculate the acquisition function for each sample #min_sample_idx2 = argmin(scores) #self.gaussian_process_model_mem = GaussianProcessRegressor() # select the next point to sample # sample the point # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function # summarize the finding #est1, _1 = self.surrogate([x[0]],type="cpu") #print(est) #print('>x1={},c f()={}, actual={}'.format(x[1], est1, actual)) # add the data to the dataset #y_mem = self.objective_function(self.decode_sample(x_mem))[0] # @TODO thangbk2209 need to add fitness_type and cloud_metrics into objective_function # update the gausian model #self.gaussian_process_model_mem.fit(self.x[:,0], self.y[:,0]) | 2.466784 | 2 |
Kivy/Kivy/Bk_Interractive/My/C2/Image/image.py | pimier15/PyGUI | 0 | 6614081 | from kivy.app import App
from kivy.uix.relativelayout import RelativeLayout
class ImageDraw(RelativeLayout):
    # Root layout widget; presumably its visuals come from a matching
    # .kv rule (TODO confirm — no drawing code is defined in Python here).
    pass
class ImageApp(App):
    """Minimal Kivy application whose root widget is an ImageDraw layout."""
    def build(self):
        # Kivy calls build() once to obtain the root widget of the window.
        return ImageDraw()
if __name__ == '__main__':
    # Start the Kivy event loop when executed as a script.
    ImageApp().run()
| from kivy.app import App
from kivy.uix.relativelayout import RelativeLayout
class ImageDraw(RelativeLayout):
    # Root layout widget; presumably its visuals come from a matching
    # .kv rule (TODO confirm — no drawing code is defined in Python here).
    pass
class ImageApp(App):
    """Minimal Kivy application whose root widget is an ImageDraw layout."""
    def build(self):
        # Kivy calls build() once to obtain the root widget of the window.
        return ImageDraw()
if __name__ == '__main__':
    # Start the Kivy event loop when executed as a script.
    ImageApp().run()
| none | 1 | 2.017912 | 2 | |
scripts/prepare-submission.py | trmznt/ncov19-pipeline | 4 | 6614082 | <filename>scripts/prepare-submission.py
from seqpy import cout, cerr
from seqpy.core import bioio
from seqpy.cmds import arg_parser
import pandas as pd
def prepare_submission(args):
    """Prepare GISAID submission files from metadata, summary and sequences.

    Writes ``<outprefix>.csv`` (metadata restricted to submitted samples,
    with coverage/technology/assembly columns filled in) and
    ``<outprefix>.fas`` (sequences renamed to their GISAID virus names,
    with leading/trailing alignment gap characters stripped).
    """
    out_metadata = args.outprefix + '.csv'
    out_fasta = args.outprefix + '.fas'
    # open metadata file; infer the column separator from the extension
    if args.metafile.lower().endswith('.csv'):
        separator = ','
    elif args.metafile.lower().endswith('.tsv'):
        # bugfix: this branch previously called the misspelled .lowe()
        separator = '\t'
    else:
        # previously fell through leaving `separator` undefined (NameError)
        raise ValueError('metafile must be a .csv or .tsv file: %s' % args.metafile)
    cerr(f'[Reading metadata file {args.metafile}]')
    metadata_df = pd.read_table(args.metafile, sep=separator)
    # make sure sequence name is a string (in case the column is automatically
    # converted to number)
    metadata_df['fn'] = metadata_df['fn'].astype('str')
    metadata_df['covv_assembly_method'] = metadata_df['covv_assembly_method'].astype('str')
    metadata_df.set_index('fn', drop=False, inplace=True )
    # open infile tsv (per-sample summary, e.g. coverage depths)
    cerr(f'[Reading infile {args.infile}]')
    submission_df = pd.read_table(args.infile, sep='\t')
    # check which column holds the sample identifier
    code_field = 'SAMPLE' if 'SAMPLE' in submission_df.columns else 'fn'
    submission_df[code_field] = submission_df[code_field].astype('str')
    # open sequence file and index sequences by their label
    cerr(f'[Reading sequence file {args.seqfile}]')
    mseq = bioio.load( args.seqfile )
    mseq_keys = {}
    for i in range(len(mseq)):
        mseq_keys[ mseq[i].label ] = i
    # iterate over submission_df, updating metadata and renaming sequences
    used = []
    for (i, s) in submission_df.iterrows():
        sample_id = s[code_field]
        r = metadata_df.loc[sample_id]
        if sample_id not in mseq_keys:
            # sample has no sequence in the FASTA; skip it entirely
            continue
        cerr(f'[Preparing sample {sample_id}]')
        # fill in per-sample submission columns
        metadata_df.at[sample_id, 'covv_coverage'] = s['AVGDEPTH']
        metadata_df.at[sample_id, 'fn'] = out_fasta
        metadata_df.at[sample_id, 'covv_seq_technology'] = args.covv_seq_technology
        metadata_df.at[sample_id, 'covv_assembly_method'] = args.covv_assembly_method
        # rename the sequence to its GISAID virus name and strip gap padding
        idx = mseq_keys[sample_id]
        mseq[idx].label = r['covv_virus_name']
        mseq[idx].seq = mseq[idx].seq.strip(b'-')
        used.append(sample_id)
        cerr(f'[Finish preparing sample {sample_id}]')
    # keep only the metadata rows for samples actually prepared
    metadata_df = metadata_df.loc[ used ]
    # write the new metadata CSV and the renamed FASTA
    metadata_df.to_csv(out_metadata, sep=',', index=False)
    bioio.save(mseq, out_fasta)
def init_argparser():
    """Build the command-line parser for the GISAID submission preparer."""
    parser = arg_parser('prepare GISAID submission files')
    parser.add_argument('--covv_assembly_method',
                        default='custom minimap2 + iVar pipeline')
    parser.add_argument('--covv_seq_technology',
                        default='Illumina NextSeq 550')
    parser.add_argument('--metafile', required=True)
    parser.add_argument('--seqfile', required=True)
    parser.add_argument('--outprefix', required=True)
    parser.add_argument('infile')
    return parser
def main( args ):
    # Entry point invoked by the seqpy command dispatcher with parsed args.
    prepare_submission(args)
| <filename>scripts/prepare-submission.py
from seqpy import cout, cerr
from seqpy.core import bioio
from seqpy.cmds import arg_parser
import pandas as pd
def prepare_submission(args):
    """Prepare GISAID submission files from metadata, summary and sequences.

    Writes ``<outprefix>.csv`` (metadata restricted to submitted samples,
    with coverage/technology/assembly columns filled in) and
    ``<outprefix>.fas`` (sequences renamed to their GISAID virus names,
    with leading/trailing alignment gap characters stripped).
    """
    out_metadata = args.outprefix + '.csv'
    out_fasta = args.outprefix + '.fas'
    # open metadata file; infer the column separator from the extension
    if args.metafile.lower().endswith('.csv'):
        separator = ','
    elif args.metafile.lower().endswith('.tsv'):
        # bugfix: this branch previously called the misspelled .lowe()
        separator = '\t'
    else:
        # previously fell through leaving `separator` undefined (NameError)
        raise ValueError('metafile must be a .csv or .tsv file: %s' % args.metafile)
    cerr(f'[Reading metadata file {args.metafile}]')
    metadata_df = pd.read_table(args.metafile, sep=separator)
    # make sure sequence name is a string (in case the column is automatically
    # converted to number)
    metadata_df['fn'] = metadata_df['fn'].astype('str')
    metadata_df['covv_assembly_method'] = metadata_df['covv_assembly_method'].astype('str')
    metadata_df.set_index('fn', drop=False, inplace=True )
    # open infile tsv (per-sample summary, e.g. coverage depths)
    cerr(f'[Reading infile {args.infile}]')
    submission_df = pd.read_table(args.infile, sep='\t')
    # check which column holds the sample identifier
    code_field = 'SAMPLE' if 'SAMPLE' in submission_df.columns else 'fn'
    submission_df[code_field] = submission_df[code_field].astype('str')
    # open sequence file and index sequences by their label
    cerr(f'[Reading sequence file {args.seqfile}]')
    mseq = bioio.load( args.seqfile )
    mseq_keys = {}
    for i in range(len(mseq)):
        mseq_keys[ mseq[i].label ] = i
    # iterate over submission_df, updating metadata and renaming sequences
    used = []
    for (i, s) in submission_df.iterrows():
        sample_id = s[code_field]
        r = metadata_df.loc[sample_id]
        if sample_id not in mseq_keys:
            # sample has no sequence in the FASTA; skip it entirely
            continue
        cerr(f'[Preparing sample {sample_id}]')
        # fill in per-sample submission columns
        metadata_df.at[sample_id, 'covv_coverage'] = s['AVGDEPTH']
        metadata_df.at[sample_id, 'fn'] = out_fasta
        metadata_df.at[sample_id, 'covv_seq_technology'] = args.covv_seq_technology
        metadata_df.at[sample_id, 'covv_assembly_method'] = args.covv_assembly_method
        # rename the sequence to its GISAID virus name and strip gap padding
        idx = mseq_keys[sample_id]
        mseq[idx].label = r['covv_virus_name']
        mseq[idx].seq = mseq[idx].seq.strip(b'-')
        used.append(sample_id)
        cerr(f'[Finish preparing sample {sample_id}]')
    # keep only the metadata rows for samples actually prepared
    metadata_df = metadata_df.loc[ used ]
    # write the new metadata CSV and the renamed FASTA
    metadata_df.to_csv(out_metadata, sep=',', index=False)
    bioio.save(mseq, out_fasta)
def init_argparser():
    """Build the command-line parser for the GISAID submission preparer."""
    parser = arg_parser('prepare GISAID submission files')
    parser.add_argument('--covv_assembly_method',
                        default='custom minimap2 + iVar pipeline')
    parser.add_argument('--covv_seq_technology',
                        default='Illumina NextSeq 550')
    parser.add_argument('--metafile', required=True)
    parser.add_argument('--seqfile', required=True)
    parser.add_argument('--outprefix', required=True)
    parser.add_argument('infile')
    return parser
def main( args ):
    # Entry point invoked by the seqpy command dispatcher with parsed args.
    prepare_submission(args)
| en | 0.693658 | # open metadata file # make sure sequence name is a string (in case the the column is automatically # converted to number) #import IPython; IPython.embed() # open infile tsv # check for available field in submission_df # open sequence file # iterate over submission_df #import IPython; IPython.embed() # set coverage # import IPython; IPython.embed() # set sequence name # remove unused metadata # write to new fasta & metadata file | 2.476512 | 2 |
npy2bam.py | txje/sequence-bias-adjustment | 4 | 6614083 | import math
import numpy
import pysam
import argparse
import time
CHROMS = None
def main(chromfile, npyfile, bamfile, outbam, tag, mult, noy, chrom):
    """Write a weight-annotated (or read-duplicated) BAM from an .npy table.

    The .npy rows are (chrom_id, pos, is_reverse, weight) and are assumed
    sorted by (chrom, pos).  Each BAM read matched by chromosome, start
    position and strand is either tagged with its weight (tag=True, XW tag)
    or written round(weight * mult) times (when mult is given).
    """
    global CHROMS
    CHROMS = [c.split()[0] for c in open(chromfile).read().strip().split('\n')]
    if noy and "chrY" in CHROMS:
        CHROMS.remove("chrY")
    t0 = time.time()
    print "Loading npy file."
    reads = numpy.load(npyfile)
    # format: (chromid, pos, reverse?, weight)
    t1 = time.time()
    print "%.2f seconds." % (t1 - t0)
    print "Sorting npy file."
    # sort reads by chrom, pos, rev
    # they should already be in this order...
    #reads.sort(key = lambda a: a[0]<<32 + a[1]<<1 + a[2])
    t2 = time.time()
    print "%.2f seconds." % (t2 - t1)
    # skip zeros at the beginning (padding rows with chrom==0 and pos==0)
    r = 0
    while reads[r][0] == 0 and reads[r][1] == 0:
        r += 1
    zeros = r
    print "Loading bam file."
    refbam = pysam.AlignmentFile(bamfile, "rb")
    if chrom is not None:
        chrom = "chr"+chrom
    # NOTE(review): numrefs is computed but never used, and the --chrom
    # filter is never applied below — confirm whether this is intended.
    numrefs = (1 if chrom else len(CHROMS))
    t2 = time.time()
    print "%.2f seconds." % (t2 - t1)
    # get usable header
    header = refbam.header
    # REORDER CHROMOSOMES TO MATCH NPY FILE (CANONICAL ORDER)
    header["SQ"].sort(key = lambda a: CHROMS.index(a["SN"]) if a["SN"] in CHROMS else 999999)
    print [s["SN"] for s in header["SQ"]]
    bam = pysam.Samfile(outbam, "wb", header=header)
    print "Writing new bam file."
    written = 0
    # two-pointer merge: r walks the npy rows while we stream BAM reads
    for c in xrange(len(CHROMS)):
        print "on to %s (%i), at read %i of %i" % (CHROMS[c], c, r, reads.shape[0])
        while r < len(reads) and reads[r][0] < c:
            #print "skipping read %i:%i:%i because chrom is %i" % (reads[r][0], reads[r][1], reads[r][2], c)
            r += 1
        if r >= len(reads):
            print " no reads on %s (%i)" % (CHROMS[c], c)
            break
        for read in refbam.fetch(CHROMS[c]):
            # NOTE(review): reads[r] is unguarded here; if the inner while
            # below advances r to len(reads), this raises IndexError — confirm.
            if reads[r][0] > c:
                break
            read.reference_id = c # SET THE REFID TO THE CURRENT CHROM ID
            # advance past npy rows before this read or on the wrong strand
            while r < len(reads) and reads[r][0] == c and (reads[r][1] < read.reference_start or (reads[r][2] == 0 and read.flag & 16 > 0)):
                r += 1
            # exact match on chrom, start and strand (flag bit 16 = reverse)
            if r < len(reads) and reads[r][0] == c and reads[r][1] == read.reference_start and (reads[r][2] * 16) == (read.flag & 16):
                name = read.query_name
                if tag:
                    #read.tags.append(('XW', reads[r][3]))
                    read.set_tag('XW', reads[r][3], 'f')
                    bam.write(read)
                    written += 1
                else: # mult must be defined
                    # emit the read multiple times proportional to its weight
                    for i in xrange(int(round(reads[r][3] * mult))):
                        read.query_name = name + "_%i" % i
                        bam.write(read)
                        written += 1
                r += 1
    print "%i reads in npy file" % r
    print "%i reads after zero-filtering" % (r - zeros)
    print "%i bam reads written" % written
    bam.close()
    refbam.close()
    t3 = time.time()
    print "%.2f seconds." % (t3 - t2)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Simply duplicate BAM reads")
    parser.add_argument("chroms", help="Chromosome file")
    parser.add_argument("npy", help="Weighted numpy file")
    parser.add_argument("bam", help="Unweighted BAM file")
    parser.add_argument("out", help="BAM file to output")
    parser.add_argument("--noy", help="Do not use chrY", action="store_true", default=False)
    parser.add_argument("--tag", help="Use XW tag to indicate weight", action="store_true", default=False)
    parser.add_argument("--mult", help="Weight multiplier", type=int)
    parser.add_argument("--chrom", help="Convert a single chromosome")
    args = parser.parse_args()
    # --tag and --mult select mutually exclusive output modes
    if args.tag and args.mult is not None:
        print "Cannot specify --tag and --mult, pick one or the other"
    else:
        main(args.chroms, args.npy, args.bam, args.out, args.tag, args.mult, args.noy, args.chrom)
# python npy2bam.py $outdir/$prefix.${k}mer_adjusted.read_weights.npy $bamfiltered $outdir/$prefix.adjusted.bam --noy --tag
| import math
import numpy
import pysam
import argparse
import time
CHROMS = None
def main(chromfile, npyfile, bamfile, outbam, tag, mult, noy, chrom):
    """Write a weight-annotated (or read-duplicated) BAM from an .npy table.

    The .npy rows are (chrom_id, pos, is_reverse, weight) and are assumed
    sorted by (chrom, pos).  Each BAM read matched by chromosome, start
    position and strand is either tagged with its weight (tag=True, XW tag)
    or written round(weight * mult) times (when mult is given).
    """
    global CHROMS
    CHROMS = [c.split()[0] for c in open(chromfile).read().strip().split('\n')]
    if noy and "chrY" in CHROMS:
        CHROMS.remove("chrY")
    t0 = time.time()
    print "Loading npy file."
    reads = numpy.load(npyfile)
    # format: (chromid, pos, reverse?, weight)
    t1 = time.time()
    print "%.2f seconds." % (t1 - t0)
    print "Sorting npy file."
    # sort reads by chrom, pos, rev
    # they should already be in this order...
    #reads.sort(key = lambda a: a[0]<<32 + a[1]<<1 + a[2])
    t2 = time.time()
    print "%.2f seconds." % (t2 - t1)
    # skip zeros at the beginning (padding rows with chrom==0 and pos==0)
    r = 0
    while reads[r][0] == 0 and reads[r][1] == 0:
        r += 1
    zeros = r
    print "Loading bam file."
    refbam = pysam.AlignmentFile(bamfile, "rb")
    if chrom is not None:
        chrom = "chr"+chrom
    # NOTE(review): numrefs is computed but never used, and the --chrom
    # filter is never applied below — confirm whether this is intended.
    numrefs = (1 if chrom else len(CHROMS))
    t2 = time.time()
    print "%.2f seconds." % (t2 - t1)
    # get usable header
    header = refbam.header
    # REORDER CHROMOSOMES TO MATCH NPY FILE (CANONICAL ORDER)
    header["SQ"].sort(key = lambda a: CHROMS.index(a["SN"]) if a["SN"] in CHROMS else 999999)
    print [s["SN"] for s in header["SQ"]]
    bam = pysam.Samfile(outbam, "wb", header=header)
    print "Writing new bam file."
    written = 0
    # two-pointer merge: r walks the npy rows while we stream BAM reads
    for c in xrange(len(CHROMS)):
        print "on to %s (%i), at read %i of %i" % (CHROMS[c], c, r, reads.shape[0])
        while r < len(reads) and reads[r][0] < c:
            #print "skipping read %i:%i:%i because chrom is %i" % (reads[r][0], reads[r][1], reads[r][2], c)
            r += 1
        if r >= len(reads):
            print " no reads on %s (%i)" % (CHROMS[c], c)
            break
        for read in refbam.fetch(CHROMS[c]):
            # NOTE(review): reads[r] is unguarded here; if the inner while
            # below advances r to len(reads), this raises IndexError — confirm.
            if reads[r][0] > c:
                break
            read.reference_id = c # SET THE REFID TO THE CURRENT CHROM ID
            # advance past npy rows before this read or on the wrong strand
            while r < len(reads) and reads[r][0] == c and (reads[r][1] < read.reference_start or (reads[r][2] == 0 and read.flag & 16 > 0)):
                r += 1
            # exact match on chrom, start and strand (flag bit 16 = reverse)
            if r < len(reads) and reads[r][0] == c and reads[r][1] == read.reference_start and (reads[r][2] * 16) == (read.flag & 16):
                name = read.query_name
                if tag:
                    #read.tags.append(('XW', reads[r][3]))
                    read.set_tag('XW', reads[r][3], 'f')
                    bam.write(read)
                    written += 1
                else: # mult must be defined
                    # emit the read multiple times proportional to its weight
                    for i in xrange(int(round(reads[r][3] * mult))):
                        read.query_name = name + "_%i" % i
                        bam.write(read)
                        written += 1
                r += 1
    print "%i reads in npy file" % r
    print "%i reads after zero-filtering" % (r - zeros)
    print "%i bam reads written" % written
    bam.close()
    refbam.close()
    t3 = time.time()
    print "%.2f seconds." % (t3 - t2)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Simply duplicate BAM reads")
    parser.add_argument("chroms", help="Chromosome file")
    parser.add_argument("npy", help="Weighted numpy file")
    parser.add_argument("bam", help="Unweighted BAM file")
    parser.add_argument("out", help="BAM file to output")
    parser.add_argument("--noy", help="Do not use chrY", action="store_true", default=False)
    parser.add_argument("--tag", help="Use XW tag to indicate weight", action="store_true", default=False)
    parser.add_argument("--mult", help="Weight multiplier", type=int)
    parser.add_argument("--chrom", help="Convert a single chromosome")
    args = parser.parse_args()
    # --tag and --mult select mutually exclusive output modes
    if args.tag and args.mult is not None:
        print "Cannot specify --tag and --mult, pick one or the other"
    else:
        main(args.chroms, args.npy, args.bam, args.out, args.tag, args.mult, args.noy, args.chrom)
| en | 0.503601 | # format: (chromid, pos, reverse?, weight) # sort reads by chrom, pos, rev # they should already be in this order... #reads.sort(key = lambda a: a[0]<<32 + a[1]<<1 + a[2]) # skip zeros at the beginning # get usable header # REORDER CHROMOSOMES TO MATCH NPY FILE (CANONICAL ORDER) #print "skipping read %i:%i:%i because chrom is %i" % (reads[r][0], reads[r][1], reads[r][2], c) # SET THE REFID TO THE CURRENT CHROM ID #read.tags.append(('XW', reads[r][3])) # mult must be defined # python npy2bam.py $outdir/$prefix.${k}mer_adjusted.read_weights.npy $bamfiltered $outdir/$prefix.adjusted.bam --noy --tag | 2.618837 | 3 |
cesiumpy/util/trait.py | cksammons7/cesiumpy | 0 | 6614084 | <filename>cesiumpy/util/trait.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import collections
import traitlets
from enum import Enum
import cesiumpy.util.common as com
import cesiumpy.util.html as html
class MaybeTrait(traitlets.Instance):
    # Instance trait that first coerces raw input through ``klass.maybe``,
    # so callers may pass convertible values instead of ready instances.
    def validate(self, obj, value):
        if self.allow_none is True and value is None:
            return super(MaybeTrait, self).validate(obj, value)
        try:
            # klass.maybe converts raw input into a klass instance
            value = self.klass.maybe(value)
        except ValueError:
            # conversion failed -> raise the standard trait error
            self.error(obj, value)
        return super(MaybeTrait, self).validate(obj, value)
class URITrait(traitlets.Unicode):
    # Unicode trait whose value must pass html._check_uri (a URI/path check).
    def validate(self, obj, value):
        if self.allow_none is True and value is None:
            return super(URITrait, self).validate(obj, value)
        if not html._check_uri(value):
            self.error(obj, value)
        return super(URITrait, self).validate(obj, value)
# --------------------------------------------------
# Container
# --------------------------------------------------
class _HTMLObject(traitlets.HasTraits):
    # Base class for objects that render themselves as HTML/JS via ``script``.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 — confirm this is intended.
    def __eq__(self, other):
        # compare by rendered script text: two objects are equal when they
        # emit identical HTML/JavaScript
        if isinstance(other, _HTMLObject):
            return self.script == other.script
        return False
    @property
    def script(self):
        # subclasses must return their HTML/JavaScript representation
        raise NotImplementedError
class _JavaScriptObject(_HTMLObject):
    """
    Base class for JavaScript instances, which can be converted to
    a JavaScript object literal via the ``script`` property.
    """
    @property
    def _klass(self):
        # Subclasses must return the Cesium class name, e.g. "Cesium.Viewer".
        # (An unreachable return statement after this raise was removed.)
        raise NotImplementedError('must be overriden in child classes')
    @property
    def _props(self):
        # Subclasses must list the attribute names serialized into the object.
        raise NotImplementedError('must be overriden in child classes')
    @property
    def _property_dict(self):
        # Ordered mapping of property name -> current attribute value,
        # preserving the order declared in _props.
        props = collections.OrderedDict()
        for p in self._props:
            props[p] = getattr(self, p)
        return props
    @property
    def script(self):
        # Render the properties as a JavaScript object literal string.
        props = self._property_dict
        results = com.to_jsobject(props)
        return ''.join(results)
class _JavaScriptEnum(Enum):
    # Enum whose member value is already the JavaScript expression to emit.
    @property
    def script(self):
        # the stored value doubles as the JS representation
        return self.value
class _DIV(_HTMLObject):
    """HTML ``<div>`` container in which the Cesium widget is rendered."""
    divid = traitlets.Unicode()
    width = traitlets.Unicode()
    height = traitlets.Unicode()
    def __init__(self, divid=None, width='100%', height='100%'):
        # default element id matches the conventional Cesium container name
        if divid is None:
            divid = 'cesiumContainer'
        self.divid = divid
        self.width = width
        self.height = height
    @property
    def script(self):
        # bugfix: the closing tag was "<div>", which produced an unclosed,
        # invalid HTML element; it must be "</div>"
        container = """<div id="{0}" style="width:{1}; height:{2};"></div>"""
        return container.format(self.divid, self.width, self.height)
| <filename>cesiumpy/util/trait.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import collections
import traitlets
from enum import Enum
import cesiumpy.util.common as com
import cesiumpy.util.html as html
class MaybeTrait(traitlets.Instance):
    # Instance trait that first coerces raw input through ``klass.maybe``,
    # so callers may pass convertible values instead of ready instances.
    def validate(self, obj, value):
        if self.allow_none is True and value is None:
            return super(MaybeTrait, self).validate(obj, value)
        try:
            # klass.maybe converts raw input into a klass instance
            value = self.klass.maybe(value)
        except ValueError:
            # conversion failed -> raise the standard trait error
            self.error(obj, value)
        return super(MaybeTrait, self).validate(obj, value)
class URITrait(traitlets.Unicode):
    # Unicode trait whose value must pass html._check_uri (a URI/path check).
    def validate(self, obj, value):
        if self.allow_none is True and value is None:
            return super(URITrait, self).validate(obj, value)
        if not html._check_uri(value):
            self.error(obj, value)
        return super(URITrait, self).validate(obj, value)
# --------------------------------------------------
# Container
# --------------------------------------------------
class _HTMLObject(traitlets.HasTraits):
    # Base class for objects that render themselves as HTML/JS via ``script``.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 — confirm this is intended.
    def __eq__(self, other):
        # compare by rendered script text: two objects are equal when they
        # emit identical HTML/JavaScript
        if isinstance(other, _HTMLObject):
            return self.script == other.script
        return False
    @property
    def script(self):
        # subclasses must return their HTML/JavaScript representation
        raise NotImplementedError
class _JavaScriptObject(_HTMLObject):
    """
    Base class for JavaScript instances, which can be converted to
    a JavaScript object literal via the ``script`` property.
    """
    @property
    def _klass(self):
        # Subclasses must return the Cesium class name, e.g. "Cesium.Viewer".
        # (An unreachable return statement after this raise was removed.)
        raise NotImplementedError('must be overriden in child classes')
    @property
    def _props(self):
        # Subclasses must list the attribute names serialized into the object.
        raise NotImplementedError('must be overriden in child classes')
    @property
    def _property_dict(self):
        # Ordered mapping of property name -> current attribute value,
        # preserving the order declared in _props.
        props = collections.OrderedDict()
        for p in self._props:
            props[p] = getattr(self, p)
        return props
    @property
    def script(self):
        # Render the properties as a JavaScript object literal string.
        props = self._property_dict
        results = com.to_jsobject(props)
        return ''.join(results)
class _JavaScriptEnum(Enum):
    # Enum whose member value is already the JavaScript expression to emit.
    @property
    def script(self):
        # the stored value doubles as the JS representation
        return self.value
class _DIV(_HTMLObject):
    """HTML ``<div>`` container in which the Cesium widget is rendered."""
    divid = traitlets.Unicode()
    width = traitlets.Unicode()
    height = traitlets.Unicode()
    def __init__(self, divid=None, width='100%', height='100%'):
        # default element id matches the conventional Cesium container name
        if divid is None:
            divid = 'cesiumContainer'
        self.divid = divid
        self.width = width
        self.height = height
    @property
    def script(self):
        # bugfix: the closing tag was "<div>", which produced an unclosed,
        # invalid HTML element; it must be "</div>"
        container = """<div id="{0}" style="width:{1}; height:{2};"></div>"""
        return container.format(self.divid, self.width, self.height)
| en | 0.377872 | #!/usr/bin/env python # coding: utf-8 # -------------------------------------------------- # Container # -------------------------------------------------- # conmpare with script Base class for JavaScript instances, which can be converted to JavaScript instance <div id="{0}" style="width:{1}; height:{2};"><div> | 2.079686 | 2 |
assets/tuned/daemon/tuned/plugins/plugin_sysfs.py | sjug/cluster-node-tuning-operator | 53 | 6614085 | <gh_stars>10-100
from . import base
import glob
import re
import os.path
from .decorators import *
import tuned.logs
from subprocess import *
from tuned.utils.commands import commands
log = tuned.logs.get()
class SysfsPlugin(base.Plugin):
    """
    Plugin for applying custom sysfs options, using specific plugins is preferred.
    Each profile option key is a sysfs path glob; its (variable-expanded)
    value is written to every matching file under /sys, and the original
    values are captured so they can be restored on rollback.
    """
    # TODO: resolve possible conflicts with sysctl settings from other plugins
    def __init__(self, *args, **kwargs):
        super(SysfsPlugin, self).__init__(*args, **kwargs)
        # option names are arbitrary sysfs paths, not a fixed schema
        self._has_dynamic_options = True
        self._cmd = commands()
    def _instance_init(self, instance):
        instance._has_dynamic_tuning = False
        instance._has_static_tuning = True
        # normalize each option key (a sysfs path, possibly a glob) once
        instance._sysfs = dict([(os.path.normpath(key_value[0]), key_value[1]) for key_value in list(instance.options.items())])
        # path -> value read before we overwrote it (filled during apply)
        instance._sysfs_original = {}
    def _instance_cleanup(self, instance):
        pass
    def _instance_apply_static(self, instance):
        for key, value in list(instance._sysfs.items()):
            v = self._variables.expand(value)
            for f in glob.iglob(key):
                if self._check_sysfs(f):
                    # remember the old value so unapply can restore it
                    instance._sysfs_original[f] = self._read_sysfs(f)
                    self._write_sysfs(f, v)
                else:
                    log.error("rejecting write to '%s' (not inside /sys)" % f)
    def _instance_verify_static(self, instance, ignore_missing, devices):
        # report False if any configured path fails verification
        ret = True
        for key, value in list(instance._sysfs.items()):
            v = self._variables.expand(value)
            for f in glob.iglob(key):
                if self._check_sysfs(f):
                    curr_val = self._read_sysfs(f)
                    if self._verify_value(f, v, curr_val, ignore_missing) == False:
                        ret = False
        return ret
    def _instance_unapply_static(self, instance, full_rollback = False):
        # restore every file we touched to its captured original value
        for key, value in list(instance._sysfs_original.items()):
            self._write_sysfs(key, value)
    def _check_sysfs(self, sysfs_file):
        # only allow writes to paths under /sys
        return re.match(r"^/sys/.*", sysfs_file)
    def _read_sysfs(self, sysfs_file):
        data = self._cmd.read_file(sysfs_file).strip()
        if len(data) > 0:
            # e.g. "[ondemand] performance" -> return the active option
            return self._cmd.get_active_option(data, False)
        else:
            return None
    def _write_sysfs(self, sysfs_file, value):
        return self._cmd.write_to_file(sysfs_file, value)
| from . import base
import glob
import re
import os.path
from .decorators import *
import tuned.logs
from subprocess import *
from tuned.utils.commands import commands
log = tuned.logs.get()
class SysfsPlugin(base.Plugin):
    """
    Plugin for applying custom sysfs options, using specific plugins is preferred.
    Each profile option key is a sysfs path glob; its (variable-expanded)
    value is written to every matching file under /sys, and the original
    values are captured so they can be restored on rollback.
    """
    # TODO: resolve possible conflicts with sysctl settings from other plugins
    def __init__(self, *args, **kwargs):
        super(SysfsPlugin, self).__init__(*args, **kwargs)
        # option names are arbitrary sysfs paths, not a fixed schema
        self._has_dynamic_options = True
        self._cmd = commands()
    def _instance_init(self, instance):
        instance._has_dynamic_tuning = False
        instance._has_static_tuning = True
        # normalize each option key (a sysfs path, possibly a glob) once
        instance._sysfs = dict([(os.path.normpath(key_value[0]), key_value[1]) for key_value in list(instance.options.items())])
        # path -> value read before we overwrote it (filled during apply)
        instance._sysfs_original = {}
    def _instance_cleanup(self, instance):
        pass
    def _instance_apply_static(self, instance):
        for key, value in list(instance._sysfs.items()):
            v = self._variables.expand(value)
            for f in glob.iglob(key):
                if self._check_sysfs(f):
                    # remember the old value so unapply can restore it
                    instance._sysfs_original[f] = self._read_sysfs(f)
                    self._write_sysfs(f, v)
                else:
                    log.error("rejecting write to '%s' (not inside /sys)" % f)
    def _instance_verify_static(self, instance, ignore_missing, devices):
        # report False if any configured path fails verification
        ret = True
        for key, value in list(instance._sysfs.items()):
            v = self._variables.expand(value)
            for f in glob.iglob(key):
                if self._check_sysfs(f):
                    curr_val = self._read_sysfs(f)
                    if self._verify_value(f, v, curr_val, ignore_missing) == False:
                        ret = False
        return ret
    def _instance_unapply_static(self, instance, full_rollback = False):
        # restore every file we touched to its captured original value
        for key, value in list(instance._sysfs_original.items()):
            self._write_sysfs(key, value)
    def _check_sysfs(self, sysfs_file):
        # only allow writes to paths under /sys
        return re.match(r"^/sys/.*", sysfs_file)
    def _read_sysfs(self, sysfs_file):
        data = self._cmd.read_file(sysfs_file).strip()
        if len(data) > 0:
            # e.g. "[ondemand] performance" -> return the active option
            return self._cmd.get_active_option(data, False)
        else:
            return None
def _write_sysfs(self, sysfs_file, value):
return self._cmd.write_to_file(sysfs_file, value) | en | 0.736789 | Plugin for applying custom sysfs options, using specific plugins is preferred. # TODO: resolve possible conflicts with sysctl settings from other plugins | 2.157221 | 2 |
levantamento_dados/releases/seleciona_releases_entre_datas.py | carlosdenner/github | 1 | 6614086 | <gh_stars>1-10
import json
import datetime
from dateutil import parser
def ler_arquivo_json(nome_arquivo):
    """Load and return the JSON document stored in ``nome_arquivo``."""
    with open(nome_arquivo, 'r', encoding='utf8') as origem:
        conteudo = json.load(origem)
    return conteudo
def gravar_arquivo_json(nome_arquivo, dados):
    """Write *dados* to *nome_arquivo* as pretty-printed UTF-8 JSON."""
    with open(nome_arquivo, 'w', encoding='utf-8') as arquivo:
        json.dump(dados, arquivo, ensure_ascii=False, indent=2, sort_keys=False, separators=(',', ':'))
#================================================================================#
#                                  MAIN                                          #
#================================================================================#
# Read a repositories-with-releases JSON file, keep only the releases created
# inside a fixed date window, and write the filtered result back out.
print('Informe o arquivo.json dos repositórios com suas releases: ')
nome_arquivo = input()
arquivo_json = ler_arquivo_json(nome_arquivo)
arquivo_json_saida = []
# Fix: the window limits are loop-invariant -- parse them once instead of
# re-parsing both strings for every single release.
datainicio = datetime.datetime.strptime('01-02-2012', "%d-%m-%Y")
datafim = datetime.datetime.strptime('01-06-2019', "%d-%m-%Y")
for i in range(len(arquivo_json)):
    lista_releases_saida = []
    lista_releases = arquivo_json[i]['releases']
    for x in range(len(lista_releases)):
        data_hora = parser.parse(lista_releases[x]['created_at'])
        # strftime/strptime round-trip truncates to a naive date-only datetime.
        data = datetime.datetime.strftime(data_hora, "%d-%m-%Y")
        dataformatdate = datetime.datetime.strptime(data, "%d-%m-%Y")
        print(dataformatdate)
        if dataformatdate >= datainicio and dataformatdate < datafim:
            lista_releases_saida.append(lista_releases[x])
        else:
            print("Release não está entre o período definido!")
    registro = {}
    registro['id'] = arquivo_json[i]['id']
    registro['name'] = arquivo_json[i]['name']
    registro['url'] = arquivo_json[i]['url']
    registro['created_at'] = arquivo_json[i]['created_at']
    registro['num_watchers'] = arquivo_json[i]['num_watchers']
    registro['releases'] = lista_releases_saida
    arquivo_json_saida.append(registro)
nome_arquivo_saida = f'entre-datas-{nome_arquivo}'
gravar_arquivo_json(nome_arquivo_saida, arquivo_json_saida)
import datetime
from dateutil import parser
def ler_arquivo_json(nome_arquivo):
    """Read a UTF-8 JSON file and return the parsed object."""
    with open(nome_arquivo, 'r', encoding='utf8') as arquivo:
        return json.load(arquivo)
def gravar_arquivo_json(nome_arquivo, dados):
    """Write *dados* to *nome_arquivo* as pretty-printed UTF-8 JSON."""
    with open(nome_arquivo, 'w', encoding='utf-8') as arquivo:
        json.dump(dados, arquivo, ensure_ascii=False, indent=2, sort_keys=False, separators=(',', ':'))
#================================================================================#
#                                  MAIN                                          #
#================================================================================#
# Read a repositories-with-releases JSON file, keep only the releases created
# inside a fixed date window, and write the filtered result back out.
print('Informe o arquivo.json dos repositórios com suas releases: ')
nome_arquivo = input()
arquivo_json = ler_arquivo_json(nome_arquivo)
arquivo_json_saida = []
# Fix: the window limits are loop-invariant -- parse them once instead of
# re-parsing both strings for every single release.
datainicio = datetime.datetime.strptime('01-02-2012', "%d-%m-%Y")
datafim = datetime.datetime.strptime('01-06-2019', "%d-%m-%Y")
for i in range(len(arquivo_json)):
    lista_releases_saida = []
    lista_releases = arquivo_json[i]['releases']
    for x in range(len(lista_releases)):
        data_hora = parser.parse(lista_releases[x]['created_at'])
        # strftime/strptime round-trip truncates to a naive date-only datetime.
        data = datetime.datetime.strftime(data_hora, "%d-%m-%Y")
        dataformatdate = datetime.datetime.strptime(data, "%d-%m-%Y")
        print(dataformatdate)
        if dataformatdate >= datainicio and dataformatdate < datafim:
            lista_releases_saida.append(lista_releases[x])
        else:
            print("Release não está entre o período definido!")
    registro = {}
    registro['id'] = arquivo_json[i]['id']
    registro['name'] = arquivo_json[i]['name']
    registro['url'] = arquivo_json[i]['url']
    registro['created_at'] = arquivo_json[i]['created_at']
    registro['num_watchers'] = arquivo_json[i]['num_watchers']
    registro['releases'] = lista_releases_saida
    arquivo_json_saida.append(registro)
nome_arquivo_saida = f'entre-datas-{nome_arquivo}'
gravar_arquivo_json(nome_arquivo_saida, arquivo_json_saida)
methods/long_rhythm_distance.py | Melamoto/ML-Melody-Composition | 0 | 6614087 | # -*- coding: utf-8 -*-
"""
A hamming-distance-based model for predicting long term rhythmic patterns
"""
from rhythm_hmm import Rhythm, makeRhythmSamples
import math
import numpy as np
from scipy.cluster.vq import vq, kmeans
from scipy.stats import binom
import pdb
class StructuredRhythm(Rhythm):
    """A Rhythm with a fixed bar length measured in timesteps (ticks)."""
    def __init__(self, ticksPerBar):
        super().__init__()
        self.ticksPerBar = ticksPerBar  # timesteps per bar
    def bars(self):
        # Number of (possibly partial) bars covered by the timestep list.
        return math.ceil(len(self.timesteps)/self.ticksPerBar)
class RhythmDistanceModel:
    """Mixture model over normalized bar-to-bar Hamming distances.

    For every ordered bar pair (i, j) with i < j the model keeps
    `clusterCount` components: a mixing weight in weights[i][j][k] and a
    per-tick disagreement probability in probs[i][j][k].
    """
    def __init__(self, barLen, barCount, clusterCount, partitions=None):
        self.partitions = partitions
        self.barCount = barCount
        self.barLen = barLen
        self.weights = np.zeros((barCount,barCount,clusterCount))
        self.probs = np.zeros((barCount,barCount,clusterCount))
        self.clusterCount = clusterCount
        self.converged = False
        # Keep probabilities strictly inside (0, 1) so binom.pmf never
        # degenerates to an all-or-nothing distribution.
        self.minimumDistanceProb = 1/(self.barLen+1)
        self.maximumDistanceProb = 1 - self.minimumDistanceProb
    def train(self, rhythms, convergence=0.000001, maxIters=10000):
        # Fit weights/probs for every bar pair: k-means initialisation on the
        # normalized distances, then iterative EM refinement.
        for rhy in rhythms:
            assert len(rhy) == self.barCount*self.barLen, "Rhythms must correct number of measures and length"
        for i in range(self.barCount-1):
            for j in range(i+1,self.barCount):
                #pdb.set_trace()
                dists = [distance(r, i, j, self.barLen) for r in rhythms]
                alphas = [alphaDist(r, i, j, self.barLen) for r in rhythms]
                betas = [betaDist(r, i, j, self.barLen) for r in rhythms]
                # Initialise parameter estimates
                # Normalized distance (d - beta) / (alpha - beta), clamped.
                ijDS = np.zeros(len(rhythms))
                for r in range(len(rhythms)):
                    if alphas[r] - betas[r] == 0:
                        ijDS[r] = 0
                    else:
                        ijDS[r] = (dists[r] - betas[r])/(alphas[r] - betas[r])
                        ijDS[r] = max(min(ijDS[r],self.maximumDistanceProb),self.minimumDistanceProb)
                centroids = kmeans(ijDS, self.clusterCount)[0]
                # TODO: Bit of a hack, but necessary in some form
                # (kmeans may return fewer centroids than requested; pad by
                # duplicating the last one).
                while len(centroids) < self.clusterCount:
                    centroids = np.append(centroids, centroids[-1])
                code = vq(ijDS, centroids)[0]
                for k in range(self.clusterCount):
                    n = sum(c == k for c in code)
                    self.weights[i][j][k] = n / len(rhythms)
                    self.probs[i][j][k] = centroids[k]
                # Use iterative EM to refine parameters
                converged = False
                iters = 0
                while (not converged) and (iters < maxIters):
                    converged = True
                    iters += 1
                    # E-step: responsibility of component k for rhythm r.
                    clusterProbs = np.zeros((self.clusterCount,len(rhythms)))
                    for k in range(self.clusterCount):
                        for r in range(len(rhythms)):
                            """
                            TODO: Not sure about using this; the paper says to
                            use dist but I think it's a typo - it doesn't make
                            that much sense otherwise
                            """
                            delta = dists[r] - betas[r]
                            clusterProbs[k][r] = (
                                self.weights[i][j][k] *
                                self.gradientBinomialDistanceProb(delta,alphas[r],betas[r],self.probs[i][j][k]))
                    # Normalize cluster probabilities s.t. the total prob
                    # across clusters for a given rhythm is 1
                    # NOTE(review): np.divide's result is discarded here, so
                    # this normalization has no effect -- confirm intent.
                    np.divide(clusterProbs, np.sum(clusterProbs,0))
                    # M-step: re-estimate each component's probability/weight.
                    for k in range(self.clusterCount):
                        numerator = 0.0
                        denominator = 0.0
                        for r in range(len(rhythms)):
                            numerator += (dists[r] - betas[r]) * clusterProbs[k][r]
                            denominator += (alphas[r] - betas[r]) * clusterProbs[k][r]
                        oldProb = self.probs[i][j][k]
                        oldWeight = self.weights[i][j][k]
                        if denominator == 0:
                            self.probs[i][j][k] = 0
                        else:
                            self.probs[i][j][k] = numerator/denominator
                        self.probs[i][j][k] = max(min(
                            self.probs[i][j][k],
                            self.maximumDistanceProb),
                            self.minimumDistanceProb)
                        self.weights[i][j][k] = np.sum(clusterProbs[k])/len(rhythms)
                        # Relative-change convergence test on both parameters.
                        if abs(self.probs[i][j][k]-oldProb)/self.probs[i][j][k] > convergence:
                            converged = False
                        if abs(self.weights[i][j][k]-oldWeight)/self.weights[i][j][k] > convergence:
                            converged = False
                self.converged = converged
    # Returns a log probability of "bar" succeeding "rhythm" according to this
    # model
    def score(self, rhythm, bar):
        assert len(rhythm) % self.barLen == 0, "Rhythm length must be divisible by bar length"
        assert len(bar) == self.barLen, "Input bar has incorrect length"
        totalProb = 0.0
        combinedRhythm = np.concatenate([rhythm, bar])
        j = int(len(rhythm) / self.barLen)  # index of the candidate bar
        for i in range(j):
            dist = distance(combinedRhythm, i, j, self.barLen)
            alpha = alphaDist(combinedRhythm, i, j, self.barLen)
            beta = betaDist(combinedRhythm, i, j, self.barLen)
            delta = dist - beta
            iProb = 0.0
            # Mixture likelihood of this bar pair's distance.
            for k in range(self.clusterCount):
                iProb += self.weights[i][j][k] * self.gradientBinomialDistanceProb(delta,alpha,beta,self.probs[i][j][k])
            totalProb += np.log(iProb)
        return totalProb
    # As binomialDistanceProb below, but adds a gradient to impossible distance
    # value probabilities, so that all probabilities are non-zero and "more
    # impossible" values have lower probability
    def gradientBinomialDistanceProb(self, delta, alpha, beta, prob):
        if alpha - beta == 0:
            if delta == 0:
                return 1
            else:
                return self.minimumDistanceProb**(1+delta)
        return max(min(
            binom.pmf(delta, alpha - beta, prob),
            self.maximumDistanceProb),
            self.minimumDistanceProb)
def generateNextBar(rdm, hmm, lam, rhythm, partitions=None):
    """Greedy coordinate-ascent generation of the next bar.

    Samples an initial bar from the HMM (seeded on rhythm's state posterior),
    then repeatedly re-optimises each tick against
    hmm_score + lam * distance_model_score until no tick changes.
    NOTE(review): `partitions` is unused -- presumably kept for interface
    compatibility; confirm.
    """
    assert len(rhythm) % rdm.barLen == 0, "Rhythm length must be divisible by bar length"
    assert len(rhythm) < rdm.barLen * rdm.barCount, "Rhythm length must be less than distance model maximum"
    # Generate notes
    # TODO: Use predict_proba instead to achieve a more accurate range of results
    #startState = hmm.predict(rhythm)[-1]
    #startStateProbs = [0]*len(hmm.startprob_)
    #startStateProbs[startState] = 1.0
    startStateProbs = hmm.predict_proba(rhythm)[-1]
    # Temporarily replace the HMM's start distribution so samples continue
    # from the current rhythm; restored at the end of the function.
    tempProbs = hmm.startprob_
    hmm.startprob_ = startStateProbs
    startSymbol = hmm.sample(1)[0][0]
    barOut = np.concatenate(hmm.sample(rdm.barLen+1)[0])[1:]
    rhythmSteps = np.concatenate(rhythm)
    end = False
    while end == False:
        end = True
        for j in range(rdm.barLen):
            startVal = barOut[j]
            bestVal = 0
            bestScore = -np.inf
            # Try each of the three tick symbols at position j.
            for newVal in range(3):
                # NOTE: newBar aliases barOut (no copy), so this assignment
                # mutates barOut; barOut[j] = bestVal below settles the winner.
                newBar = barOut
                newBar[j] = newVal
                hmmScore = hmm.score(np.concatenate([startSymbol,newBar]).reshape(-1,1))
                distScore = rdm.score(rhythmSteps, newBar)
                newScore = hmmScore + (lam * distScore)
                if newScore > bestScore:
                    bestScore = newScore
                    bestVal = newVal
            barOut[j] = bestVal
            # Converge only when no values are changed
            if bestVal != startVal:
                end = False
    hmm.startprob_ = tempProbs
    return barOut
def makeTrackStructuredRhythm(track, ticksPerBar):
    """Encode a monophonic track as timesteps: 0 = rest, 1 = note onset, 2 = held note."""
    assert track.isMonophonic(), "Only monophonic tracks can be enscribed"
    rhythm = StructuredRhythm(ticksPerBar)
    rhythm.timesteps = [0]*track.length
    noteStart = 0
    noteEnd = 0
    n = -1  # index of the current note in track.notes
    for t in range(track.length):
        # Advance to the next note once the current one has finished.
        # NOTE(review): this indexes track.notes[n + 1] whenever noteEnd <= t,
        # even after the final note -- presumably tracks never end with
        # trailing rests; confirm.
        if noteEnd <= t:
            n = n + 1
            noteStart = track.notes[n].start
            noteEnd = track.notes[n].start + track.notes[n].duration
        if t == noteStart:
            rhythm.timesteps[t] = 1
        elif noteStart < t and t < noteEnd:
            rhythm.timesteps[t] = 2
    return rhythm
def distance(rhythm, barA, barB, ticksPerBar):
    """Hamming distance between bars barA and barB of *rhythm*."""
    offA = ticksPerBar * barA
    offB = ticksPerBar * barB
    return sum(1 for t in range(ticksPerBar)
               if rhythm[offA + t] != rhythm[offB + t])
def alphaDist(rhythm, barA, barB, ticksPerBar):
    """Minimum over earlier bars i of d(lesser, i) + d(greater, i).

    Falls back to the direct distance when the lesser bar index is 0
    (no earlier bar to pivot through).
    """
    lo, hi = min(barA, barB), max(barA, barB)
    if lo == 0:
        return distance(rhythm, barA, barB, ticksPerBar)
    return min(distance(rhythm, lo, i, ticksPerBar) + distance(rhythm, hi, i, ticksPerBar)
               for i in range(lo))
def betaDist(rhythm, barA, barB, ticksPerBar):
    """Maximum over earlier bars i of |d(lesser, i) - d(greater, i)|.

    Falls back to the direct distance when the lesser bar index is 0
    (no earlier bar to pivot through).
    """
    lo, hi = min(barA, barB), max(barA, barB)
    if lo == 0:
        return distance(rhythm, barA, barB, ticksPerBar)
    return max(abs(distance(rhythm, lo, i, ticksPerBar) - distance(rhythm, hi, i, ticksPerBar))
               for i in range(lo))
def binomialDistanceProb(delta, alpha, beta, prob):
    """Binomial pmf of *delta* successes in (alpha - beta) trials.

    Degenerate case alpha == beta: probability 1 iff delta == 0.
    """
    trials = alpha - beta
    if trials == 0:
        # This yields a gradient of 0 among "impossible" distance values,
        # making gradient ascent impossible; for gradient ascent use
        # gradientBinomialDistanceProb instead.
        return 1 if delta == 0 else 0
    return binom.pmf(delta, trials, prob)
| # -*- coding: utf-8 -*-
"""
A hamming-distance-based model for predicting long term rhythmic patterns
"""
from rhythm_hmm import Rhythm, makeRhythmSamples
import math
import numpy as np
from scipy.cluster.vq import vq, kmeans
from scipy.stats import binom
import pdb
class StructuredRhythm(Rhythm):
    """A Rhythm with a fixed bar length measured in timesteps (ticks)."""
    def __init__(self, ticksPerBar):
        super().__init__()
        self.ticksPerBar = ticksPerBar  # timesteps per bar
    def bars(self):
        # Number of (possibly partial) bars covered by the timestep list.
        return math.ceil(len(self.timesteps)/self.ticksPerBar)
class RhythmDistanceModel:
    """Mixture model over normalized bar-to-bar Hamming distances.

    For every ordered bar pair (i, j) with i < j the model keeps
    `clusterCount` components: a mixing weight in weights[i][j][k] and a
    per-tick disagreement probability in probs[i][j][k].
    """
    def __init__(self, barLen, barCount, clusterCount, partitions=None):
        self.partitions = partitions
        self.barCount = barCount
        self.barLen = barLen
        self.weights = np.zeros((barCount,barCount,clusterCount))
        self.probs = np.zeros((barCount,barCount,clusterCount))
        self.clusterCount = clusterCount
        self.converged = False
        # Keep probabilities strictly inside (0, 1) so binom.pmf never
        # degenerates to an all-or-nothing distribution.
        self.minimumDistanceProb = 1/(self.barLen+1)
        self.maximumDistanceProb = 1 - self.minimumDistanceProb
    def train(self, rhythms, convergence=0.000001, maxIters=10000):
        # Fit weights/probs for every bar pair: k-means initialisation on the
        # normalized distances, then iterative EM refinement.
        for rhy in rhythms:
            assert len(rhy) == self.barCount*self.barLen, "Rhythms must correct number of measures and length"
        for i in range(self.barCount-1):
            for j in range(i+1,self.barCount):
                #pdb.set_trace()
                dists = [distance(r, i, j, self.barLen) for r in rhythms]
                alphas = [alphaDist(r, i, j, self.barLen) for r in rhythms]
                betas = [betaDist(r, i, j, self.barLen) for r in rhythms]
                # Initialise parameter estimates
                # Normalized distance (d - beta) / (alpha - beta), clamped.
                ijDS = np.zeros(len(rhythms))
                for r in range(len(rhythms)):
                    if alphas[r] - betas[r] == 0:
                        ijDS[r] = 0
                    else:
                        ijDS[r] = (dists[r] - betas[r])/(alphas[r] - betas[r])
                        ijDS[r] = max(min(ijDS[r],self.maximumDistanceProb),self.minimumDistanceProb)
                centroids = kmeans(ijDS, self.clusterCount)[0]
                # TODO: Bit of a hack, but necessary in some form
                # (kmeans may return fewer centroids than requested; pad by
                # duplicating the last one).
                while len(centroids) < self.clusterCount:
                    centroids = np.append(centroids, centroids[-1])
                code = vq(ijDS, centroids)[0]
                for k in range(self.clusterCount):
                    n = sum(c == k for c in code)
                    self.weights[i][j][k] = n / len(rhythms)
                    self.probs[i][j][k] = centroids[k]
                # Use iterative EM to refine parameters
                converged = False
                iters = 0
                while (not converged) and (iters < maxIters):
                    converged = True
                    iters += 1
                    # E-step: responsibility of component k for rhythm r.
                    clusterProbs = np.zeros((self.clusterCount,len(rhythms)))
                    for k in range(self.clusterCount):
                        for r in range(len(rhythms)):
                            """
                            TODO: Not sure about using this; the paper says to
                            use dist but I think it's a typo - it doesn't make
                            that much sense otherwise
                            """
                            delta = dists[r] - betas[r]
                            clusterProbs[k][r] = (
                                self.weights[i][j][k] *
                                self.gradientBinomialDistanceProb(delta,alphas[r],betas[r],self.probs[i][j][k]))
                    # Normalize cluster probabilities s.t. the total prob
                    # across clusters for a given rhythm is 1
                    # NOTE(review): np.divide's result is discarded here, so
                    # this normalization has no effect -- confirm intent.
                    np.divide(clusterProbs, np.sum(clusterProbs,0))
                    # M-step: re-estimate each component's probability/weight.
                    for k in range(self.clusterCount):
                        numerator = 0.0
                        denominator = 0.0
                        for r in range(len(rhythms)):
                            numerator += (dists[r] - betas[r]) * clusterProbs[k][r]
                            denominator += (alphas[r] - betas[r]) * clusterProbs[k][r]
                        oldProb = self.probs[i][j][k]
                        oldWeight = self.weights[i][j][k]
                        if denominator == 0:
                            self.probs[i][j][k] = 0
                        else:
                            self.probs[i][j][k] = numerator/denominator
                        self.probs[i][j][k] = max(min(
                            self.probs[i][j][k],
                            self.maximumDistanceProb),
                            self.minimumDistanceProb)
                        self.weights[i][j][k] = np.sum(clusterProbs[k])/len(rhythms)
                        # Relative-change convergence test on both parameters.
                        if abs(self.probs[i][j][k]-oldProb)/self.probs[i][j][k] > convergence:
                            converged = False
                        if abs(self.weights[i][j][k]-oldWeight)/self.weights[i][j][k] > convergence:
                            converged = False
                self.converged = converged
    # Returns a log probability of "bar" succeeding "rhythm" according to this
    # model
    def score(self, rhythm, bar):
        assert len(rhythm) % self.barLen == 0, "Rhythm length must be divisible by bar length"
        assert len(bar) == self.barLen, "Input bar has incorrect length"
        totalProb = 0.0
        combinedRhythm = np.concatenate([rhythm, bar])
        j = int(len(rhythm) / self.barLen)  # index of the candidate bar
        for i in range(j):
            dist = distance(combinedRhythm, i, j, self.barLen)
            alpha = alphaDist(combinedRhythm, i, j, self.barLen)
            beta = betaDist(combinedRhythm, i, j, self.barLen)
            delta = dist - beta
            iProb = 0.0
            # Mixture likelihood of this bar pair's distance.
            for k in range(self.clusterCount):
                iProb += self.weights[i][j][k] * self.gradientBinomialDistanceProb(delta,alpha,beta,self.probs[i][j][k])
            totalProb += np.log(iProb)
        return totalProb
    # As binomialDistanceProb below, but adds a gradient to impossible distance
    # value probabilities, so that all probabilities are non-zero and "more
    # impossible" values have lower probability
    def gradientBinomialDistanceProb(self, delta, alpha, beta, prob):
        if alpha - beta == 0:
            if delta == 0:
                return 1
            else:
                return self.minimumDistanceProb**(1+delta)
        return max(min(
            binom.pmf(delta, alpha - beta, prob),
            self.maximumDistanceProb),
            self.minimumDistanceProb)
def generateNextBar(rdm, hmm, lam, rhythm, partitions=None):
    """Greedy coordinate-ascent generation of the next bar.

    Samples an initial bar from the HMM (seeded on rhythm's state posterior),
    then repeatedly re-optimises each tick against
    hmm_score + lam * distance_model_score until no tick changes.
    NOTE(review): `partitions` is unused -- presumably kept for interface
    compatibility; confirm.
    """
    assert len(rhythm) % rdm.barLen == 0, "Rhythm length must be divisible by bar length"
    assert len(rhythm) < rdm.barLen * rdm.barCount, "Rhythm length must be less than distance model maximum"
    # Generate notes
    # TODO: Use predict_proba instead to achieve a more accurate range of results
    #startState = hmm.predict(rhythm)[-1]
    #startStateProbs = [0]*len(hmm.startprob_)
    #startStateProbs[startState] = 1.0
    startStateProbs = hmm.predict_proba(rhythm)[-1]
    # Temporarily replace the HMM's start distribution so samples continue
    # from the current rhythm; restored at the end of the function.
    tempProbs = hmm.startprob_
    hmm.startprob_ = startStateProbs
    startSymbol = hmm.sample(1)[0][0]
    barOut = np.concatenate(hmm.sample(rdm.barLen+1)[0])[1:]
    rhythmSteps = np.concatenate(rhythm)
    end = False
    while end == False:
        end = True
        for j in range(rdm.barLen):
            startVal = barOut[j]
            bestVal = 0
            bestScore = -np.inf
            # Try each of the three tick symbols at position j.
            for newVal in range(3):
                # NOTE: newBar aliases barOut (no copy), so this assignment
                # mutates barOut; barOut[j] = bestVal below settles the winner.
                newBar = barOut
                newBar[j] = newVal
                hmmScore = hmm.score(np.concatenate([startSymbol,newBar]).reshape(-1,1))
                distScore = rdm.score(rhythmSteps, newBar)
                newScore = hmmScore + (lam * distScore)
                if newScore > bestScore:
                    bestScore = newScore
                    bestVal = newVal
            barOut[j] = bestVal
            # Converge only when no values are changed
            if bestVal != startVal:
                end = False
    hmm.startprob_ = tempProbs
    return barOut
def makeTrackStructuredRhythm(track, ticksPerBar):
    """Encode a monophonic track as timesteps: 0 = rest, 1 = note onset, 2 = held note."""
    assert track.isMonophonic(), "Only monophonic tracks can be enscribed"
    rhythm = StructuredRhythm(ticksPerBar)
    rhythm.timesteps = [0]*track.length
    noteStart = 0
    noteEnd = 0
    n = -1  # index of the current note in track.notes
    for t in range(track.length):
        # Advance to the next note once the current one has finished.
        # NOTE(review): this indexes track.notes[n + 1] whenever noteEnd <= t,
        # even after the final note -- presumably tracks never end with
        # trailing rests; confirm.
        if noteEnd <= t:
            n = n + 1
            noteStart = track.notes[n].start
            noteEnd = track.notes[n].start + track.notes[n].duration
        if t == noteStart:
            rhythm.timesteps[t] = 1
        elif noteStart < t and t < noteEnd:
            rhythm.timesteps[t] = 2
    return rhythm
def distance(rhythm, barA, barB, ticksPerBar):
    """Hamming distance between bars barA and barB of *rhythm*."""
    offA = ticksPerBar * barA
    offB = ticksPerBar * barB
    return sum(1 for t in range(ticksPerBar)
               if rhythm[offA + t] != rhythm[offB + t])
def alphaDist(rhythm, barA, barB, ticksPerBar):
    """Minimum over earlier bars i of d(lesser, i) + d(greater, i).

    Falls back to the direct distance when the lesser bar index is 0
    (no earlier bar to pivot through).
    """
    lo, hi = min(barA, barB), max(barA, barB)
    if lo == 0:
        return distance(rhythm, barA, barB, ticksPerBar)
    return min(distance(rhythm, lo, i, ticksPerBar) + distance(rhythm, hi, i, ticksPerBar)
               for i in range(lo))
def betaDist(rhythm, barA, barB, ticksPerBar):
    """Maximum over earlier bars i of |d(lesser, i) - d(greater, i)|.

    Falls back to the direct distance when the lesser bar index is 0
    (no earlier bar to pivot through).
    """
    lo, hi = min(barA, barB), max(barA, barB)
    if lo == 0:
        return distance(rhythm, barA, barB, ticksPerBar)
    return max(abs(distance(rhythm, lo, i, ticksPerBar) - distance(rhythm, hi, i, ticksPerBar))
               for i in range(lo))
def binomialDistanceProb(delta, alpha, beta, prob):
    """Binomial pmf of *delta* successes in (alpha - beta) trials.

    Degenerate case alpha == beta: probability 1 iff delta == 0.
    """
    trials = alpha - beta
    if trials == 0:
        # This yields a gradient of 0 among "impossible" distance values,
        # making gradient ascent impossible; for gradient ascent use
        # gradientBinomialDistanceProb instead.
        return 1 if delta == 0 else 0
    return binom.pmf(delta, trials, prob)
| en | 0.767081 | # -*- coding: utf-8 -*- A hamming-distance-based model for predicting long term rhythmic patterns #pdb.set_trace() # Initialise parameter estimates # TODO: Bit of a hack, but necessary in some form # Use iterative EM to refine parameters TODO: Not sure about using this; the paper says to use dist but I think it's a typo - it doesn't make that much sense otherwise # Normalize cluster probabilities s.t. the total prob # across clusters for a given rhythm is 1 # Returns a log probability of "bar" succeeding "rhythm" according to this # model # As binomialDistanceProb below, but adds a gradient to impossible distance # value probabilities, so that all probabilities are non-zero and "more # impossible" values have lower probability # Generate notes # TODO: Use predict_proba instead to achieve a more accurate range of results #startState = hmm.predict(rhythm)[-1] #startStateProbs = [0]*len(hmm.startprob_) #startStateProbs[startState] = 1.0 # Converge only when no values are changed # This causes a gradient of 0 among "impossible" distance # values - making gradient ascent impossible. For cases where # gradient ascent is needed, use gradientBinomialDistanceProb | 2.990326 | 3 |
tools/graph/SigPlot.py | pershint/reacdb | 0 | 6614088 | from __future__ import print_function
import couchdb
import matplotlib.pyplot as plt
import numpy as np
import sys
# Shared CouchDB server handle used by connectToDB() below.
couch = couchdb.Server()
def connectToDB(dbName):
    """Open database *dbName* on the module-level CouchDB server.

    Returns ("ok", db) on success, ("bad", {}) on failure.
    """
    status = "ok"
    db = {}
    try:
        db = couch[dbName]
    # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed.
    except Exception:
        print("Failed to connect to " + dbName, file = sys.stderr)
        status = "bad"
    return status, db
def getSigs():
    """
    Get significance factors for all reactors from reacdb/static.
    The name and significance list are returned as numpy arrays.
    """
    Reac_names = []
    Reac_sigs = []
    dbStatus, db = connectToDB('reacdb')
    # Fix: string equality instead of identity -- `is "ok"` relies on
    # interning and raises SyntaxWarning on modern Python.
    if dbStatus == "ok":
        queryresult = db.view('reacdb/static-significance', descending=True)
        for row in queryresult:
            try:
                doc = db[row.id]
                Reac_names.append(doc['reactor_name'])
                Reac_sigs.append(doc['significance_factor'])
            # Fix: narrowed from a bare `except:`.
            except Exception:
                print("error at" + str(row.id) + "in grabbed query.")
    Reac_names = np.array(Reac_names)
    Reac_sigs = np.array(Reac_sigs)
    return Reac_names, Reac_sigs
def cumulSum(array):
    """
    Takes in an array of numbers, sorts it high-to-low, and returns an array
    with the cumulative sum values. Example: cumulSum([2,4,5,7])
    will return [7, 12, 16, 18].
    """
    array = np.sort(array)[::-1]  # sort descending before accumulating
    # Vectorized cumulative sum; dtype=float matches the np.zeros() buffer
    # the original element-wise loop filled, so printed output is unchanged.
    csarray = np.cumsum(array, dtype=float)
    print(csarray)
    return csarray
def normalizeArrMax(array):
    """
    Normalize the elements of a numpy array, in place, by its largest value;
    the (mutated) array is also returned.
    """
    # NOTE(review): for integer dtypes the float quotients are truncated on
    # assignment -- confirm callers always pass float arrays.
    largest = np.amax(array)
    for i,entry in enumerate(array):
        array[i] = entry/largest
    print(array)
    return array
def normalizeArrSum(array):
    """
    Normalize the elements of a numpy array, in place, by the sum of all
    values; the (mutated) array is also returned.
    """
    # NOTE(review): the local is named `largest` but holds the *sum*; for
    # integer dtypes the float quotients are truncated on assignment --
    # confirm callers always pass float arrays.
    largest = np.sum(array)
    for i,entry in enumerate(array):
        array[i] = entry/largest
    print(array)
    return array
def plotAxes(x,y):
    """
    Takes in two arrays and plots them in a bar graph.
    The x array should contain the labels for each bar, and the y array
    contains the values for each bar.
    """
    nreacs = len(x)
    bar_width = 0.02  # horizontal offset for the tick labels
    opacity = 0.4
    fig, ax = plt.subplots()
    # Extra bottom margin so the vertical reactor names fit.
    plt.gcf().subplots_adjust(bottom=0.2)
    index = np.arange(nreacs)
    plt.bar(index, y, alpha=opacity, color='g')
    plt.xlabel('Reactor Core Name')
    plt.ylabel('% Significance')
    plt.title(r'Cumulative Sum of US/CA Reactor Significances for SNO+' + \
              r'AntiNu flux (~$\frac{MWt}{D^2}$)',y=1.02, fontsize=19)
    plt.xticks(index + bar_width, x, rotation='vertical',y=0.001)
    #plt.legend()
    #plt.tight_layout() #could use instead of the subplots_adjust line
    plt.show()
if __name__ == '__main__':
    x,y = getSigs()
    # Alternative: plot sum-normalized per-reactor significances instead.
    #y_n = normalizeArrSum(y)
    #plotAxes(x,y_n)
    #Uncomment to plot cumulative sum
    y_cs = cumulSum(y)
    y_cs = normalizeArrMax(y_cs)
    plotAxes(x,y_cs)
| from __future__ import print_function
import couchdb
import matplotlib.pyplot as plt
import numpy as np
import sys
# Shared CouchDB server handle used by connectToDB() below.
couch = couchdb.Server()
def connectToDB(dbName):
    """Open database *dbName* on the module-level CouchDB server.

    Returns ("ok", db) on success, ("bad", {}) on failure.
    """
    status = "ok"
    db = {}
    try:
        db = couch[dbName]
    # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed.
    except Exception:
        print("Failed to connect to " + dbName, file = sys.stderr)
        status = "bad"
    return status, db
def getSigs():
    """
    Get significance factors for all reactors from reacdb/static.
    The name and significance list are returned as numpy arrays.
    """
    Reac_names = []
    Reac_sigs = []
    dbStatus, db = connectToDB('reacdb')
    # Fix: string equality instead of identity -- `is "ok"` relies on
    # interning and raises SyntaxWarning on modern Python.
    if dbStatus == "ok":
        queryresult = db.view('reacdb/static-significance', descending=True)
        for row in queryresult:
            try:
                doc = db[row.id]
                Reac_names.append(doc['reactor_name'])
                Reac_sigs.append(doc['significance_factor'])
            # Fix: narrowed from a bare `except:`.
            except Exception:
                print("error at" + str(row.id) + "in grabbed query.")
    Reac_names = np.array(Reac_names)
    Reac_sigs = np.array(Reac_sigs)
    return Reac_names, Reac_sigs
def cumulSum(array):
    """
    Takes in an array of numbers, sorts it high-to-low, and returns an array
    with the cumulative sum values. Example: cumulSum([2,4,5,7])
    will return [7, 12, 16, 18].
    """
    array = np.sort(array)[::-1]  # sort descending before accumulating
    # Vectorized cumulative sum; dtype=float matches the np.zeros() buffer
    # the original element-wise loop filled, so printed output is unchanged.
    csarray = np.cumsum(array, dtype=float)
    print(csarray)
    return csarray
def normalizeArrMax(array):
    """
    Normalize the elements of a numpy array, in place, by its largest value;
    the (mutated) array is also returned.
    """
    # NOTE(review): for integer dtypes the float quotients are truncated on
    # assignment -- confirm callers always pass float arrays.
    largest = np.amax(array)
    for i,entry in enumerate(array):
        array[i] = entry/largest
    print(array)
    return array
def normalizeArrSum(array):
    """
    Normalize the elements of a numpy array, in place, by the sum of all
    values; the (mutated) array is also returned.
    """
    # NOTE(review): the local is named `largest` but holds the *sum*; for
    # integer dtypes the float quotients are truncated on assignment --
    # confirm callers always pass float arrays.
    largest = np.sum(array)
    for i,entry in enumerate(array):
        array[i] = entry/largest
    print(array)
    return array
def plotAxes(x,y):
    """
    Takes in two arrays and plots them in a bar graph.
    The x array should contain the labels for each bar, and the y array
    contains the values for each bar.
    """
    nreacs = len(x)
    bar_width = 0.02  # horizontal offset for the tick labels
    opacity = 0.4
    fig, ax = plt.subplots()
    # Extra bottom margin so the vertical reactor names fit.
    plt.gcf().subplots_adjust(bottom=0.2)
    index = np.arange(nreacs)
    plt.bar(index, y, alpha=opacity, color='g')
    plt.xlabel('Reactor Core Name')
    plt.ylabel('% Significance')
    plt.title(r'Cumulative Sum of US/CA Reactor Significances for SNO+' + \
              r'AntiNu flux (~$\frac{MWt}{D^2}$)',y=1.02, fontsize=19)
    plt.xticks(index + bar_width, x, rotation='vertical',y=0.001)
    #plt.legend()
    #plt.tight_layout() #could use instead of the subplots_adjust line
    plt.show()
if __name__ == '__main__':
    x,y = getSigs()
    # Alternative: plot sum-normalized per-reactor significances instead.
    #y_n = normalizeArrSum(y)
    #plotAxes(x,y_n)
    #Uncomment to plot cumulative sum
    y_cs = cumulSum(y)
    y_cs = normalizeArrMax(y_cs)
    plotAxes(x,y_cs)
| en | 0.754416 | Get significance factors for all reactors from reacdb/static. The name and significance list are returned as numpy arrays. Takes in an array of numbers and returns an array with the cumulative sum values. Example: cumulSum([2,4,5,7]) will return [7, 12, 16, 18]. #Sorts any array that's not low to high Takes in any numpyarray and normalizes it's elements by the largest value. Takes in any numpy array and normalizes all elements by the sum of all values. Takes in two arrays and plots them in a bar graph. The x array should contain the labels for each bar, and the y array contains the values for each bar. #plt.legend() #plt.tight_layout() #could use instead of the subplots_adjust line #y_n = normalizeArrSum(y) #plotAxes(x,y_n) #Uncomment to plot cumulative sum | 2.806906 | 3 |
main.py | funsoul/sort-py | 0 | 6614089 | <filename>main.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""main"""
__author__ = "funsoul"
__email__ = "<EMAIL>"
import sys
from sorts.base import Base as BaseUtils
from sorts.bubble_sort import BubbleSort
from sorts.insertion_sort import InsertionSort
from sorts.cocktail_sort import CocktailSort
from sorts.bucket_sort import BucketSort
from sorts.counting_sort import CountingSort
from sorts.quick_sort import QuickSort
from sorts.merge_sort import MergeSort
from sorts.selection_sort import SelectionSort
if __name__ == "__main__":
    # List length comes from argv[1] when it is a positive integer; otherwise 10.
    length = int(sys.argv[1]) if len(sys.argv) > 1 and int(sys.argv[1]) > 0 else 10
    baseUtils = BaseUtils(length)
    L = baseUtils.generator()
    baseUtils.print_list('List: ', L)
    # The blocks below are alternative sorters; uncomment one to try it.
    # sort = BubbleSort(length)
    # sort.print_list('BubbleSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = InsertionSort(length)
    # sort.print_list('InsertionSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = CocktailSort(length)
    # sort.print_list('CocktailSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = BucketSort(length)
    # sort.print_list('BucketSort: ', sort.execute(L))
    # sort = CountingSort(length)
    # sort.print_list('CountingSort: ', sort.execute(L))
    # sort = QuickSort(length)
    # sort.print_list('QueueSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = MergeSort(length)
    # sort.print_list('MergeSort: ', sort.execute(L))
    # sort.save_animate()
    sort = SelectionSort(length)
    sort.print_list('SelectionSort: ', sort.execute(L))
    # Fix: stray dataset residue fused onto this line removed.
    sort.save_animate()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""main"""
__author__ = "funsoul"
__email__ = "<EMAIL>"
import sys
from sorts.base import Base as BaseUtils
from sorts.bubble_sort import BubbleSort
from sorts.insertion_sort import InsertionSort
from sorts.cocktail_sort import CocktailSort
from sorts.bucket_sort import BucketSort
from sorts.counting_sort import CountingSort
from sorts.quick_sort import QuickSort
from sorts.merge_sort import MergeSort
from sorts.selection_sort import SelectionSort
if __name__ == "__main__":
    # List length comes from argv[1] when it is a positive integer; otherwise 10.
    length = int(sys.argv[1]) if len(sys.argv) > 1 and int(sys.argv[1]) > 0 else 10
    baseUtils = BaseUtils(length)
    L = baseUtils.generator()
    baseUtils.print_list('List: ', L)
    # The blocks below are alternative sorters; uncomment one to try it.
    # sort = BubbleSort(length)
    # sort.print_list('BubbleSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = InsertionSort(length)
    # sort.print_list('InsertionSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = CocktailSort(length)
    # sort.print_list('CocktailSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = BucketSort(length)
    # sort.print_list('BucketSort: ', sort.execute(L))
    # sort = CountingSort(length)
    # sort.print_list('CountingSort: ', sort.execute(L))
    # sort = QuickSort(length)
    # sort.print_list('QueueSort: ', sort.execute(L))
    # sort.save_animate()
    # sort = MergeSort(length)
    # sort.print_list('MergeSort: ', sort.execute(L))
    # sort.save_animate()
    sort = SelectionSort(length)
    sort.print_list('SelectionSort: ', sort.execute(L))
    # Fix: stray dataset residue fused onto this line removed.
    sort.save_animate()
platform/core/polyaxon/db/models/owner.py | hackerwins/polyaxon | 0 | 6614090 | <reponame>hackerwins/polyaxon
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.validators import validate_unicode_slug
from django.db import models
from django.utils.functional import cached_property
from constants import content_types
from db.models.abstract.diff import DiffModel
class Owner(DiffModel):
    """A model that represents a project owner.

    The concrete owner object is attached through a generic foreign key
    (content_type + object_id), so any content type can act as an owner.
    """
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
        related_name='+')
    object_id = models.PositiveIntegerField()
    owner = GenericForeignKey('content_type', 'object_id')
    # Globally unique, slug-validated owner name.
    name = models.CharField(
        max_length=150,
        unique=True,
        validators=[validate_unicode_slug]
    )
    class Meta:
        app_label = 'db'
        # At most one Owner row per concrete owner object.
        unique_together = (('content_type', 'object_id'),)
    def __str__(self):
        return self.name
    @cached_property
    def owner_type(self) -> str:
        # Model name of the concrete owner's content type.
        return self.content_type.model
    @cached_property
    def is_user_owner(self) -> bool:
        return self.owner_type == content_types.USER
| from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.validators import validate_unicode_slug
from django.db import models
from django.utils.functional import cached_property
from constants import content_types
from db.models.abstract.diff import DiffModel
class Owner(DiffModel):
    """A model that represents a project owner.

    The concrete owner object is attached through a generic foreign key
    (content_type + object_id), so any content type can act as an owner.
    Fix: stray dataset residue fused onto the last line removed.
    """
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
        related_name='+')
    object_id = models.PositiveIntegerField()
    owner = GenericForeignKey('content_type', 'object_id')
    # Globally unique, slug-validated owner name.
    name = models.CharField(
        max_length=150,
        unique=True,
        validators=[validate_unicode_slug]
    )
    class Meta:
        app_label = 'db'
        # At most one Owner row per concrete owner object.
        unique_together = (('content_type', 'object_id'),)
    def __str__(self):
        return self.name
    @cached_property
    def owner_type(self) -> str:
        # Model name of the concrete owner's content type.
        return self.content_type.model
    @cached_property
    def is_user_owner(self) -> bool:
        return self.owner_type == content_types.USER
test/test_update_contact.py | Sviatlana-Pi/python_training | 0 | 6614091 | <reponame>Sviatlana-Pi/python_training
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_update_first_contact(app):
    """Log in, overwrite every field of the first contact, then log out."""
    app.session.login(username="admin", password="<PASSWORD>")
    updated_contact = Contact(
        firstname="AntonNew", middlename="WiktorNew", lastname="LundNew",
        nickname="alundNew", title="ColaNew", company="Coca ColaNew",
        address="Kungsgatan 11New, lgh 4", home="CityNew",
        mobile="46(67)324-23-231", work="testNew", fax="411",
        email="<EMAIL>", email2="<EMAIL>", homepage="www.tutNew.by",
        email3="<EMAIL>", bday="6", bmonth="February", byear="1981",
        new_group="group1", notes="Test noteNew", phone2="1231231321",
        address2="adress 21", ayear="1920", amonth="February", aday="6")
    app.contact.update_first_contact(updated_contact)
    app.session.logout()
from model.contact import Contact
def test_update_first_contact(app):
app.session.login(username = "admin", password = "<PASSWORD>")
app.contact.update_first_contact(Contact(firstname="AntonNew", middlename="WiktorNew", lastname="LundNew", nickname="alundNew", title="ColaNew",
company="Coca ColaNew", address="Kungsgatan 11New, lgh 4", home="CityNew",
mobile="46(67)324-23-231", work="testNew", fax="411", email="<EMAIL>",
email2="<EMAIL>", homepage="www.tutNew.by", email3="<EMAIL>", bday="6",
bmonth="February", byear="1981", new_group="group1", notes="Test noteNew",
phone2="1231231321", address2="adress 21", ayear="1920", amonth="February", aday="6"))
app.session.logout() | en | 0.769321 | # -*- coding: utf-8 -*- | 2.178199 | 2 |
widgets.py | humble/appengine_admin | 1 | 6614092 | <reponame>humble/appengine_admin<filename>widgets.py<gh_stars>1-10
from functools import partial
from . import wtforms
from wtforms import widgets as w
class DateTextInput(w.TextInput):
    '''Custom date text widget with an added class for easy JavaScript targeting.'''
    # NOTE(review): the original docstring said "datetime", but this widget
    # tags the input with the 'admin-date' CSS class, i.e. the date variant
    # (the sibling DateTimeTextInput uses 'admin-datetime').
    def __call__(self, *args, **kwargs):
        # Only set the class if the caller did not provide one.
        kwargs.setdefault('class', 'admin-date')
        return super(DateTextInput, self).__call__(*args, **kwargs)
class DateTimeTextInput(w.TextInput):
    '''Custom datetime text widget with an added class for easy JavaScript targeting.'''
    # NOTE(review): the original docstring said "date", but this widget tags
    # the input with the 'admin-datetime' CSS class, i.e. the datetime variant.
    def __call__(self, *args, **kwargs):
        # Only set the class if the caller did not provide one.
        kwargs.setdefault('class', 'admin-datetime')
        return super(DateTimeTextInput, self).__call__(*args, **kwargs)
class BooleanWidget(w.ListWidget):
    '''List-style boolean widget; lets properties default to None.'''

    def __init__(self, prefix_label=False, *args, **kwargs):
        # Labels follow (rather than precede) the inputs by default.
        super(BooleanWidget, self).__init__(prefix_label=prefix_label, *args, **kwargs)

    def __call__(self, *args, **kwargs):
        # Tag the rendered list unless the caller supplied its own class.
        if 'class' not in kwargs:
            kwargs['class'] = 'boolean-field'
        return super(BooleanWidget, self).__call__(*args, **kwargs)
class AjaxKeyWidget(object):
    '''A ListProperty-compatible widget to easily manage Key entries.

    Pass object_classes to suggest what types of objects are allowed for lookup.
    The widget also includes any classes that are currently referenced
    in the list property.

    For lists of db.Key, this widget offers AJAX pagination of the above mentioned
    classes and allows for easy add/delete of each instance.
    '''
    def __init__(self, multiple=True):
        # multiple: whether more than one key may be selected.
        self.multiple = multiple

    def __call__(self, field, **kwargs):
        # Render the widget for *field* through the Jinja2 template.
        flat_attrs = w.core.html_params(name=field.name, **kwargs)
        # Convert the value into keys, objects and object classes
        field.process_formdata(field.data)
        # NOTE(review): imports are local, presumably to avoid circular
        # imports at module load time — confirm before hoisting them.
        from .handlers import AdminHandler
        handler = AdminHandler()
        from webapp2_extras import jinja2
        return jinja2.get_jinja2().render_template(
            'widgets/ajax_list_property.html',
            multiple=self.multiple,
            required=field.required,
            flat_attrs=flat_attrs,
            objects=field.objects,
            object_classes=field.object_classes,
            # Partials bind the handler so the template can build URLs.
            get_item_edit_url=partial(self._get_item_edit_url, handler=handler),
            get_reference_key=self._get_reference_key,
            name=field.name,
            paged_selector=partial(self._paged_selector, handler=handler),
        )

    @staticmethod
    def _get_reference_key(obj):
        # Prefer a model-provided reference key; fall back to the datastore key.
        return obj.admin_reference_key() if hasattr(obj, 'admin_reference_key') else obj.key()

    @staticmethod
    def _get_item_edit_url(model_instance, handler):
        # Prefer a model-provided edit URL; fall back to the generic admin edit route.
        return model_instance.admin_edit_url(handler) if hasattr(model_instance, 'admin_edit_url') \
            else handler.uri_for('appengine_admin.edit', model_name=model_instance.__class__.__name__, key=model_instance.key())

    @staticmethod
    def _paged_selector(paged_cls, handler):
        # Build the paginator page used by the AJAX selector for *paged_cls*.
        from . import admin_settings, model_register
        from .utils import Http404, import_path, Paginator
        base_url = handler.uri_for('appengine_admin.list', model_name=paged_cls.__name__)
        try:
            # Registered models use their ModelAdmin's paginator.
            model_admin = model_register.get_model_admin(paged_cls.__name__)
            paginator = Paginator(model_admin).get_page({}, base_url=base_url)
        except Http404:
            # Unregistered models fall back to the configured generic paginator.
            GenericPaginator = import_path(admin_settings.PAGINATOR_PATH)
            paginator = GenericPaginator(
                paged_cls,
                per_page=admin_settings.ADMIN_ITEMS_PER_PAGE
            ).get_page({}, base_url=base_url)
        return paginator
| from functools import partial
from . import wtforms
from wtforms import widgets as w
class DateTextInput(w.TextInput):
'''Custom datetime text widget with an added class for easy JavaScript targeting.'''
def __call__(self, *args, **kwargs):
kwargs.setdefault('class', 'admin-date')
return super(DateTextInput, self).__call__(*args, **kwargs)
class DateTimeTextInput(w.TextInput):
'''Custom date text widget with an added class for easy JavaScript targeting.'''
def __call__(self, *args, **kwargs):
kwargs.setdefault('class', 'admin-datetime')
return super(DateTimeTextInput, self).__call__(*args, **kwargs)
class BooleanWidget(w.ListWidget):
'''Gives a nice UI to let properties default to None.'''
def __init__(self, prefix_label=False, *args, **kwargs):
super(BooleanWidget, self).__init__(prefix_label=prefix_label, *args, **kwargs)
def __call__(self, *args, **kwargs):
kwargs.setdefault('class', 'boolean-field')
return super(BooleanWidget, self).__call__(*args, **kwargs)
class AjaxKeyWidget(object):
'''A ListProperty-compatible widget to easily manage Key entries.
Pass object_classes to suggest what types of objects are allowed for lookup.
The widget also includes any classes that are currently referenced
in the list property.
For lists of db.Key, this widget offers AJAX pagination of the above mentioned
classes and allows for easy add/delete of each instance.
'''
def __init__(self, multiple=True):
self.multiple = multiple
def __call__(self, field, **kwargs):
flat_attrs = w.core.html_params(name=field.name, **kwargs)
# Convert the value into keys, objects and object classes
field.process_formdata(field.data)
from .handlers import AdminHandler
handler = AdminHandler()
from webapp2_extras import jinja2
return jinja2.get_jinja2().render_template(
'widgets/ajax_list_property.html',
multiple=self.multiple,
required=field.required,
flat_attrs=flat_attrs,
objects=field.objects,
object_classes=field.object_classes,
get_item_edit_url=partial(self._get_item_edit_url, handler=handler),
get_reference_key=self._get_reference_key,
name=field.name,
paged_selector=partial(self._paged_selector, handler=handler),
)
@staticmethod
def _get_reference_key(obj):
return obj.admin_reference_key() if hasattr(obj, 'admin_reference_key') else obj.key()
@staticmethod
def _get_item_edit_url(model_instance, handler):
return model_instance.admin_edit_url(handler) if hasattr(model_instance, 'admin_edit_url') \
else handler.uri_for('appengine_admin.edit', model_name=model_instance.__class__.__name__, key=model_instance.key())
@staticmethod
def _paged_selector(paged_cls, handler):
from . import admin_settings, model_register
from .utils import Http404, import_path, Paginator
base_url = handler.uri_for('appengine_admin.list', model_name=paged_cls.__name__)
try:
model_admin = model_register.get_model_admin(paged_cls.__name__)
paginator = Paginator(model_admin).get_page({}, base_url=base_url)
except Http404:
GenericPaginator = import_path(admin_settings.PAGINATOR_PATH)
paginator = GenericPaginator(
paged_cls,
per_page=admin_settings.ADMIN_ITEMS_PER_PAGE
).get_page({}, base_url=base_url)
return paginator | en | 0.888111 | Custom datetime text widget with an added class for easy JavaScript targeting. Custom date text widget with an added class for easy JavaScript targeting. Gives a nice UI to let properties default to None. A ListProperty-compatible widget to easily manage Key entries. Pass object_classes to suggest what types of objects are allowed for lookup. The widget also includes any classes that are currently referenced in the list property. For lists of db.Key, this widget offers AJAX pagination of the above mentioned classes and allows for easy add/delete of each instance. # Convert the value into keys, objects and object classes | 2.617377 | 3 |
thaniya_server/src/thaniya_server/utils/__init__.py | jkpubsrc/Thaniya | 1 | 6614093 | <filename>thaniya_server/src/thaniya_server/utils/__init__.py
__version__ = "0.2021.2.15"
from .APIError import APIError
from .ID import ID
from .IDCreator import IDCreator
from .SecureRandomIDGenerator import SecureRandomIDGenerator
from .SecureRandomDataGenerator import SecureRandomDataGenerator
from .Scheduler import Scheduler
from .ProcessFilter import ProcessFilter
from .ProcessNotifier import ProcessNotifier
from .LockFile import LockFile
from .InUseFlag import InUseFlag
from .SVGDiskSpaceGraphGenerator import SVGDiskSpaceGraphGenerator | <filename>thaniya_server/src/thaniya_server/utils/__init__.py
__version__ = "0.2021.2.15"
from .APIError import APIError
from .ID import ID
from .IDCreator import IDCreator
from .SecureRandomIDGenerator import SecureRandomIDGenerator
from .SecureRandomDataGenerator import SecureRandomDataGenerator
from .Scheduler import Scheduler
from .ProcessFilter import ProcessFilter
from .ProcessNotifier import ProcessNotifier
from .LockFile import LockFile
from .InUseFlag import InUseFlag
from .SVGDiskSpaceGraphGenerator import SVGDiskSpaceGraphGenerator | none | 1 | 0.980822 | 1 | |
create_qr.py | tomasr8/qr | 0 | 6614094 | <reponame>tomasr8/qr
import matplotlib.pyplot as plt
import numpy as np
import cv2
__all__ = ['create_qr']
SQUARES = 24 # squares per side
PX = 40 # pixels per square
TOTAL_SIZE = SQUARES * PX
CENTER = TOTAL_SIZE//2
DATA_ROWS = SQUARES - 6
DATA_COLS = DATA_ROWS
BIG_CIRCLE_RADIUS = 2 * PX
SMALL_CIRCLE_RADIUS = 1 * PX
def create_circular_mask(x, y, r, w, h):
    """Return an (h, w) boolean array that is True inside the circle
    centred at pixel (x, y) with radius r (boundary inclusive)."""
    grid_y, grid_x = np.ogrid[:h, :w]
    distances = np.sqrt((grid_x - x) ** 2 + (grid_y - y) ** 2)
    return distances <= r
def draw_square(image, i, j):
    """Fill the (i, j)-th grid square of *image* with black (0)."""
    x0 = i * PX
    y0 = j * PX
    image[y0:y0 + PX, x0:x0 + PX] = 0
def draw_black_inner_border(image):
    """Paint a one-square-thick black frame just inside the outer margin."""
    inner = slice(PX, -PX)
    image[inner, PX:2 * PX] = 0      # left edge
    image[inner, -2 * PX:-PX] = 0    # right edge
    image[PX:2 * PX, inner] = 0      # top edge
    image[-2 * PX:-PX, inner] = 0    # bottom edge
def draw_start_corner(image):
    # Single black square at grid cell (2, 2) marking the "start" corner
    # (counterpart of the three-square marker drawn by draw_end_corner).
    draw_square(image, 2, 2)
def draw_end_corner(image):
    """Draw the three-square L-shaped marker identifying the "end" corner."""
    for col, row in ((2, SQUARES - 4), (2, SQUARES - 3), (3, SQUARES - 3)):
        draw_square(image, col, row)
def draw_circles(image):
    """Stamp the central bull's-eye: a black disc with a white disc inside."""
    # Order matters: the white inner disc overwrites the black one.
    for radius, colour in ((BIG_CIRCLE_RADIUS, 0), (SMALL_CIRCLE_RADIUS, 255)):
        mask = create_circular_mask(CENTER, CENTER, radius, TOTAL_SIZE, TOTAL_SIZE)
        image[mask] = colour
def encode_string(string: str):
    """Encode up to 16 characters as a list of 288 bits (96 bits, tripled).

    Each character is looked up in a 64-symbol alphabet and emitted as six
    bits, most significant bit first. Strings shorter than 16 characters
    are padded with spaces; the resulting 96-bit payload is repeated three
    times for redundancy.

    Raises:
        AssertionError: if *string* is longer than 16 characters.
        ValueError: if *string* contains a character outside the alphabet.
    """
    assert len(string) <= 16
    data = list(string)
    if len(data) < 16:
        data += [" "] * (16 - len(data))

    BASE64 = " !0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    encoded = []
    for letter in data:
        index = BASE64.find(letter)
        if index < 0:
            # Previously an unsupported character yielded index -1, whose
            # "{0:06b}" rendering contains a '-' sign that crashed the
            # int() conversion below with a confusing message.
            raise ValueError("unsupported character %r in %r" % (letter, string))
        binary_string = "{0:06b}".format(index)
        for b in binary_string:
            encoded.append(int(b))

    return encoded + encoded + encoded
def create_qr(string: str):
    """Render *string* (max 16 chars) as a 960x960 RGB code image.

    Layout: white background, black inner border frame, start/end corner
    markers, a central bull's-eye, and an 18x18 data grid whose central
    6x6 area is reserved for the circles. Each of the remaining 288 cells
    encodes one bit (a black square encodes bit value 0).
    """
    data = encode_string(string)
    assert len(data) == 96 * 3

    # Start from an all-white single-channel canvas.
    image = np.ones((SQUARES * PX, SQUARES * PX), dtype=np.uint8)
    image[:] = 255

    draw_black_inner_border(image)
    draw_start_corner(image)
    draw_end_corner(image)
    draw_circles(image)

    # Data area starts one square inside the black border frame.
    sx = 3
    sy = 3

    # k indexes the next data bit; reserved cells consume no bits, so the
    # 18*18 - 6*6 = 288 usable cells exactly hold the 288-bit payload.
    k = 0
    for i in range(DATA_ROWS):
        for j in range(DATA_COLS):
            x = (sx + i)
            y = (sy + j)
            if (x >= 9 and x <= 14) and (y >= 9 and y <= 14):  # reserved space for the circles
                continue
            if data[k] == 0:
                draw_square(image, x, y)
            k += 1

    # Expand to 3 channels for display/saving with OpenCV.
    qr = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    return qr
if __name__ == '__main__':
    # Demo: render a sample code and display it until 'q' is pressed.
    qr = create_qr("Hello world!")
    cv2.imshow("frame", qr)

    while True:
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    # cv2.imwrite("qr5.jpg", qr)
| import matplotlib.pyplot as plt
import numpy as np
import cv2
__all__ = ['create_qr']
SQUARES = 24 # squares per side
PX = 40 # pixels per square
TOTAL_SIZE = SQUARES * PX
CENTER = TOTAL_SIZE//2
DATA_ROWS = SQUARES - 6
DATA_COLS = DATA_ROWS
BIG_CIRCLE_RADIUS = 2 * PX
SMALL_CIRCLE_RADIUS = 1 * PX
def create_circular_mask(x, y, r, w, h):
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - x)**2 + (Y - y)**2)
mask = dist_from_center <= r
return mask
def draw_square(image, i, j):
x = i*PX
y = j*PX
xp = (i+1)*PX
yp = (j+1)*PX
image[y:yp, x:xp] = 0
def draw_black_inner_border(image):
image[PX:-PX, PX:2*PX] = 0
image[PX:-PX, -2*PX:-PX] = 0
image[PX:2*PX, PX:-PX] = 0
image[-2*PX:-PX, PX:-PX] = 0
def draw_start_corner(image):
draw_square(image, 2, 2)
def draw_end_corner(image):
draw_square(image, 2, SQUARES - 4)
draw_square(image, 2, SQUARES - 3)
draw_square(image, 3, SQUARES - 3)
def draw_circles(image):
big_circle_mask = create_circular_mask(CENTER, CENTER, BIG_CIRCLE_RADIUS, TOTAL_SIZE, TOTAL_SIZE)
image[big_circle_mask] = 0
small_circle_mask = create_circular_mask(CENTER, CENTER, SMALL_CIRCLE_RADIUS, TOTAL_SIZE, TOTAL_SIZE)
image[small_circle_mask] = 255
def encode_string(string: str):
assert len(string) <= 16
data = list(string)
if len(data) < 16:
data += [" "] * (16 - len(data))
BASE64 = " !0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
encoded = []
for letter in data:
index = BASE64.find(letter)
binary_string = "{0:06b}".format(index)
for b in binary_string:
encoded.append(int(b))
return encoded + encoded + encoded
def create_qr(string: str):
data = encode_string(string)
assert len(data) == 96 * 3
image = np.ones((SQUARES * PX, SQUARES * PX), dtype=np.uint8)
image[:] = 255
draw_black_inner_border(image)
draw_start_corner(image)
draw_end_corner(image)
draw_circles(image)
sx = 3
sy = 3
k = 0
for i in range(DATA_ROWS):
for j in range(DATA_COLS):
x = (sx + i)
y = (sy + j)
if (x >= 9 and x <= 14) and (y >= 9 and y <= 14): # reserved space for the circles
continue
if data[k] == 0:
draw_square(image, x, y)
k += 1
qr = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
return qr
if __name__ == '__main__':
qr = create_qr("Hello world!")
cv2.imshow("frame", qr)
while True:
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
# cv2.imwrite("qr5.jpg", qr) | en | 0.510751 | # squares per side # pixels per square # reserved space for the circles # cv2.imwrite("qr5.jpg", qr) | 2.920002 | 3 |
check_git_repo_bare.py | adolci/nagios-plugins | 0 | 6614095 | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2016-07-25 14:57:36 +0100 (Mon, 25 Jul 2016)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check a Git repo is bare
You can negate the result if you want to check the repo isn't bare
Requires the 'git' command in the $PATH, otherwise you can set the path to the git
executable using the environment variable GIT_PYTHON_GIT_EXECUTABLE
See also check_git_checkout_branch.py
check_git_uncommitted_changes.py - handles untracked files and gives better feedback, reporting and stats
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import os
import sys
import traceback
import git
# Put the adjacent 'pylib' directory on the import path so the bundled
# helper packages below can be imported when running from a checkout.
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
    # pylint: disable=wrong-import-position
    from git import InvalidGitRepositoryError
    from harisekhon.utils import CriticalError, validate_directory
    from harisekhon import NagiosPlugin
except ImportError as _:
    # Bail out with a traceback and a distinct exit code if imports fail.
    print(traceback.format_exc(), end='')
    sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.1'
class CheckGitRepoBare(NagiosPlugin):
    """Nagios plugin: check whether a local Git repository is bare."""

    def __init__(self):
        # Python 2.x
        super(CheckGitRepoBare, self).__init__()
        # Python 3.x
        # super().__init__()
        self.msg = 'CheckGitRepoBare msg not defined'
        # Default to OK; run() escalates to CRITICAL on mismatch.
        self.ok()

    def add_options(self):
        self.add_opt('-d', '--directory', action='store', help='Path to git repo directory')
        self.add_opt('--not-bare', action='store_true', help="Check repo isn't bare")

    def run(self):
        """Open the repo at --directory and compare its bareness with expectation."""
        self.no_args()
        directory = self.get_opt('directory')
        validate_directory(directory)
        directory = os.path.abspath(directory)
        try:
            repo = git.Repo(directory)
        except InvalidGitRepositoryError:
            raise CriticalError("directory '{}' does not contain a valid Git repository!".format(directory))
        is_bare = repo.bare
        self.msg = "git checkout bare = '{}' for directory '{}'".format(is_bare, directory)
        if self.get_opt('not_bare'):
            # --not-bare given: a bare repo is the failure condition.
            if is_bare:
                self.critical()
        else:
            # Default: expect a bare repo; non-bare is the failure condition.
            if not is_bare:
                self.critical()
if __name__ == '__main__':
    # Parse options, run the check and exit with the Nagios status code.
    CheckGitRepoBare().main()
| #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2016-07-25 14:57:36 +0100 (Mon, 25 Jul 2016)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check a Git repo is bare
You can negate the result if you want to check the repo isn't bare
Requires the 'git' command in the $PATH, otherwise you can set the path to the git
executable using the environment variable GIT_PYTHON_GIT_EXECUTABLE
See also check_git_checkout_branch.py
check_git_uncommitted_changes.py - handles untracked files and gives better feedback, reporting and stats
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import os
import sys
import traceback
import git
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from git import InvalidGitRepositoryError
from harisekhon.utils import CriticalError, validate_directory
from harisekhon import NagiosPlugin
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.1'
class CheckGitRepoBare(NagiosPlugin):
def __init__(self):
# Python 2.x
super(CheckGitRepoBare, self).__init__()
# Python 3.x
# super().__init__()
self.msg = 'CheckGitRepoBare msg not defined'
self.ok()
def add_options(self):
self.add_opt('-d', '--directory', action='store', help='Path to git repo directory')
self.add_opt('--not-bare', action='store_true', help="Check repo isn't bare")
def run(self):
self.no_args()
directory = self.get_opt('directory')
validate_directory(directory)
directory = os.path.abspath(directory)
try:
repo = git.Repo(directory)
except InvalidGitRepositoryError:
raise CriticalError("directory '{}' does not contain a valid Git repository!".format(directory))
is_bare = repo.bare
self.msg = "git checkout bare = '{}' for directory '{}'".format(is_bare, directory)
if self.get_opt('not_bare'):
if is_bare:
self.critical()
else:
if not is_bare:
self.critical()
if __name__ == '__main__':
CheckGitRepoBare().main()
| en | 0.663597 | #!/usr/bin/env python # vim:ts=4:sts=4:sw=4:et # # Author: <NAME> # Date: 2016-07-25 14:57:36 +0100 (Mon, 25 Jul 2016) # # https://github.com/harisekhon/nagios-plugins # # License: see accompanying Hari Sekhon LICENSE file # # If you're using my code you're welcome to connect with me on LinkedIn # and optionally send me feedback to help steer this or other code I publish # # https://www.linkedin.com/in/harisekhon # Nagios Plugin to check a Git repo is bare You can negate the result if you want to check the repo isn't bare Requires the 'git' command in the $PATH, otherwise you can set the path to the git executable using the environment variable GIT_PYTHON_GIT_EXECUTABLE See also check_git_checkout_branch.py check_git_uncommitted_changes.py - handles untracked files and gives better feedback, reporting and stats #from __future__ import unicode_literals # pylint: disable=wrong-import-position # Python 2.x # Python 3.x # super().__init__() | 1.707626 | 2 |
src/radical/pilot/agent/launch_method/mpiexec.py | radical-cybertools/radical.pilot | 47 | 6614096 | <gh_stars>10-100
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
import radical.utils as ru
from .base import LaunchMethod
# ------------------------------------------------------------------------------
#
class MPIExec(LaunchMethod):
    """Launch method that starts MPI ranks via an ``mpiexec`` flavor.

    Variants are selected by substrings in the LM name: '_mpt', '_rsh',
    '_ccmrun' and '_dplace' toggle the corresponding tools/flags.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, lm_cfg, rm_info, log, prof):

        # Defaults; the _init_from_* hooks below fill in the real values.
        self._mpt : bool = False
        self._rsh : bool = False
        self._ccmrun : str = ''
        self._dplace : str = ''
        self._omplace: str = ''
        self._command: str = ''

        LaunchMethod.__init__(self, name, lm_cfg, rm_info, log, prof)

    # --------------------------------------------------------------------------
    #
    def _init_from_scratch(self, env, env_sh):
        # Discover the mpiexec executable and helper tools on this host and
        # return the resulting info dict (consumed by _init_from_info).

        lm_info = {
            'env' : env,
            'env_sh' : env_sh,
            'command': ru.which([
                'mpiexec', # General case
                'mpiexec.mpich', # Linux, MPICH
                'mpiexec.hydra', # Linux, MPICH
                'mpiexec.openmpi', # Linux, Open MPI
                'mpiexec-mpich-mp', # Mac OSX MacPorts
                'mpiexec-openmpi-mp', # Mac OSX MacPorts
                'mpiexec_mpt', # Cheyenne (NCAR)
            ]),
            'mpt' : False,
            'rsh' : False,
            'ccmrun' : '',
            'dplace' : '',
            'omplace': ''
        }

        if '_mpt' in self.name.lower():
            lm_info['mpt'] = True

        if '_rsh' in self.name.lower():
            lm_info['rsh'] = True

        # do we need ccmrun or dplace?
        if '_ccmrun' in self.name.lower():
            lm_info['ccmrun'] = ru.which('ccmrun')
            assert lm_info['ccmrun']

        if '_dplace' in self.name.lower():
            lm_info['dplace'] = ru.which('dplace')
            assert lm_info['dplace']

        # cheyenne always needs mpt and omplace
        if 'cheyenne' in ru.get_hostname():
            lm_info['omplace'] = 'omplace'
            lm_info['mpt'] = True

        mpi_version, mpi_flavor = self._get_mpi_info(lm_info['command'])
        lm_info['mpi_version'] = mpi_version
        lm_info['mpi_flavor'] = mpi_flavor

        return lm_info

    # --------------------------------------------------------------------------
    #
    def _init_from_info(self, lm_info):
        # Restore launcher state from a previously collected info dict.

        self._env = lm_info['env']
        self._env_sh = lm_info['env_sh']
        self._command = lm_info['command']

        assert self._command

        self._mpt = lm_info['mpt']
        self._rsh = lm_info['rsh']
        self._dplace = lm_info['dplace']
        self._ccmrun = lm_info['ccmrun']

        self._mpi_version = lm_info['mpi_version']
        self._mpi_flavor = lm_info['mpi_flavor']

        # ensure empty string on unset omplace
        if not lm_info['omplace']:
            self._omplace = ''
        else:
            self._omplace = 'omplace'

    # --------------------------------------------------------------------------
    #
    def finalize(self):
        # No resources to release for this launch method.
        pass

    # --------------------------------------------------------------------------
    #
    def can_launch(self, task):
        # This LM can launch any task that specifies an executable.
        if not task['description']['executable']:
            return False, 'no executable'

        return True, ''

    # --------------------------------------------------------------------------
    #
    def get_launcher_env(self):
        # Source the recorded environment script before launching.
        return ['. $RP_PILOT_SANDBOX/%s' % self._env_sh]

    # --------------------------------------------------------------------------
    #
    def get_launch_cmds(self, task, exec_path):
        # Build the mpiexec command line (or hostfile) for the task's slots.

        slots = task['slots']

        if 'ranks' not in slots:
            raise RuntimeError('insufficient information to launch via %s: %s'
                              % (self.name, slots))

        # extract a map of hosts and #slots from slots.  We count cpu
        # slot sets, but do not account for threads.  Since multiple slots
        # entries can have the same node names, we *add* new information.
        host_slots = dict()
        for rank in slots['ranks']:
            node_name = rank['node_name']
            if node_name not in host_slots:
                host_slots[node_name] = 0
            host_slots[node_name] += len(rank['core_map'])

        # If we have a Task with many cores, and the compression didn't work
        # out, we will create a hostfile and pass that as an argument
        # instead of the individual hosts.  The hostfile has this format:
        #
        #   node_name_1 slots=n_1
        #   node_name_2 slots=n_2
        #   ...
        #
        # where the slot number is the number of processes we intent to spawn.
        #
        # For smaller node sets, we construct command line parameters as
        # clusters of nodes with the same number of processes, like this:
        #
        #   -host node_name_1,node_name_2 -n 8 : -host node_name_3 -n 4 : ...
        #
        # POSIX defines a min argument limit of 4096 bytes, so we try to
        # construct a command line, and switch to hostfile if that limit is
        # exceeded.  We assume that Disk I/O for the hostfile is much larger
        # than the time needed to construct the command line.
        arg_max = 4096

        # This is the command w/o the host string
        cmd_stub = '%s %%s %s %s' % (self._command, self._omplace, exec_path)

        # cluster hosts by number of slots
        host_string = ''
        if not self._mpt:
            for node, nslots in list(host_slots.items()):
                host_string += '-host '
                host_string += '%s -n %s ' % (','.join([node] * nslots), nslots)
        else:
            # MPT flavor: one comma-separated host list, one rank per entry.
            hosts = list()
            for node, nslots in list(host_slots.items()):
                hosts += [node] * nslots
            host_string += ','.join(hosts)
            host_string += ' -n 1'

        cmd = cmd_stub % host_string

        if len(cmd) > arg_max:

            # Create a hostfile from the list of hosts.  We create that in the
            # task sandbox
            hostfile = '%s/mpi_hostfile' % task['task_sandbox_path']
            with ru.ru_open(hostfile, 'w') as f:
                for node, nslots in list(host_slots.items()):
                    f.write('%20s \tslots=%s\n' % (node, nslots))
            host_string = '-hostfile %s' % hostfile

        cmd = cmd_stub % host_string
        self._log.debug('mpiexec cmd: %s', cmd)

        assert(len(cmd) <= arg_max)

        # Cheyenne is the only machine that requires mpiexec_mpt.
        # We then have to set MPI_SHEPHERD=true
        if self._mpt:
            cmd = 'export MPI_SHEPHERD=true\n%s' % cmd

        return cmd.strip()

    # --------------------------------------------------------------------------
    #
    def get_rank_cmd(self):
        # Shell snippet that exports RP_RANK from whichever MPI rank
        # variable the runtime provides.
        # FIXME: we know the MPI flavor, so make this less guesswork
        ret = 'test -z "$MPI_RANK" || export RP_RANK=$MPI_RANK\n'
        ret += 'test -z "$PMIX_RANK" || export RP_RANK=$PMIX_RANK\n'

        return ret

    # --------------------------------------------------------------------------
    #
    def get_rank_exec(self, task, rank_id, rank):
        # Compose the per-rank executable invocation string.

        td = task['description']
        task_exec = td['executable']
        task_args = td['arguments']
        task_argstr = self._create_arg_string(task_args)
        command = '%s %s' % (task_exec, task_argstr)

        return command.rstrip()
# ------------------------------------------------------------------------------
| __copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
import radical.utils as ru
from .base import LaunchMethod
# ------------------------------------------------------------------------------
#
class MPIExec(LaunchMethod):
# --------------------------------------------------------------------------
#
def __init__(self, name, lm_cfg, rm_info, log, prof):
self._mpt : bool = False
self._rsh : bool = False
self._ccmrun : str = ''
self._dplace : str = ''
self._omplace: str = ''
self._command: str = ''
LaunchMethod.__init__(self, name, lm_cfg, rm_info, log, prof)
# --------------------------------------------------------------------------
#
def _init_from_scratch(self, env, env_sh):
lm_info = {
'env' : env,
'env_sh' : env_sh,
'command': ru.which([
'mpiexec', # General case
'mpiexec.mpich', # Linux, MPICH
'mpiexec.hydra', # Linux, MPICH
'mpiexec.openmpi', # Linux, MPICH
'mpiexec-mpich-mp', # Mac OSX MacPorts
'mpiexec-openmpi-mp', # Mac OSX MacPorts
'mpiexec_mpt', # Cheyenne (NCAR)
]),
'mpt' : False,
'rsh' : False,
'ccmrun' : '',
'dplace' : '',
'omplace': ''
}
if '_mpt' in self.name.lower():
lm_info['mpt'] = True
if '_rsh' in self.name.lower():
lm_info['rsh'] = True
# do we need ccmrun or dplace?
if '_ccmrun' in self.name.lower():
lm_info['ccmrun'] = ru.which('ccmrun')
assert lm_info['ccmrun']
if '_dplace' in self.name.lower():
lm_info['dplace'] = ru.which('dplace')
assert lm_info['dplace']
# cheyenne always needs mpt and omplace
if 'cheyenne' in ru.get_hostname():
lm_info['omplace'] = 'omplace'
lm_info['mpt'] = True
mpi_version, mpi_flavor = self._get_mpi_info(lm_info['command'])
lm_info['mpi_version'] = mpi_version
lm_info['mpi_flavor'] = mpi_flavor
return lm_info
# --------------------------------------------------------------------------
#
def _init_from_info(self, lm_info):
self._env = lm_info['env']
self._env_sh = lm_info['env_sh']
self._command = lm_info['command']
assert self._command
self._mpt = lm_info['mpt']
self._rsh = lm_info['rsh']
self._dplace = lm_info['dplace']
self._ccmrun = lm_info['ccmrun']
self._mpi_version = lm_info['mpi_version']
self._mpi_flavor = lm_info['mpi_flavor']
# ensure empty string on unset omplace
if not lm_info['omplace']:
self._omplace = ''
else:
self._omplace = 'omplace'
# --------------------------------------------------------------------------
#
def finalize(self):
pass
# --------------------------------------------------------------------------
#
def can_launch(self, task):
if not task['description']['executable']:
return False, 'no executable'
return True, ''
# --------------------------------------------------------------------------
#
def get_launcher_env(self):
return ['. $RP_PILOT_SANDBOX/%s' % self._env_sh]
# --------------------------------------------------------------------------
#
def get_launch_cmds(self, task, exec_path):
slots = task['slots']
if 'ranks' not in slots:
raise RuntimeError('insufficient information to launch via %s: %s'
% (self.name, slots))
# extract a map of hosts and #slots from slots. We count cpu
# slot sets, but do not account for threads. Since multiple slots
# entries can have the same node names, we *add* new information.
host_slots = dict()
for rank in slots['ranks']:
node_name = rank['node_name']
if node_name not in host_slots:
host_slots[node_name] = 0
host_slots[node_name] += len(rank['core_map'])
# If we have a Task with many cores, and the compression didn't work
# out, we will create a hostfile and pass that as an argument
# instead of the individual hosts. The hostfile has this format:
#
# node_name_1 slots=n_1
# node_name_2 slots=n_2
# ...
#
# where the slot number is the number of processes we intent to spawn.
#
# For smaller node sets, we construct command line parameters as
# clusters of nodes with the same number of processes, like this:
#
# -host node_name_1,node_name_2 -n 8 : -host node_name_3 -n 4 : ...
#
# POSIX defines a min argument limit of 4096 bytes, so we try to
# construct a command line, and switch to hostfile if that limit is
# exceeded. We assume that Disk I/O for the hostfile is much larger
# than the time needed to construct the command line.
arg_max = 4096
# This is the command w/o the host string
cmd_stub = '%s %%s %s %s' % (self._command, self._omplace, exec_path)
# cluster hosts by number of slots
host_string = ''
if not self._mpt:
for node, nslots in list(host_slots.items()):
host_string += '-host '
host_string += '%s -n %s ' % (','.join([node] * nslots), nslots)
else:
hosts = list()
for node, nslots in list(host_slots.items()):
hosts += [node] * nslots
host_string += ','.join(hosts)
host_string += ' -n 1'
cmd = cmd_stub % host_string
if len(cmd) > arg_max:
# Create a hostfile from the list of hosts. We create that in the
# task sandbox
hostfile = '%s/mpi_hostfile' % task['task_sandbox_path']
with ru.ru_open(hostfile, 'w') as f:
for node, nslots in list(host_slots.items()):
f.write('%20s \tslots=%s\n' % (node, nslots))
host_string = '-hostfile %s' % hostfile
cmd = cmd_stub % host_string
self._log.debug('mpiexec cmd: %s', cmd)
assert(len(cmd) <= arg_max)
# Cheyenne is the only machine that requires mpiexec_mpt.
# We then have to set MPI_SHEPHERD=true
if self._mpt:
cmd = 'export MPI_SHEPHERD=true\n%s' % cmd
return cmd.strip()
# --------------------------------------------------------------------------
#
def get_rank_cmd(self):
# FIXME: we know the MPI flavor, so make this less guesswork
ret = 'test -z "$MPI_RANK" || export RP_RANK=$MPI_RANK\n'
ret += 'test -z "$PMIX_RANK" || export RP_RANK=$PMIX_RANK\n'
return ret
# --------------------------------------------------------------------------
#
def get_rank_exec(self, task, rank_id, rank):
td = task['description']
task_exec = td['executable']
task_args = td['arguments']
task_argstr = self._create_arg_string(task_args)
command = '%s %s' % (task_exec, task_argstr)
return command.rstrip()
# ------------------------------------------------------------------------------ | en | 0.616643 | # ------------------------------------------------------------------------------ # # -------------------------------------------------------------------------- # # -------------------------------------------------------------------------- # # General case # Linux, MPICH # Linux, MPICH # Linux, MPICH # Mac OSX MacPorts # Mac OSX MacPorts # Cheyenne (NCAR) # do we need ccmrun or dplace? # cheyenne always needs mpt and omplace # -------------------------------------------------------------------------- # # ensure empty string on unset omplace # -------------------------------------------------------------------------- # # -------------------------------------------------------------------------- # # -------------------------------------------------------------------------- # # -------------------------------------------------------------------------- # # extract a map of hosts and #slots from slots. We count cpu # slot sets, but do not account for threads. Since multiple slots # entries can have the same node names, we *add* new information. # If we have a Task with many cores, and the compression didn't work # out, we will create a hostfile and pass that as an argument # instead of the individual hosts. The hostfile has this format: # # node_name_1 slots=n_1 # node_name_2 slots=n_2 # ... # # where the slot number is the number of processes we intent to spawn. # # For smaller node sets, we construct command line parameters as # clusters of nodes with the same number of processes, like this: # # -host node_name_1,node_name_2 -n 8 : -host node_name_3 -n 4 : ... # # POSIX defines a min argument limit of 4096 bytes, so we try to # construct a command line, and switch to hostfile if that limit is # exceeded. We assume that Disk I/O for the hostfile is much larger # than the time needed to construct the command line. 
# This is the command w/o the host string # cluster hosts by number of slots # Create a hostfile from the list of hosts. We create that in the # task sandbox # Cheyenne is the only machine that requires mpiexec_mpt. # We then have to set MPI_SHEPHERD=true # -------------------------------------------------------------------------- # # FIXME: we know the MPI flavor, so make this less guesswork # -------------------------------------------------------------------------- # # ------------------------------------------------------------------------------ | 1.981876 | 2 |
Fundamentos/funciones-con-argumentos.py | ijchavez/python | 0 | 6614097 | nombre = input("Ingrese un nombre: ")
edad = int(input("Ingrese una edad: "))
def funcion_arg(nombre, edad):
print("El nombre recibido es:",nombre, "de", edad,"años")
funcion_arg(nombre, edad)
def funcion_arg(nombre, apellido):
print("El nombre recibido es:",nombre)
print("El apellido recibido es:",apellido)
funcion_arg("Gerardo","Chavez")
| nombre = input("Ingrese un nombre: ")
edad = int(input("Ingrese una edad: "))
def funcion_arg(nombre, edad):
print("El nombre recibido es:",nombre, "de", edad,"años")
funcion_arg(nombre, edad)
def funcion_arg(nombre, apellido):
print("El nombre recibido es:",nombre)
print("El apellido recibido es:",apellido)
funcion_arg("Gerardo","Chavez")
| none | 1 | 3.957219 | 4 | |
codedigger/atcoder/scrapers_utils.py | jyothiprakashpanaik/Backend | 17 | 6614098 | import re
import json
import requests
from bs4 import BeautifulSoup as bs4
def get_all_contests_details(content):
contests_details = set()
soup = bs4(content, 'html5lib')
contestTable = soup.find('table', {'id': 'history'})
del soup
if contestTable != None:
contests = contestTable.find('tbody').findAll('tr')
del contestTable
for contest in contests:
contests_details.add(
contest.findAll('td')[1].find('a')['href'].split('/')[-1])
del contests
return contests_details | import re
import json
import requests
from bs4 import BeautifulSoup as bs4
def get_all_contests_details(content):
contests_details = set()
soup = bs4(content, 'html5lib')
contestTable = soup.find('table', {'id': 'history'})
del soup
if contestTable != None:
contests = contestTable.find('tbody').findAll('tr')
del contestTable
for contest in contests:
contests_details.add(
contest.findAll('td')[1].find('a')['href'].split('/')[-1])
del contests
return contests_details | none | 1 | 2.754603 | 3 | |
roster_repository.py | galiarmero/fantasy-basketball-tools | 0 | 6614099 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as expect
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from bs4 import BeautifulSoup
import re
import sys
from config import YAHOO_FANTASY_URL
TEAM_NAME_CLASS = "Mawpx-250"
WIN_LOSS_DRAW_CLASS = "Tst-wlt"
POS_LABEL_CLASS = "pos-label"
PLAYER_NAME_CLASS = "ysf-player-name"
class RosterRepository(object):
def __init__(self, driver):
self._driver = driver
self._wait = WebDriverWait(self._driver, 10, poll_frequency=0.25)
def get_active_rosters(self, league_id):
self._assert_current_page_matches_league()
teams_info = self._get_teams_info()
for team in teams_info:
print("Generating roster for team {}".format(team['name']))
href = team['href']
self._driver.get(href)
try:
self._wait.until(expect.url_contains(href))
except TimeoutException:
print("Roster page for team {} is either invalid " \
"or taking long to load".format(team['name']))
sys.exit()
team['roster'] = self._get_roster(True)
return teams_info
def _get_roster(self, active=False):
stats_table = self._driver.find_element_by_id('statTable0').get_attribute('innerHTML')
stats_soup = BeautifulSoup(stats_table, 'html.parser')
roster_rows = [ tr for tr in stats_soup.find('tbody').find_all('tr') \
if 'empty-bench' not in tr['class'] ]
roster = []
for player_row in roster_rows:
roster_position = player_row.find(class_=POS_LABEL_CLASS)['data-pos']
if active and roster_position == 'IL':
continue
player = self._get_player_info(player_row)
if player:
player['roster_position'] = roster_position
roster.append(player)
return roster
def _get_player_info(self, player_row):
player_info_element = player_row.find(class_=PLAYER_NAME_CLASS)
player_name_element = player_info_element.find("a")
if not player_name_element:
return None
href = player_name_element['href']
name = player_name_element.text
team_pos = player_info_element.find("span").text.split(' - ')
team = team_pos[0].upper()
eligible_positions = team_pos[1].split(',')
return { 'name' : name, 'href' : href, 'team' : team, \
'eligible_positions' : eligible_positions }
def _get_teams_info(self):
standings = self._driver.find_element_by_id("standingstable").get_attribute('innerHTML')
standings_soup = BeautifulSoup(standings, 'html.parser')
team_rows = standings_soup.find('tbody').find_all('tr')
print("Generating teams' information")
teams_info = []
for team_row in team_rows:
id = int(team_row['data-target'].split('/')[-1])
team_name_element = team_row.find(class_=TEAM_NAME_CLASS)
name = team_name_element.text
href = YAHOO_FANTASY_URL + team_name_element['href']
record = team_row.find(class_=WIN_LOSS_DRAW_CLASS).text.split('-')
wins = int(record[0])
losses = int(record[1])
draws = int(record[2])
teams_info.append({
'id' : id,
'name' : name,
'href' : href,
'wins' : wins,
'losses' : losses,
'draws' : draws
})
return teams_info
def _assert_current_page_matches_league(self):
if not re.search(YAHOO_FANTASY_URL + '/nba/\d+.*?$', self._driver.current_url):
print("Current page is not a league page. Exiting.")
sys.exit() | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as expect
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from bs4 import BeautifulSoup
import re
import sys
from config import YAHOO_FANTASY_URL
TEAM_NAME_CLASS = "Mawpx-250"
WIN_LOSS_DRAW_CLASS = "Tst-wlt"
POS_LABEL_CLASS = "pos-label"
PLAYER_NAME_CLASS = "ysf-player-name"
class RosterRepository(object):
def __init__(self, driver):
self._driver = driver
self._wait = WebDriverWait(self._driver, 10, poll_frequency=0.25)
def get_active_rosters(self, league_id):
self._assert_current_page_matches_league()
teams_info = self._get_teams_info()
for team in teams_info:
print("Generating roster for team {}".format(team['name']))
href = team['href']
self._driver.get(href)
try:
self._wait.until(expect.url_contains(href))
except TimeoutException:
print("Roster page for team {} is either invalid " \
"or taking long to load".format(team['name']))
sys.exit()
team['roster'] = self._get_roster(True)
return teams_info
def _get_roster(self, active=False):
stats_table = self._driver.find_element_by_id('statTable0').get_attribute('innerHTML')
stats_soup = BeautifulSoup(stats_table, 'html.parser')
roster_rows = [ tr for tr in stats_soup.find('tbody').find_all('tr') \
if 'empty-bench' not in tr['class'] ]
roster = []
for player_row in roster_rows:
roster_position = player_row.find(class_=POS_LABEL_CLASS)['data-pos']
if active and roster_position == 'IL':
continue
player = self._get_player_info(player_row)
if player:
player['roster_position'] = roster_position
roster.append(player)
return roster
def _get_player_info(self, player_row):
player_info_element = player_row.find(class_=PLAYER_NAME_CLASS)
player_name_element = player_info_element.find("a")
if not player_name_element:
return None
href = player_name_element['href']
name = player_name_element.text
team_pos = player_info_element.find("span").text.split(' - ')
team = team_pos[0].upper()
eligible_positions = team_pos[1].split(',')
return { 'name' : name, 'href' : href, 'team' : team, \
'eligible_positions' : eligible_positions }
def _get_teams_info(self):
standings = self._driver.find_element_by_id("standingstable").get_attribute('innerHTML')
standings_soup = BeautifulSoup(standings, 'html.parser')
team_rows = standings_soup.find('tbody').find_all('tr')
print("Generating teams' information")
teams_info = []
for team_row in team_rows:
id = int(team_row['data-target'].split('/')[-1])
team_name_element = team_row.find(class_=TEAM_NAME_CLASS)
name = team_name_element.text
href = YAHOO_FANTASY_URL + team_name_element['href']
record = team_row.find(class_=WIN_LOSS_DRAW_CLASS).text.split('-')
wins = int(record[0])
losses = int(record[1])
draws = int(record[2])
teams_info.append({
'id' : id,
'name' : name,
'href' : href,
'wins' : wins,
'losses' : losses,
'draws' : draws
})
return teams_info
def _assert_current_page_matches_league(self):
if not re.search(YAHOO_FANTASY_URL + '/nba/\d+.*?$', self._driver.current_url):
print("Current page is not a league page. Exiting.")
sys.exit() | none | 1 | 2.81903 | 3 | |
py/leetcode_easy/buy_sell_stock.py | mp40/Whiteboard | 0 | 6614100 | def maxProfit(prices):
profit = 0
buy = prices[0]
i = 1
while i < len(prices):
if prices[i] < buy:
buy = prices[i]
elif profit < prices[i] - buy:
profit = prices[i] - buy
i += 1
return profit
| def maxProfit(prices):
profit = 0
buy = prices[0]
i = 1
while i < len(prices):
if prices[i] < buy:
buy = prices[i]
elif profit < prices[i] - buy:
profit = prices[i] - buy
i += 1
return profit
| none | 1 | 3.325115 | 3 | |
scripts/eval_noisy_gen.py | nng555/fairseq | 2 | 6614101 | <reponame>nng555/fairseq
from fairseq.models.roberta import RobertaModel
from fairseq.models.lstm_classifier import LSTMClassifier
from fairseq.models.fconv_classifier import FConvClassifier
import argparse
import json
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import hydra
from operator import add
from omegaconf import DictConfig
from hydra import slurm_utils
@hydra.main(config_path=os.path.expandvars('$HOME/conf/$PROJ'), config_name='config')
def evaluate_model(cfg: DictConfig):
slurm_utils.symlink_hydra(cfg, os.getcwd())
if cfg.data.task in ['nli']:
base_path = '/scratch/ssd001/datasets/'
elif cfg.data.task in ['sentiment']:
base_path = '/h/nng/data'
else:
raise Exception('task %s data path not found'.format(cfg.data.task))
model_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.model.data)
eval_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.data)
if cfg.eval.model.date:
model_path = os.path.join('/h/nng/slurm', cfg.eval.model.date, slurm_utils.resolve_name(cfg.eval.model.name))
if not os.path.exists(os.path.join(model_path, 'checkpoint_best.pt')):
for f in sorted(os.listdir(model_path))[::-1]:
if os.path.exists(os.path.join(model_path, f, 'checkpoint_best.pt')):
model_path = os.path.join(model_path, f)
break
else:
model_path = os.path.join('/checkpoint/nng/keep', slurm_utils.resolve_name(cfg.eval.model.name))
dict_path = os.path.join(model_data_path, cfg.data.fdset, cfg.eval.model.bin, 'bin')
print(dict_path)
ckpt_file = 'checkpoint_best.pt'
print(model_path)
if 'roberta' in cfg.train.arch:
model = RobertaModel.from_pretrained(
model_path,
checkpoint_file=ckpt_file,
data_name_or_path = dict_path
)
elif cfg.train.arch == 'fconv_classifier':
model = FConvClassifier.from_pretrained(
model_path,
checkpoint_file=ckpt_file,
data_name_or_path = dict_path
)
elif cfg.train.arch == 'lstm_classifier':
model = LSTMClassifier.from_pretrained(
model_path,
checkpoint_file=ckpt_file,
data_name_or_path = dict_path
)
else:
raise Exception("Arch %s not supported".format(cfg.train.arch))
label_fn = lambda label: model.task.label_dictionary.string(
[label + model.task.label_dictionary.nspecial]
)
model.cuda()
model.eval()
# check for existing res json here
with open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.gen.input0')) as input0f, \
open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.id')) as idf, \
open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.raw.label')) as targetf:
input0 = input0f.readlines()
target = targetf.readlines()
ids = idf.readlines()
if cfg.data.task in ['nli']:
input1f = open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.raw.input1'))
input1 = input1f.readlines()
files = [input0, input1, target, ids]
else:
files = [input0, target, ids]
res = {}
targets = {}
nsamples = 0
j_dir = slurm_utils.get_j_dir(cfg)
status_path = os.path.join(j_dir, 'eval_status.json')
if os.path.exists(status_path):
eval_status = json.load(open(status_path))
res, targets, nsamples = eval_status
files = [f[nsamples::] for f in files]
batch_num = math.ceil(len(files[0])/cfg.eval.batch)
for b in range(batch_num):
bstart = b * cfg.eval.batch
bend = (b+1) * cfg.eval.batch
if bstart >= len(files[0]):
break
print("Processing {} to {}".format(str(bstart), str(bend)), flush=True)
if cfg.data.task in ['nli']:
toks = [model.encode(s1.strip(), s2.strip()) for s1, s2 in zip(input0[bstart:bend], input1[bstart:bend])]
else:
toks = [model.encode(s1.strip()) for s1 in input0[bstart:bend]]
max_len = max([len(tok) for tok in toks])
toks = [F.pad(tok, (0, max_len - len(tok)), 'constant', model.task.source_dictionary.pad()) for tok in toks]
toks = torch.stack(toks).cuda()
if cfg.data.name != 'mnli':
if len(toks[0]) > model.max_positions[0]:
continue
pred_probs = model.predict(
'sentence_classification_head',
toks,
return_logits=(cfg.train.regression_target or cfg.train.ordinal)
).cpu().detach().numpy()
for b_id, t in zip(ids[bstart:bend], target[bstart:bend]):
b_id = b_id.strip()
t = t.strip()
if b_id not in res:
res[b_id] = [0.0 for _ in range(cfg.data.num_classes)]
if b_id not in targets:
targets[b_id] = t
for b_id, pred_prob in zip(ids[bstart:bend], pred_probs):
b_id = b_id.strip()
if cfg.train.regression_target:
res[b_id].append((pred_prob[0] - float(ex[-1]))**2)
res[b_id] += (pred_prob[0] - float(ex[-1]))**2
else:
if cfg.train.ordinal:
pred_prob = 1 / (1 + np.exp(-pred_prob))
pred_prob = pred_prob - np.pad(pred_prob[1:], (0,1), 'constant')
pred_prob = np.exp(pred_prob)/sum(np.exp(pred_prob))
pred_prob= pred_prob.tolist()
res[b_id] = list(map(add, res[b_id], pred_prob))
nsamples += len(pred_probs)
with open(status_path, 'w') as statusf:
json.dump([res, targets, nsamples], statusf)
nval = 0
for k in res:
prediction = np.asarray(res[k]).argmax()
prediction_label = label_fn(prediction)
nval += int(prediction_label == targets[k])
print(cfg.data.tdset + ' | Accuracy: ', float(nval)/len(res))
if __name__ == '__main__':
evaluate_model()
| from fairseq.models.roberta import RobertaModel
from fairseq.models.lstm_classifier import LSTMClassifier
from fairseq.models.fconv_classifier import FConvClassifier
import argparse
import json
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import hydra
from operator import add
from omegaconf import DictConfig
from hydra import slurm_utils
@hydra.main(config_path=os.path.expandvars('$HOME/conf/$PROJ'), config_name='config')
def evaluate_model(cfg: DictConfig):
slurm_utils.symlink_hydra(cfg, os.getcwd())
if cfg.data.task in ['nli']:
base_path = '/scratch/ssd001/datasets/'
elif cfg.data.task in ['sentiment']:
base_path = '/h/nng/data'
else:
raise Exception('task %s data path not found'.format(cfg.data.task))
model_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.model.data)
eval_data_path = os.path.join(base_path, cfg.data.task, cfg.eval.data)
if cfg.eval.model.date:
model_path = os.path.join('/h/nng/slurm', cfg.eval.model.date, slurm_utils.resolve_name(cfg.eval.model.name))
if not os.path.exists(os.path.join(model_path, 'checkpoint_best.pt')):
for f in sorted(os.listdir(model_path))[::-1]:
if os.path.exists(os.path.join(model_path, f, 'checkpoint_best.pt')):
model_path = os.path.join(model_path, f)
break
else:
model_path = os.path.join('/checkpoint/nng/keep', slurm_utils.resolve_name(cfg.eval.model.name))
dict_path = os.path.join(model_data_path, cfg.data.fdset, cfg.eval.model.bin, 'bin')
print(dict_path)
ckpt_file = 'checkpoint_best.pt'
print(model_path)
if 'roberta' in cfg.train.arch:
model = RobertaModel.from_pretrained(
model_path,
checkpoint_file=ckpt_file,
data_name_or_path = dict_path
)
elif cfg.train.arch == 'fconv_classifier':
model = FConvClassifier.from_pretrained(
model_path,
checkpoint_file=ckpt_file,
data_name_or_path = dict_path
)
elif cfg.train.arch == 'lstm_classifier':
model = LSTMClassifier.from_pretrained(
model_path,
checkpoint_file=ckpt_file,
data_name_or_path = dict_path
)
else:
raise Exception("Arch %s not supported".format(cfg.train.arch))
label_fn = lambda label: model.task.label_dictionary.string(
[label + model.task.label_dictionary.nspecial]
)
model.cuda()
model.eval()
# check for existing res json here
with open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.gen.input0')) as input0f, \
open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.id')) as idf, \
open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.raw.label')) as targetf:
input0 = input0f.readlines()
target = targetf.readlines()
ids = idf.readlines()
if cfg.data.task in ['nli']:
input1f = open(os.path.join(eval_data_path, cfg.data.tdset, cfg.data.bin.name, cfg.eval.split + '.raw.input1'))
input1 = input1f.readlines()
files = [input0, input1, target, ids]
else:
files = [input0, target, ids]
res = {}
targets = {}
nsamples = 0
j_dir = slurm_utils.get_j_dir(cfg)
status_path = os.path.join(j_dir, 'eval_status.json')
if os.path.exists(status_path):
eval_status = json.load(open(status_path))
res, targets, nsamples = eval_status
files = [f[nsamples::] for f in files]
batch_num = math.ceil(len(files[0])/cfg.eval.batch)
for b in range(batch_num):
bstart = b * cfg.eval.batch
bend = (b+1) * cfg.eval.batch
if bstart >= len(files[0]):
break
print("Processing {} to {}".format(str(bstart), str(bend)), flush=True)
if cfg.data.task in ['nli']:
toks = [model.encode(s1.strip(), s2.strip()) for s1, s2 in zip(input0[bstart:bend], input1[bstart:bend])]
else:
toks = [model.encode(s1.strip()) for s1 in input0[bstart:bend]]
max_len = max([len(tok) for tok in toks])
toks = [F.pad(tok, (0, max_len - len(tok)), 'constant', model.task.source_dictionary.pad()) for tok in toks]
toks = torch.stack(toks).cuda()
if cfg.data.name != 'mnli':
if len(toks[0]) > model.max_positions[0]:
continue
pred_probs = model.predict(
'sentence_classification_head',
toks,
return_logits=(cfg.train.regression_target or cfg.train.ordinal)
).cpu().detach().numpy()
for b_id, t in zip(ids[bstart:bend], target[bstart:bend]):
b_id = b_id.strip()
t = t.strip()
if b_id not in res:
res[b_id] = [0.0 for _ in range(cfg.data.num_classes)]
if b_id not in targets:
targets[b_id] = t
for b_id, pred_prob in zip(ids[bstart:bend], pred_probs):
b_id = b_id.strip()
if cfg.train.regression_target:
res[b_id].append((pred_prob[0] - float(ex[-1]))**2)
res[b_id] += (pred_prob[0] - float(ex[-1]))**2
else:
if cfg.train.ordinal:
pred_prob = 1 / (1 + np.exp(-pred_prob))
pred_prob = pred_prob - np.pad(pred_prob[1:], (0,1), 'constant')
pred_prob = np.exp(pred_prob)/sum(np.exp(pred_prob))
pred_prob= pred_prob.tolist()
res[b_id] = list(map(add, res[b_id], pred_prob))
nsamples += len(pred_probs)
with open(status_path, 'w') as statusf:
json.dump([res, targets, nsamples], statusf)
nval = 0
for k in res:
prediction = np.asarray(res[k]).argmax()
prediction_label = label_fn(prediction)
nval += int(prediction_label == targets[k])
print(cfg.data.tdset + ' | Accuracy: ', float(nval)/len(res))
if __name__ == '__main__':
evaluate_model() | en | 0.286902 | # check for existing res json here | 1.833407 | 2 |
sketches/rotatetranslate2/rotatetranslate2.pyde | kantel/processingpy | 4 | 6614102 | t = 0
def setup():
size(600, 600)
this.surface.setTitle("Rotate und Translate")
rectMode(CENTER)
def draw():
global t
background(0, 0, 255)
translate(width/2, height/2)
rotate(radians(t))
fill(255, 255, 0)
for i in range(12):
with pushMatrix():
translate(200, 0)
rotate(radians(3*t))
rect(0, 0, 50, 50)
rotate(radians(360/12))
t += 1
| t = 0
def setup():
size(600, 600)
this.surface.setTitle("Rotate und Translate")
rectMode(CENTER)
def draw():
global t
background(0, 0, 255)
translate(width/2, height/2)
rotate(radians(t))
fill(255, 255, 0)
for i in range(12):
with pushMatrix():
translate(200, 0)
rotate(radians(3*t))
rect(0, 0, 50, 50)
rotate(radians(360/12))
t += 1
| none | 1 | 3.311069 | 3 | |
Python/7/NoOdditiesHere/no_oddities_here.py | hwakabh/codewars | 0 | 6614103 | import sys
def no_odds(values):
return [j for j in values if j % 2 == 0]
if __name__ == "__main__":
if len(sys.argv) == 1:
n = [int(i) for i in input('>>> Enter some numbers with comma-separated: ').split(',')]
print(no_odds(values=n))
else:
sys.exit(1)
| import sys
def no_odds(values):
return [j for j in values if j % 2 == 0]
if __name__ == "__main__":
if len(sys.argv) == 1:
n = [int(i) for i in input('>>> Enter some numbers with comma-separated: ').split(',')]
print(no_odds(values=n))
else:
sys.exit(1)
| none | 1 | 3.546948 | 4 | |
docs/_build/jupyter_execute/index.py | khalidcawl/tech_salary_predictor_canada_us | 0 | 6614104 | #!/usr/bin/env python
# coding: utf-8
# # Project proposal
#
# Demo of a data analysis project for DSCI 522 (Data Science workflows); a course in the Master of Data Science program at the University of British Columbia.
#
# ## About
#
# The aim of this project is to allow tech employees in Canada to get a reasonable estimation of how much they will potentially earn given their skill set and years of experience. Fresh graduates and seasoned employees would benefit from an analysis tool that allows them to predict their earning potential. While the Human Resources (HR) department of companies has access to this market information, tech employees are mostly clueless about what their market value is. Therefore, a salary predictor tool could assist them in the negotiation process.
#
# Using the [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey) data, we did Exploratory Data analysis (EDA) and came up with some features that may help predict the expected yearly compensation of tech employees. We describe the data, selected features, and the modeling process which you should be able to reproduce following the usage steps.
#
# ## Data
#
# The data set used in this project is from the [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey), which is conducted annually. The survey data set has nearly 80,000 responses. There are several useful features that could be extracted from this survey such as education level, location, the language used, job type, all of which are potentially associated with annual compensation.
#
# ## Usage
#
# ### Dependencies
# - R version 3.6.1 and R packages:
# - tidyverse==1.3.1
# - caret==6.0.90
# - docopt==0.7.1
# - testthat=3.0.4
#
# The Python dependencies can be found in the tech_salary_pred_env.yaml file. However, you don’t have to manually install these dependencies. You need to install conda (v4.10.3) and then follow the installation instructions described below.
#
# Suggested way to download data:
#
# 1. Clone this Github repository
# 2. Go to [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey)
# 3. Copy the link of the csv file, taking 2019 result as an example:
# https://info.stackoverflowsolutions.com/rs/719-EMH-566/images/stack-overflow-developer-survey-2019.zip
# 4. Run the following command at the terminal from the root directory of this project:
#
# #### Option 1 - Using makefile
#
# To replicate the analysis, clone this GitHub repository, install the dependencies listed above, and run the following command at the command line / terminal from the root directory of this project: `make all`
#
# To reset the repo to a clean state, with no intermediate or results files, run the following command at the command line/terminal from the root directory of this project: `make clean`
#
#
# #### Option 2 - Executing the scripts individually
#
# ```
# # create conda environment
# conda env create -f tech_salary_pred_env.yaml
# conda activate tech_salary_pred_env
#
# # download data
# python src/download_data.py --url=https://info.stackoverflowsolutions.com/rs/719-EMH-566/images/stack-overflow-developer-survey-2019.zip --out_dir=data/raw
#
# # pre-process data
# Rscript src/preprocessing.R --input=data/raw/survey_results_public.csv --out_dir=data/processed
#
# # run eda report
# python src/eda.py --train=data/processed/training.csv --out_dir=results/
#
# # modelling
# python src/salary_prediction_model.py --train=data/processed/training.csv --out_dir=results --test=data/processed/test.csv
#
# # render report
# jupyter-book build docs
# ```
#
# ## Report
# The final report can be found [here](https://ubc-mds.github.io/tech_salary_predictor_canada_us/report.html)
#
# ### Exploratory data analysis
#
# After carrying out EDA, we realize that the following features might help us predict the salary of tech workers: years of experience, education level, programming languages used, and their role (full-stack developer, front-end developer, etc.)
#
# Based on the data corresponding to the four features in the survey, we created plots to visualize how the four features impact on the annual compensation of developers. As we can see from the plots, a large professional coding years and certain programming language skills(Bash,C,SQL,HTML,Java) would have positive influence on the average annual compensation. Then we dig a little deeper by visualizing the combination of these two features in a heatmap, which shows the average annual compensation given the number of professional coding years and combination of programming language skills.
#
# ### Modelling
#
# We built a multiple linear regression model to see the relationship between these features and the annual compensation.
# To simplify our analysis, we focused on Canada. We initially thought of building the model on Canada data. However, we learned from our EDA that there is pay disparity and currency conversion involved. In the future, we plan to include USA in the model and handle those discrepancies.
#
# Additionally, the regression model can also provide other important information such as the weight of each feature or how much each of those features we picked contributes to the overall predicted annual compensation.
#
# To evaluate our model and selected features, we plan to use R squared score as the score metric and see how well the tuned model can generalize on the test data. For the purpose of visualizing the performance of our prediction, we will plot the regression line on the scattered data points to infer whether the relationship between features and response is linear or not.
#
# The research question is:
# > To predict the expected salary of a tech employee in Canada given their number of years of experience, education level, programming languages used, and role
#
# This topic is a predictive question, also extending to following sub-questions:
# 1. Which features have significantly statistical effect on the response (inference)
# 2. Whether the model is robust (model estimation, a predictive problem)
# 3. The confidence interval of predictions (predictive)
#
# ## LICENSE
# This database - The Public 2019 Stack Overflow Developer Survey Results - is made available under the Open Database License (ODbL): http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# # Project proposal
#
# Demo of a data analysis project for DSCI 522 (Data Science workflows); a course in the Master of Data Science program at the University of British Columbia.
#
# ## About
#
# The aim of this project is to allow tech employees in Canada to get a reasonable estimation of how much they will potentially earn given their skill set and years of experience. Fresh graduates and seasoned employees would benefit from an analysis tool that allows them to predict their earning potential. While the Human Resources (HR) department of companies has access to this market information, tech employees are mostly clueless about what their market value is. Therefore, a salary predictor tool could assist them in the negotiation process.
#
# Using the [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey) data, we did Exploratory Data analysis (EDA) and came up with some features that may help predict the expected yearly compensation of tech employees. We describe the data, selected features, and the modeling process which you should be able to reproduce following the usage steps.
#
# ## Data
#
# The data set used in this project is from the [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey), which is conducted annually. The survey data set has nearly 80,000 responses. There are several useful features that could be extracted from this survey such as education level, location, the language used, job type, all of which are potentially associated with annual compensation.
#
# ## Usage
#
# ### Dependencies
# - R version 3.6.1 and R packages:
# - tidyverse==1.3.1
# - caret==6.0.90
# - docopt==0.7.1
# - testthat=3.0.4
#
# The Python dependencies can be found in the tech_salary_pred_env.yaml file. However, you don’t have to manually install these dependencies. You need to install conda (v4.10.3) and then follow the installation instructions described below.
#
# Suggested way to download data:
#
# 1. Clone this Github repository
# 2. Go to [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey)
# 3. Copy the link of the csv file, taking 2019 result as an example:
# https://info.stackoverflowsolutions.com/rs/719-EMH-566/images/stack-overflow-developer-survey-2019.zip
# 4. Run the following command at the terminal from the root directory of this project:
#
# #### Option 1 - Using makefile
#
# To replicate the analysis, clone this GitHub repository, install the dependencies listed above, and run the following command at the command line / terminal from the root directory of this project: `make all`
#
# To reset the repo to a clean state, with no intermediate or results files, run the following command at the command line/terminal from the root directory of this project: `make clean`
#
#
# #### Option 2 - Executing the scripts individually
#
# ```
# # create conda environment
# conda env create -f tech_salary_pred_env.yaml
# conda activate tech_salary_pred_env
#
# # download data
# python src/download_data.py --url=https://info.stackoverflowsolutions.com/rs/719-EMH-566/images/stack-overflow-developer-survey-2019.zip --out_dir=data/raw
#
# # pre-process data
# Rscript src/preprocessing.R --input=data/raw/survey_results_public.csv --out_dir=data/processed
#
# # run eda report
# python src/eda.py --train=data/processed/training.csv --out_dir=results/
#
# # modelling
# python src/salary_prediction_model.py --train=data/processed/training.csv --out_dir=results --test=data/processed/test.csv
#
# # render report
# jupyter-book build docs
# ```
#
# ## Report
# The final report can be found [here](https://ubc-mds.github.io/tech_salary_predictor_canada_us/report.html)
#
# ### Exploratory data analysis
#
# After carrying out EDA, we realize that the following features might help us predict the salary of tech workers: years of experience, education level, programming languages used, and their role (full-stack developer, front-end developer, etc.)
#
# Based on the data corresponding to the four features in the survey, we created plots to visualize how the four features impact on the annual compensation of developers. As we can see from the plots, a large professional coding years and certain programming language skills(Bash,C,SQL,HTML,Java) would have positive influence on the average annual compensation. Then we dig a little deeper by visualizing the combination of these two features in a heatmap, which shows the average annual compensation given the number of professional coding years and combination of programming language skills.
#
# ### Modelling
#
# We built a multiple linear regression model to see the relationship between these features and the annual compensation.
# To simplify our analysis, we focused on Canada. We initially thought of building the model on Canada data. However, we learned from our EDA that there is pay disparity and currency conversion involved. In the future, we plan to include USA in the model and handle those discrepancies.
#
# Additionally, the regression model can also provide other important information such as the weight of each feature or how much each of those features we picked contributes to the overall predicted annual compensation.
#
# To evaluate our model and selected features, we plan to use R squared score as the score metric and see how well the tuned model can generalize on the test data. For the purpose of visualizing the performance of our prediction, we will plot the regression line on the scattered data points to infer whether the relationship between features and response is linear or not.
#
# The research question is:
# > To predict the expected salary of a tech employee in Canada given their number of years of experience, education level, programming languages used, and role
#
# This topic is a predictive question, also extending to following sub-questions:
# 1. Which features have significantly statistical effect on the response (inference)
# 2. Whether the model is robust (model estimation, a predictive problem)
# 3. The confidence interval of predictions (predictive)
#
# ## LICENSE
# This database - The Public 2019 Stack Overflow Developer Survey Results - is made available under the Open Database License (ODbL): http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/
# In[ ]:
| en | 0.85959 | #!/usr/bin/env python # coding: utf-8 # # Project proposal # # Demo of a data analysis project for DSCI 522 (Data Science workflows); a course in the Master of Data Science program at the University of British Columbia. # # ## About # # The aim of this project is to allow tech employees in Canada to get a reasonable estimation of how much they will potentially earn given their skill set and years of experience. Fresh graduates and seasoned employees would benefit from an analysis tool that allows them to predict their earning potential. While the Human Resources (HR) department of companies has access to this market information, tech employees are mostly clueless about what their market value is. Therefore, a salary predictor tool could assist them in the negotiation process. # # Using the [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey) data, we did Exploratory Data analysis (EDA) and came up with some features that may help predict the expected yearly compensation of tech employees. We describe the data, selected features, and the modeling process which you should be able to reproduce following the usage steps. # # ## Data # # The data set used in this project is from the [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey), which is conducted annually. The survey data set has nearly 80,000 responses. There are several useful features that could be extracted from this survey such as education level, location, the language used, job type, all of which are potentially associated with annual compensation. # # ## Usage # # ### Dependencies # - R version 3.6.1 and R packages: # - tidyverse==1.3.1 # - caret==6.0.90 # - docopt==0.7.1 # - testthat=3.0.4 # # The Python dependencies can be found in the tech_salary_pred_env.yaml file. However, you don’t have to manually install these dependencies. 
You need to install conda (v4.10.3) and then follow the installation instructions described below. # # Suggested way to download data: # # 1. Clone this Github repository # 2. Go to [Stack Overflow Annual Developer Survey](https://insights.stackoverflow.com/survey) # 3. Copy the link of the csv file, taking 2019 result as an example: # https://info.stackoverflowsolutions.com/rs/719-EMH-566/images/stack-overflow-developer-survey-2019.zip # 4. Run the following command at the terminal from the root directory of this project: # # #### Option 1 - Using makefile # # To replicate the analysis, clone this GitHub repository, install the dependencies listed above, and run the following command at the command line / terminal from the root directory of this project: `make all` # # To reset the repo to a clean state, with no intermediate or results files, run the following command at the command line/terminal from the root directory of this project: `make clean` # # # #### Option 2 - Executing the scripts individually # # ``` # # create conda environment # conda env create -f tech_salary_pred_env.yaml # conda activate tech_salary_pred_env # # # download data # python src/download_data.py --url=https://info.stackoverflowsolutions.com/rs/719-EMH-566/images/stack-overflow-developer-survey-2019.zip --out_dir=data/raw # # # pre-process data # Rscript src/preprocessing.R --input=data/raw/survey_results_public.csv --out_dir=data/processed # # # run eda report # python src/eda.py --train=data/processed/training.csv --out_dir=results/ # # # modelling # python src/salary_prediction_model.py --train=data/processed/training.csv --out_dir=results --test=data/processed/test.csv # # # render report # jupyter-book build docs # ``` # # ## Report # The final report can be found [here](https://ubc-mds.github.io/tech_salary_predictor_canada_us/report.html) # # ### Exploratory data analysis # # After carrying out EDA, we realize that the following features might help us predict the salary of tech 
workers: years of experience, education level, programming languages used, and their role (full-stack developer, front-end developer, etc.) # # Based on the data corresponding to the four features in the survey, we created plots to visualize how the four features impact on the annual compensation of developers. As we can see from the plots, a large professional coding years and certain programming language skills(Bash,C,SQL,HTML,Java) would have positive influence on the average annual compensation. Then we dig a little deeper by visualizing the combination of these two features in a heatmap, which shows the average annual compensation given the number of professional coding years and combination of programming language skills. # # ### Modelling # # We built a multiple linear regression model to see the relationship between these features and the annual compensation. # To simplify our analysis, we focused on Canada. We initially thought of building the model on Canada data. However, we learned from our EDA that there is pay disparity and currency conversion involved. In the future, we plan to include USA in the model and handle those discrepancies. # # Additionally, the regression model can also provide other important information such as the weight of each feature or how much each of those features we picked contributes to the overall predicted annual compensation. # # To evaluate our model and selected features, we plan to use R squared score as the score metric and see how well the tuned model can generalize on the test data. For the purpose of visualizing the performance of our prediction, we will plot the regression line on the scattered data points to infer whether the relationship between features and response is linear or not. 
# # The research question is: # > To predict the expected salary of a tech employee in Canada given their number of years of experience, education level, programming languages used, and role # # This topic is a predictive question, also extending to following sub-questions: # 1. Which features have significantly statistical effect on the response (inference) # 2. Whether the model is robust (model estimation, a predictive problem) # 3. The confidence interval of predictions (predictive) # # ## LICENSE # This database - The Public 2019 Stack Overflow Developer Survey Results - is made available under the Open Database License (ODbL): http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ # In[ ]: | 2.639588 | 3 |
common/util/web/FWUpdatePage.py | vaibhav92/op-test-framework | 0 | 6614105 | <reponame>vaibhav92/op-test-framework<filename>common/util/web/FWUpdatePage.py
#!/usr/bin/env python2
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/common/../FWUpdatePage.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
from Page import Page
from seleniumimports import *
from BmcPageConstants import BmcPageConstants
from selenium.webdriver.support.ui import Select
from OpTestConstants import OpTestConstants as BMC_CONST
from OpTestError import OpTestError
import time
##
# @file: FWUpdatePage.py
# @brief: This file contains functions to browse through FW Update Menus and
# manage FW Update related Pages
#
##
# FWUpdatePage
# @brief: This class manages interaction with FW Update
# menus and webpages
#
class FWUpdatePage():
    """Drive the BMC web UI firmware-update screens through Selenium.

    Every public method returns BMC_CONST.FW_SUCCESS on success and lets
    unexpected Selenium exceptions propagate to the caller (the documented
    contract throughout this file).  Element ids and selectors come from
    BmcPageConstants and target an AMI-style BMC web interface.
    """

    def __init__(self, page):
        """Constructor.

        :param page: wrapper object exposing the Selenium WebDriver as
                     ``page.driver``
        """
        self.Page = page

    # ------------------------------------------------------------------
    # Private helpers (new; they factor out the wait/locate and the
    # alert-accept boilerplate that was copy-pasted into every method).
    # ------------------------------------------------------------------

    def _wait_for(self, by, locator, timeout=None):
        """Wait for the element located by ``(by, locator)`` and return it.

        Uses BMC_CONST.WEB_DRIVER_WAIT unless *timeout* is given.
        Raises TimeoutException if the element never appears.
        """
        if timeout is None:
            timeout = BMC_CONST.WEB_DRIVER_WAIT
        return WebDriverWait(self.Page.driver, timeout).until(
            EC.presence_of_element_located((by, locator)))

    def _accept_alert(self, timeout=None):
        """Wait for a javascript alert and accept it.

        Raises TimeoutException when no alert shows up within *timeout*
        (default BMC_CONST.WEB_DRIVER_WAIT).
        """
        if timeout is None:
            timeout = BMC_CONST.WEB_DRIVER_WAIT
        WebDriverWait(self.Page.driver, timeout).until(EC.alert_is_present())
        self.Page.driver.switch_to.alert.accept()

    def _enter_frame(self, frame_id):
        """Switch the driver context into the iframe with id *frame_id*."""
        self.Page.driver.switch_to.frame(
            self.Page.driver.find_element_by_id(frame_id))

    # ------------------------------------------------------------------
    # Navigation
    # ------------------------------------------------------------------

    def getUpdateOptionsPage(self):
        """Traverse the menu to the BMC Firmware Update page.

        Accepts a stray javascript alert first, if one is pending.

        :return: BMC_CONST.FW_SUCCESS
        """
        try:
            self._accept_alert()
        except TimeoutException:
            print("FWUpdate_Page::getUpdateOptionsPage - \
No alert present. Moving forward")
        self.Page.driver.switch_to.default_content()
        self._enter_frame(BmcPageConstants.BMC_MAINFRAME)
        self._wait_for(By.ID, BmcPageConstants.BMC_LN_FIRMWARE_UPDATE).click()
        # Wait for the sub-menu to expand before locating the link inside it.
        self._wait_for(By.ID, BmcPageConstants.BMC_LN_FIRMWARE_UPDATE_MENU)
        self._wait_for(By.CSS_SELECTOR,
                       BmcPageConstants.BMC_LN_FIRMWARE_UPDATE_HREF).click()
        return BMC_CONST.FW_SUCCESS

    def getProtocolConfigPage(self):
        """Traverse the menu to the BMC Protocol Configuration page.

        :return: BMC_CONST.FW_SUCCESS
        """
        self.Page.driver.switch_to.default_content()
        self._enter_frame(BmcPageConstants.BMC_MAINFRAME)
        self._wait_for(By.ID, BmcPageConstants.BMC_LN_FIRMWARE_UPDATE).click()
        self._wait_for(By.ID, BmcPageConstants.BMC_LN_FIRMWARE_UPDATE_MENU)
        self._wait_for(By.CSS_SELECTOR,
                       BmcPageConstants.BMC_LN_PROTOCOL_CONFIG_HREF).click()
        return BMC_CONST.FW_SUCCESS

    # ------------------------------------------------------------------
    # Firmware-update options
    # ------------------------------------------------------------------

    def selectAMI(self):
        """Select the AMI option on the FW Update page and press Continue.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._enter_frame(BmcPageConstants.BMC_PAGEFRAME)
        self._wait_for(By.ID, BmcPageConstants.BMC_AMI_RADIO_BTN).click()
        self._wait_for(By.ID, BmcPageConstants.BMC_CONTINUE_BTN).click()
        print("Selected AMI Option")
        return BMC_CONST.FW_SUCCESS

    def doContinue(self):
        """Press 'Enter Update Mode' and accept the confirmation alert.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_FWUPDATE_BTN).click()
        self._accept_alert()
        return BMC_CONST.FW_SUCCESS

    def selectHPM(self):
        """Select the HPM option, press Continue and accept the alert.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._enter_frame(BmcPageConstants.BMC_PAGEFRAME)
        self._wait_for(By.ID, BmcPageConstants.BMC_HPM_RADIO_BTN).click()
        self._wait_for(By.ID, BmcPageConstants.BMC_CONTINUE_BTN).click()
        self._accept_alert()
        print("Selected HPM Option")
        return BMC_CONST.FW_SUCCESS

    # ------------------------------------------------------------------
    # Protocol configuration
    # ------------------------------------------------------------------

    def selectProtocolType(self, protocol):
        """Pick *protocol* from the transfer-protocol drop-down.

        :param protocol: visible text of the option to select; must match
                         one of the entries in the BMC drop-down menu
        :return: BMC_CONST.FW_SUCCESS once a matching option was clicked.
                 NOTE(review): falls through (implicit None) when no option
                 matches after all retries -- confirm callers treat that
                 as failure.
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_PAGEFRAME)
        self._enter_frame(BmcPageConstants.BMC_PAGEFRAME)
        # The drop-down may not be populated right after the frame switch,
        # so retry a bounded number of times with a sleep in between.
        for _attempt in range(1, BmcPageConstants.WEB_PROTOCOL_SELECT_RETRY):
            time.sleep(BMC_CONST.WEB_DRIVER_WAIT)
            # This locator is hard-coded to the TFTP <select> for now.
            dropdown = self._wait_for(By.ID, BmcPageConstants.BMC_TFTP_OPTION)
            for option in dropdown.find_elements_by_tag_name(
                    BmcPageConstants.BMC_OPTION_TAG):
                if option.text == protocol:
                    option.click()
                    return BMC_CONST.FW_SUCCESS

    def inputServerAddress(self, addr):
        """Fill in the IP address of the server hosting the BMC image.

        :param addr: server address string typed into the text field
        :return: BMC_CONST.FW_SUCCESS
        """
        field = self._wait_for(By.ID,
                               BmcPageConstants.BMC_SERVER_ADDR_TEXT_AREA)
        field.clear()
        field.send_keys(addr)
        print("Server Address: " + addr)
        return BMC_CONST.FW_SUCCESS

    def inputImageName(self, image):
        """Fill in the full path of the BMC image on the server.

        :param image: full path to the BMC image
        :return: BMC_CONST.FW_SUCCESS
        """
        field = self._wait_for(By.ID,
                               BmcPageConstants.BMC_IMAGE_PATH_TEXT_AREA)
        field.clear()
        field.send_keys(image)
        print("Server Image: " + image)
        return BMC_CONST.FW_SUCCESS

    def doSave(self):
        """Save the protocol configuration and accept the javascript alert.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_SAVE_BTN).click()
        self._accept_alert()
        print("Protocol Config Saved")
        return BMC_CONST.FW_SUCCESS

    # ------------------------------------------------------------------
    # Image selection / update execution
    # ------------------------------------------------------------------

    def selectFile(self, path):
        """Type *path* (full path of the BMC FW image) into the upload input.

        :param path: full path to the BMC image file
        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_UPLOAD_FILE).send_keys(path)
        return BMC_CONST.FW_SUCCESS

    def doOK(self):
        """Click the OK button of the FW Update dialog.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.XPATH, BmcPageConstants.BMC_OK_BTN).click()
        return BMC_CONST.FW_SUCCESS

    def selectUpdateAll(self):
        """Select both the BIOS and the Boot-App images for update.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_BIOS_UPDATE_OPTION).click()
        self._wait_for(By.ID, BmcPageConstants.BMC_BOOT_UPDATE_OPTION).click()
        return BMC_CONST.FW_SUCCESS

    def selectUpdateBios(self):
        """Select only the BIOS image for update.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_BIOS_UPDATE_OPTION).click()
        return BMC_CONST.FW_SUCCESS

    def selectUpdateBoot_APP(self):
        """Select only the Boot-App image for update.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_BOOT_UPDATE_OPTION).click()
        return BMC_CONST.FW_SUCCESS

    def doProceed(self):
        """Press Proceed to start the update and accept the confirmation
        alert.

        :return: BMC_CONST.FW_SUCCESS
        """
        self._wait_for(By.ID, BmcPageConstants.BMC_BOOT_PROCEED).click()
        self._accept_alert()
        return BMC_CONST.FW_SUCCESS

    def WaitForFWUpdateComplete(self, timeout):
        """Wait for the FW update to finish.

        The BMC pops up a javascript alert when the update completes; wait
        up to *timeout* seconds for it and accept it.

        :param timeout: seconds to wait for the completion alert
        :return: BMC_CONST.FW_SUCCESS (even on timeout -- the timeout case
                 is only reported on stdout, matching the original
                 best-effort behaviour)
        """
        try:
            self._accept_alert(timeout)
        except TimeoutException:
            print("FWUpdate_Page::WaitForFWUpdateComplete- \
No alert present. FW Update may not have \
completed successfully. Need to check!!")
        return BMC_CONST.FW_SUCCESS
| #!/usr/bin/env python2
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/common/../FWUpdatePage.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
from Page import Page
from seleniumimports import *
from BmcPageConstants import BmcPageConstants
from selenium.webdriver.support.ui import Select
from OpTestConstants import OpTestConstants as BMC_CONST
from OpTestError import OpTestError
import time
##
# @file: FWUpdatePage.py
# @brief: This file contains functions to browse through FW Update Menus and
# manage FW Update related Pages
#
##
# FWUpdatePage
# @brief: This class manages interaction with FW Update
# menus and webpages
#
class FWUpdatePage():
##
# @brief Constructor - Takes a pointer to BMC WebDriver
# @param page instance
# @return none
#
    def __init__(self, page):
        # page: wrapper object exposing the Selenium WebDriver as page.driver;
        # stored so every method can reach self.Page.driver.
        self.Page = page
##
# @brief Function to traverse to BMC FW Update page
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def getUpdateOptionsPage(self):
try:
WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until( EC.alert_is_present() )
alert=self.Page.driver.switch_to.alert.accept()
except TimeoutException:
print("FWUpdate_Page::getUpdateOptionsPage - \
No alert present. Moving forward")
self.Page.driver.switch_to.default_content()
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_MAINFRAME))
FWUpdate = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_LN_FIRMWARE_UPDATE)))
FWUpdate.click()
FWUpdate_menu = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_LN_FIRMWARE_UPDATE_MENU)))
FWUpdate_submenu = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.CSS_SELECTOR,
BmcPageConstants.BMC_LN_FIRMWARE_UPDATE_HREF)))
FWUpdate_submenu.click()
return BMC_CONST.FW_SUCCESS
##
# @brief Function to traverse to BMC FW Protocol Configuration Page
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def getProtocolConfigPage(self):
self.Page.driver.switch_to.default_content()
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_MAINFRAME))
FWUpdate = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_LN_FIRMWARE_UPDATE)))
FWUpdate.click()
FWUpdate_menu = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_LN_FIRMWARE_UPDATE_MENU)))
FWUpdate_submenu = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.CSS_SELECTOR,
BmcPageConstants.BMC_LN_PROTOCOL_CONFIG_HREF)))
FWUpdate_submenu.click()
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects AMI option in the FW Update page
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def selectAMI(self):
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_PAGEFRAME))
FWUpdate_AMI = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_AMI_RADIO_BTN)))
FWUpdate_AMI.click()
FWUpdate_AMI = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).\
until(EC.presence_of_element_located((By.ID,BmcPageConstants.BMC_CONTINUE_BTN)))
FWUpdate_AMI.click()
print("Selected AMI Option")
return BMC_CONST.FW_SUCCESS
##
# @brief This function hits continue button on all FW Update web pages
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def doContinue(self):
FWUpdate_EnterUpdateMode = WebDriverWait(self.Page.driver,
BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_FWUPDATE_BTN)))
FWUpdate_EnterUpdateMode.click()
WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).\
until( EC.alert_is_present() )
alert=self.Page.driver.switch_to.alert.accept()
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects HPM option in the FW Update page
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def selectHPM(self):
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_PAGEFRAME))
FWUpdate_HPM = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_HPM_RADIO_BTN)))
FWUpdate_HPM.click()
FWUpdate_HPM = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_CONTINUE_BTN)))
FWUpdate_HPM.click()
WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until( EC.alert_is_present() )
self.Page.driver.switch_to.alert.accept()
print("Selected HPM Option")
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects Protocol options from the drop down menu
# in Protocol config page(This is hard-coded to select TFTP protocol for now)
#
# @param protocol - String which identified hwat protocol to select. This
# string should match the options listed in BMC drop down menu
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
    def selectProtocolType(self, protocol):
        # Switch into the page frame and pick *protocol* from the transfer
        # protocol drop-down.  Returns BMC_CONST.FW_SUCCESS once a matching
        # option is clicked; NOTE(review): falls through (implicit None) if
        # no option ever matches -- confirm callers treat that as failure.
        WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
            EC.presence_of_element_located((By.ID,
            BmcPageConstants.BMC_PAGEFRAME)))
        self.Page.driver.switch_to.frame(
            self.Page.driver.find_element_by_id(
            BmcPageConstants.BMC_PAGEFRAME))
        # Retry a bounded number of times: the drop-down may not be
        # populated immediately after the frame switch.
        for i in range(1,BmcPageConstants.WEB_PROTOCOL_SELECT_RETRY):
            time.sleep(BMC_CONST.WEB_DRIVER_WAIT)
            #This is hard-coded to select TFTP protocol for now
            FWUpdate_protocoltype = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
                EC.presence_of_element_located((By.ID,
                BmcPageConstants.BMC_TFTP_OPTION)))
            # Compare each <option>'s visible text against the requested
            # protocol name.
            for option in FWUpdate_protocoltype.find_elements_by_tag_name(
                    BmcPageConstants.BMC_OPTION_TAG):
                if option.text == protocol:
                    option.click()
                    return BMC_CONST.FW_SUCCESS
##
# @brief This function updates text field which contains server hosting
# BMC image
#
# @param addr - Fills out IP address of server providing the BMC image
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def inputServerAddress(self, addr):
FWUpdate_protocoltype = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_SERVER_ADDR_TEXT_AREA)))
FWUpdate_protocoltype.clear()
FWUpdate_protocoltype.send_keys(addr)
print("Server Address: " + addr)
return BMC_CONST.FW_SUCCESS
##
# @brief This function updates imagename field. Full path to the image
# needs to be provided
#
# @param image - full path to the BMC image
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def inputImageName(self, image):
FWUpdate_imagename = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_IMAGE_PATH_TEXT_AREA)))
FWUpdate_imagename.clear()
FWUpdate_imagename.send_keys(image)
print("Server Image: " + image)
return BMC_CONST.FW_SUCCESS
##
# @brief This function saves the updated protocol configuration. This page
# prompts a javascript alert which will be accepted
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def doSave(self):
FWUpdate_Save = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_SAVE_BTN)))
FWUpdate_Save.click()
WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until( EC.alert_is_present() )
alert=self.Page.driver.switch_to.alert.accept()
print("Protocol Config Saved")
return BMC_CONST.FW_SUCCESS
##
# @brief This function provides the path to a BMC FW Image file
#
# @param Full path to the BMC image file
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def selectFile(self, path):
FWUpdate_FileSelect = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_UPLOAD_FILE)))
FWUpdate_FileSelect.send_keys(path)
return BMC_CONST.FW_SUCCESS
##
# @brief This function clicks the OK button at FW Update option
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def doOK(self):
FWUpdate_OK_BUTTON = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.XPATH,
BmcPageConstants.BMC_OK_BTN)))
FWUpdate_OK_BUTTON.click()
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects all FW images to be updated BIOS and Boot-App
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def selectUpdateAll(self):
FWUpdate_SELECT_BIOS_RADIO = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_BIOS_UPDATE_OPTION)))
FWUpdate_SELECT_BIOS_RADIO.click()
FWUpdate_SELECT_BOOT_RADIO = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_BOOT_UPDATE_OPTION)))
FWUpdate_SELECT_BOOT_RADIO.click()
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects only BIOS FW images to be updated
#
# @param none
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def selectUpdateBios(self):
FWUpdate_SELECT_BIOS_RADIO = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_BIOS_UPDATE_OPTION)))
FWUpdate_SELECT_BIOS_RADIO.click()
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects only BIOS FW images to be updated
#
# @param None
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def selectUpdateBoot_APP(self):
FWUpdate_SELECT_BOOT_RADIO = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_BOOT_UPDATE_OPTION)))
FWUpdate_SELECT_BOOT_RADIO.click()
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects proceed button
#
# @param None
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def doProceed(self):
FWUpdate_PROCEED_BUTTON = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_BOOT_PROCEED)))
FWUpdate_PROCEED_BUTTON.click()
WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until( EC.alert_is_present() )
alert=self.Page.driver.switch_to.alert.accept()
return BMC_CONST.FW_SUCCESS
##
# @brief This function waits for fw update to be completed. Expectation
# is that an alert box will popup at the end of the FW update
#
# @param timeout @type int time to wait for an alert to be present
#
# @return BMC_CONST.FW_SUCCESS upon success
# This function may throw some unexpected exception on failure
# which will be caught by the calling function
#
def WaitForFWUpdateComplete(self,timeout):
try:
WebDriverWait(self.Page.driver,timeout).until(EC.alert_is_present())
alert=self.Page.driver.switch_to.alert.accept()
except TimeoutException:
print("FWUpdate_Page::WaitForFWUpdateComplete- \
No alert present. FW Update may not have \
completed successfully. Need to check!!")
return BMC_CONST.FW_SUCCESS | en | 0.72899 | #!/usr/bin/env python2 # IBM_PROLOG_BEGIN_TAG # This is an automatically generated prolog. # # $Source: op-test-framework/common/../FWUpdatePage.py $ # # OpenPOWER Automated Test Project # # Contributors Listed Below - COPYRIGHT 2015 # [+] International Business Machines Corp. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # # IBM_PROLOG_END_TAG ## # @file: FWUpdatePage.py # @brief: This file contains functions to browse through FW Update Menus and # manage FW Update related Pages # ## # FWUpdatePage # @brief: This class manages interaction with FW Update # menus and webpages # ## # @brief Constructor - Takes a pointer to BMC WebDriver # @param page instance # @return none # ## # @brief Function to traverse to BMC FW Update page # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief Function to traverse to BMC FW Protocol Configuration Page # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function selects AMI option in the FW Update page # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function hits continue button on all FW 
Update web pages # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function selects HPM option in the FW Update page # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function selects Protocol options from the drop down menu # in Protocol config page(This is hard-coded to select TFTP protocol for now) # # @param protocol - String which identified hwat protocol to select. This # string should match the options listed in BMC drop down menu # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # #This is hard-coded to select TFTP protocol for now ## # @brief This function updates text field which contains server hosting # BMC image # # @param addr - Fills out IP address of server providing the BMC image # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function updates imagename field. Full path to the image # needs to be provided # # @param image - full path to the BMC image # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function saves the updated protocol configuration. 
This page # prompts a javascript alert which will be accepted # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function provides the path to a BMC FW Image file # # @param Full path to the BMC image file # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function clicks the OK button at FW Update option # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function selects all FW images to be updated BIOS and Boot-App # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function selects only BIOS FW images to be updated # # @param none # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function selects only BIOS FW images to be updated # # @param None # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function selects proceed button # # @param None # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # ## # @brief This function waits for fw update to be completed. 
Expectation # is that an alert box will popup at the end of the FW update # # @param timeout @type int time to wait for an alert to be present # # @return BMC_CONST.FW_SUCCESS upon success # This function may throw some unexpected exception on failure # which will be caught by the calling function # | 1.795793 | 2 |
Model.py | arpitsri3/CarND-Behavioral-Cloning | 0 | 6614106 |
# coding: utf-8
# CarND-Behavioral-Cloning-P3
# In[3]:
#Importing Dependencies when required
import os
import csv
samples=[]
with open('./data/driving_log.csv') as csvfile:
has_header = csv.Sniffer().has_header(csvfile.read(1024))
csvfile.seek(0) # Rewind.
reader=csv.reader(csvfile)
if has_header:
next(reader) # Skip header row.
for line in reader:
samples.append(line)
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.21)
#Quick Visualization of what we did above
print("Length of Training Data: ",len(train_samples))
print("Random datapoint - ",train_samples[9])
print("Length of Validation Data: ",len(validation_samples))
print("Random datapoint - ",validation_samples[9])
# In[4]:
#Using the example Generator from Classroom
import cv2
import numpy as np
from sklearn.utils import shuffle
from matplotlib import pyplot as plt
from scipy.misc import toimage
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.regularizers import l2
from keras.optimizers import Adam
def generator(samples, batch_size=33):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+(batch_size)]
images = []
angles = []
for batch_sample in batch_samples:
name = './data/IMG/'+batch_sample[0].split('/')[-1]
center_image = cv2.imread(name)
center_angle = float(batch_sample[3])
name = './data/IMG/'+batch_sample[1].split('/')[-1]
left_image = cv2.imread(name)
left_angle = float(batch_sample[3])+0.25
name = './data/IMG/'+batch_sample[2].split('/')[-1]
right_image = cv2.imread(name)
right_angle = float(batch_sample[3])-0.25
images.append(center_image)
angles.append(center_angle)
images.append(left_image)
angles.append(left_angle)
images.append(right_image)
angles.append(right_angle)
#Augment Data by flipping
augmented_images, augmented_measurements = [] , []
for image,measurement in zip(images, angles):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(cv2.flip(image,1))
augmented_measurements.append(measurement*-1.0)
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
yield shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=33)
validation_generator = generator(validation_samples, batch_size=33)
#ch, row, col = 3, 160, 320 # Trimmed image format
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
#model.add(Lambda(lambda x: x/127.5 - 1.))
#model.add(... finish defining the rest of your model architecture here ...)
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3), output_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Convolution2D(24,5,5,subsample=(2,2),activation="relu",W_regularizer=l2(0.001)))
model.add(Convolution2D(36,5,5,subsample=(2,2),activation="relu",W_regularizer=l2(0.001)))
model.add(Convolution2D(48,5,5,subsample=(2,2),activation="relu",W_regularizer=l2(0.001)))
model.add(Convolution2D(64,3,3,activation="relu",W_regularizer=l2(0.001)))
#model.add(MaxPooling2D((1,1)))
model.add(Convolution2D(64,3,3,activation="relu",W_regularizer=l2(0.001)))
model.add(Flatten())
model.add(Dense(100,W_regularizer=l2(0.001)))
#model.add(Dropout(.6))
#model.add(ELU())
model.add(Dense(50,W_regularizer=l2(0.001)))
model.add(Dense(10,W_regularizer=l2(0.001)))
model.add(Dense(1))
#Adam(lr=1e-4)
model.compile(loss='mse', optimizer='adam')
history_object=model.fit_generator(train_generator, samples_per_epoch= len(train_samples)*6, validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=5)
model.save('model.h5')
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
|
# coding: utf-8
# CarND-Behavioral-Cloning-P3
# In[3]:
#Importing Dependencies when required
import os
import csv
samples=[]
with open('./data/driving_log.csv') as csvfile:
has_header = csv.Sniffer().has_header(csvfile.read(1024))
csvfile.seek(0) # Rewind.
reader=csv.reader(csvfile)
if has_header:
next(reader) # Skip header row.
for line in reader:
samples.append(line)
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.21)
#Quick Visualization of what we did above
print("Length of Training Data: ",len(train_samples))
print("Random datapoint - ",train_samples[9])
print("Length of Validation Data: ",len(validation_samples))
print("Random datapoint - ",validation_samples[9])
# In[4]:
#Using the example Generator from Classroom
import cv2
import numpy as np
from sklearn.utils import shuffle
from matplotlib import pyplot as plt
from scipy.misc import toimage
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU, MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.regularizers import l2
from keras.optimizers import Adam
def generator(samples, batch_size=33):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+(batch_size)]
images = []
angles = []
for batch_sample in batch_samples:
name = './data/IMG/'+batch_sample[0].split('/')[-1]
center_image = cv2.imread(name)
center_angle = float(batch_sample[3])
name = './data/IMG/'+batch_sample[1].split('/')[-1]
left_image = cv2.imread(name)
left_angle = float(batch_sample[3])+0.25
name = './data/IMG/'+batch_sample[2].split('/')[-1]
right_image = cv2.imread(name)
right_angle = float(batch_sample[3])-0.25
images.append(center_image)
angles.append(center_angle)
images.append(left_image)
angles.append(left_angle)
images.append(right_image)
angles.append(right_angle)
#Augment Data by flipping
augmented_images, augmented_measurements = [] , []
for image,measurement in zip(images, angles):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(cv2.flip(image,1))
augmented_measurements.append(measurement*-1.0)
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
yield shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=33)
validation_generator = generator(validation_samples, batch_size=33)
#ch, row, col = 3, 160, 320 # Trimmed image format
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
#model.add(Lambda(lambda x: x/127.5 - 1.))
#model.add(... finish defining the rest of your model architecture here ...)
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3), output_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Convolution2D(24,5,5,subsample=(2,2),activation="relu",W_regularizer=l2(0.001)))
model.add(Convolution2D(36,5,5,subsample=(2,2),activation="relu",W_regularizer=l2(0.001)))
model.add(Convolution2D(48,5,5,subsample=(2,2),activation="relu",W_regularizer=l2(0.001)))
model.add(Convolution2D(64,3,3,activation="relu",W_regularizer=l2(0.001)))
#model.add(MaxPooling2D((1,1)))
model.add(Convolution2D(64,3,3,activation="relu",W_regularizer=l2(0.001)))
model.add(Flatten())
model.add(Dense(100,W_regularizer=l2(0.001)))
#model.add(Dropout(.6))
#model.add(ELU())
model.add(Dense(50,W_regularizer=l2(0.001)))
model.add(Dense(10,W_regularizer=l2(0.001)))
model.add(Dense(1))
#Adam(lr=1e-4)
model.compile(loss='mse', optimizer='adam')
history_object=model.fit_generator(train_generator, samples_per_epoch= len(train_samples)*6, validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=5)
model.save('model.h5')
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
| en | 0.623957 | # coding: utf-8 # CarND-Behavioral-Cloning-P3 # In[3]: #Importing Dependencies when required # Rewind. # Skip header row. #Quick Visualization of what we did above # In[4]: #Using the example Generator from Classroom # Loop forever so the generator never terminates #Augment Data by flipping # compile and train the model using the generator function #ch, row, col = 3, 160, 320 # Trimmed image format # Preprocess incoming data, centered around zero with small standard deviation #model.add(Lambda(lambda x: x/127.5 - 1.)) #model.add(... finish defining the rest of your model architecture here ...) #model.add(MaxPooling2D((1,1))) #model.add(Dropout(.6)) #model.add(ELU()) #Adam(lr=1e-4) ### print the keys contained in the history object ### plot the training and validation loss for each epoch | 2.784378 | 3 |
pyeccodes/defs/grib2/tables/6/4_230_table.py | ecmwf/pyeccodes | 7 | 6614107 | <filename>pyeccodes/defs/grib2/tables/6/4_230_table.py
def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Ozone'},
{'abbr': 1, 'code': 1, 'title': 'Water vapour'},
{'abbr': 2, 'code': 2, 'title': 'Methane'},
{'abbr': 3, 'code': 3, 'title': 'Carbon dioxide'},
{'abbr': 4, 'code': 4, 'title': 'Carbon monoxide'},
{'abbr': 5, 'code': 5, 'title': 'Nitrogen dioxide'},
{'abbr': 6, 'code': 6, 'title': 'Nitrous oxide'},
{'abbr': 7, 'code': 7, 'title': 'Formaldehyde'},
{'abbr': 8, 'code': 8, 'title': 'Sulphur dioxide'},
{'abbr': 9, 'code': 9, 'title': 'Ammonia'},
{'abbr': 10, 'code': 10, 'title': 'Ammonium'},
{'abbr': 11, 'code': 11, 'title': 'Nitrogen monoxide'},
{'abbr': 12, 'code': 12, 'title': 'Atomic oxygen'},
{'abbr': 13, 'code': 13, 'title': 'Nitrate radical'},
{'abbr': 14, 'code': 14, 'title': 'Hydroperoxyl radical'},
{'abbr': 15, 'code': 15, 'title': 'Dinitrogen pentoxide'},
{'abbr': 16, 'code': 16, 'title': 'Nitrous acid'},
{'abbr': 17, 'code': 17, 'title': 'Nitric acid'},
{'abbr': 18, 'code': 18, 'title': 'Peroxynitric acid'},
{'abbr': 19, 'code': 19, 'title': 'Hydrogen peroxide'},
{'abbr': 20, 'code': 20, 'title': 'Molecular hydrogen'},
{'abbr': 21, 'code': 21, 'title': 'Atomic nitrogen'},
{'abbr': 22, 'code': 22, 'title': 'Sulphate'},
{'abbr': 23, 'code': 23, 'title': 'Radon'},
{'abbr': 24, 'code': 24, 'title': 'Elemental mercury'},
{'abbr': 25, 'code': 25, 'title': 'Divalent mercury'},
{'abbr': 26, 'code': 26, 'title': 'Atomic chlorine'},
{'abbr': 27, 'code': 27, 'title': 'Chlorine monoxide'},
{'abbr': 28, 'code': 28, 'title': 'Dichlorine peroxide'},
{'abbr': 29, 'code': 29, 'title': 'Hypochlorous acid'},
{'abbr': 30, 'code': 30, 'title': 'Chlorine nitrate'},
{'abbr': 31, 'code': 31, 'title': 'Chlorine dioxide'},
{'abbr': 32, 'code': 32, 'title': 'Atomic bromine'},
{'abbr': 33, 'code': 33, 'title': 'Bromine monoxide'},
{'abbr': 34, 'code': 34, 'title': 'Bromine chloride'},
{'abbr': 35, 'code': 35, 'title': 'Hydrogen bromide'},
{'abbr': 36, 'code': 36, 'title': 'Hypobromous acid'},
{'abbr': 37, 'code': 37, 'title': 'Bromine nitrate'},
{'abbr': 10000, 'code': 10000, 'title': 'Hydroxyl radical'},
{'abbr': 10001, 'code': 10001, 'title': 'Methyl peroxy radical'},
{'abbr': 10002, 'code': 10002, 'title': 'Methyl hydroperoxide'},
{'abbr': 10004, 'code': 10004, 'title': 'Methanol'},
{'abbr': 10005, 'code': 10005, 'title': 'Formic acid'},
{'abbr': 10006, 'code': 10006, 'title': 'Hydrogen Cyanide'},
{'abbr': 10007, 'code': 10007, 'title': 'Aceto nitrile'},
{'abbr': 10008, 'code': 10008, 'title': 'Ethane'},
{'abbr': 10009, 'code': 10009, 'title': 'Ethene', 'units': '= Ethylene'},
{'abbr': 10010, 'code': 10010, 'title': 'Ethyne', 'units': '= Acetylene'},
{'abbr': 10011, 'code': 10011, 'title': 'Ethanol'},
{'abbr': 10012, 'code': 10012, 'title': 'Acetic acid'},
{'abbr': 10013, 'code': 10013, 'title': 'Peroxyacetyl nitrate'},
{'abbr': 10014, 'code': 10014, 'title': 'Propane'},
{'abbr': 10015, 'code': 10015, 'title': 'Propene'},
{'abbr': 10016, 'code': 10016, 'title': 'Butanes'},
{'abbr': 10017, 'code': 10017, 'title': 'Isoprene'},
{'abbr': 10018, 'code': 10018, 'title': 'Alpha pinene'},
{'abbr': 10019, 'code': 10019, 'title': 'Beta pinene'},
{'abbr': 10020, 'code': 10020, 'title': 'Limonene'},
{'abbr': 10021, 'code': 10021, 'title': 'Benzene'},
{'abbr': 10022, 'code': 10022, 'title': 'Toluene'},
{'abbr': 10023, 'code': 10023, 'title': 'Xylene'},
{'abbr': 10500, 'code': 10500, 'title': 'Dimethyl sulphide'},
{'abbr': 20001, 'code': 20001, 'title': 'Hydrogen chloride'},
{'abbr': 20002, 'code': 20002, 'title': 'CFC-11'},
{'abbr': 20003, 'code': 20003, 'title': 'CFC-12'},
{'abbr': 20004, 'code': 20004, 'title': 'CFC-113'},
{'abbr': 20005, 'code': 20005, 'title': 'CFC-113a'},
{'abbr': 20006, 'code': 20006, 'title': 'CFC-114'},
{'abbr': 20007, 'code': 20007, 'title': 'CFC-115'},
{'abbr': 20008, 'code': 20008, 'title': 'HCFC-22'},
{'abbr': 20009, 'code': 20009, 'title': 'HCFC-141b'},
{'abbr': 20010, 'code': 20010, 'title': 'HCFC-142b'},
{'abbr': 20011, 'code': 20011, 'title': 'Halon-1202'},
{'abbr': 20012, 'code': 20012, 'title': 'Halon-1211'},
{'abbr': 20013, 'code': 20013, 'title': 'Halon-1301'},
{'abbr': 20014, 'code': 20014, 'title': 'Halon-2402'},
{'abbr': 20015, 'code': 20015, 'title': 'Methyl chloride', 'units': 'HCC-40'},
{'abbr': 20016,
'code': 20016,
'title': 'Carbon tetrachloride',
'units': 'HCC-10'},
{'abbr': 20017, 'code': 20017, 'title': 'HCC-140a'},
{'abbr': 20018, 'code': 20018, 'title': 'Methyl bromide', 'units': 'HBC-40B1'},
{'abbr': 20019,
'code': 20019,
'title': 'Hexachlorocyclohexane',
'units': 'HCH'},
{'abbr': 20020, 'code': 20020, 'title': 'Alpha hexachlorocyclohexane'},
{'abbr': 20021,
'code': 20021,
'title': 'Hexachlorobiphenyl',
'units': 'PCB-153'},
{'abbr': 60000, 'code': 60000, 'title': 'HOx radical', 'units': 'OH+HO2'},
{'abbr': 60001,
'code': 60001,
'title': 'Total inorganic and organic peroxy radicals',
'units': 'HO2 + RO2'},
{'abbr': 60002, 'code': 60002, 'title': 'Passive Ozone'},
{'abbr': 60003, 'code': 60003, 'title': 'NOx expressed as nitrogen'},
{'abbr': 60004,
'code': 60004,
'title': 'All nitrogen oxides (NOy) expressed as nitrogen'},
{'abbr': 60005, 'code': 60005, 'title': 'Total inorganic chlorine'},
{'abbr': 60006, 'code': 60006, 'title': 'Total inorganic bromine'},
{'abbr': 60007,
'code': 60007,
'title': 'Total inorganic chlorine except HCl, ClONO2: ClOx'},
{'abbr': 60008,
'code': 60008,
'title': 'Total inorganic bromine except HBr, BrONO2: BrOx'},
{'abbr': 60009, 'code': 60009, 'title': 'Lumped Alkanes'},
{'abbr': 60010, 'code': 60010, 'title': 'Lumped Alkenes'},
{'abbr': 60011, 'code': 60011, 'title': 'Lumped Aromatic Compounds'},
{'abbr': 60012, 'code': 60012, 'title': 'Lumped Terpenes'},
{'abbr': 60013,
'code': 60013,
'title': 'Non-methane volatile organic compounds expressed as carbon'},
{'abbr': 60014,
'code': 60014,
'title': 'Anthropogenic non-methane volatile organic compounds expressed as '
'carbon'},
{'abbr': 60015,
'code': 60015,
'title': 'Biogenic non-methane volatile organic compounds expressed as '
'carbon'},
{'abbr': 60016, 'code': 60016, 'title': 'Lumped oxygenated hydrocarbons'},
{'abbr': 62000, 'code': 62000, 'title': 'Total aerosol'},
{'abbr': 62001, 'code': 62001, 'title': 'Dust dry'},
{'abbr': 62002, 'code': 62002, 'title': 'Water in ambient'},
{'abbr': 62003, 'code': 62003, 'title': 'Ammonium dry'},
{'abbr': 62004, 'code': 62004, 'title': 'Nitrate dry'},
{'abbr': 62005, 'code': 62005, 'title': 'Nitric acid trihydrate'},
{'abbr': 62006, 'code': 62006, 'title': 'Sulphate dry'},
{'abbr': 62007, 'code': 62007, 'title': 'Mercury dry'},
{'abbr': 62008, 'code': 62008, 'title': 'Sea salt dry'},
{'abbr': 62009, 'code': 62009, 'title': 'Black carbon dry'},
{'abbr': 62010, 'code': 62010, 'title': 'Particulate organic matter dry'},
{'abbr': 62011,
'code': 62011,
'title': 'Primary particulate organic matter dry'},
{'abbr': 62012,
'code': 62012,
'title': 'Secondary particulate organic matter dry'},
{'abbr': 65535, 'code': 65535, 'title': 'Missing'})
| <filename>pyeccodes/defs/grib2/tables/6/4_230_table.py
def load(h):
    """GRIB2 code table 4.230: atmospheric chemical constituent type.

    Returns a tuple of entries, each a dict with keys ``abbr``, ``code`` and
    ``title`` (plus ``units`` for a handful of constituents).  In this table
    ``abbr`` always equals ``code``, so a local helper builds each entry from
    the code number alone.  The *h* argument is part of the loader interface
    and is not used here.
    """
    def entry(code, title, units=None):
        item = {'abbr': code, 'code': code, 'title': title}
        if units is not None:
            item['units'] = units
        return item

    return (entry(0, 'Ozone'),
            entry(1, 'Water vapour'),
            entry(2, 'Methane'),
            entry(3, 'Carbon dioxide'),
            entry(4, 'Carbon monoxide'),
            entry(5, 'Nitrogen dioxide'),
            entry(6, 'Nitrous oxide'),
            entry(7, 'Formaldehyde'),
            entry(8, 'Sulphur dioxide'),
            entry(9, 'Ammonia'),
            entry(10, 'Ammonium'),
            entry(11, 'Nitrogen monoxide'),
            entry(12, 'Atomic oxygen'),
            entry(13, 'Nitrate radical'),
            entry(14, 'Hydroperoxyl radical'),
            entry(15, 'Dinitrogen pentoxide'),
            entry(16, 'Nitrous acid'),
            entry(17, 'Nitric acid'),
            entry(18, 'Peroxynitric acid'),
            entry(19, 'Hydrogen peroxide'),
            entry(20, 'Molecular hydrogen'),
            entry(21, 'Atomic nitrogen'),
            entry(22, 'Sulphate'),
            entry(23, 'Radon'),
            entry(24, 'Elemental mercury'),
            entry(25, 'Divalent mercury'),
            entry(26, 'Atomic chlorine'),
            entry(27, 'Chlorine monoxide'),
            entry(28, 'Dichlorine peroxide'),
            entry(29, 'Hypochlorous acid'),
            entry(30, 'Chlorine nitrate'),
            entry(31, 'Chlorine dioxide'),
            entry(32, 'Atomic bromine'),
            entry(33, 'Bromine monoxide'),
            entry(34, 'Bromine chloride'),
            entry(35, 'Hydrogen bromide'),
            entry(36, 'Hypobromous acid'),
            entry(37, 'Bromine nitrate'),
            entry(10000, 'Hydroxyl radical'),
            entry(10001, 'Methyl peroxy radical'),
            entry(10002, 'Methyl hydroperoxide'),
            entry(10004, 'Methanol'),
            entry(10005, 'Formic acid'),
            entry(10006, 'Hydrogen Cyanide'),
            entry(10007, 'Aceto nitrile'),
            entry(10008, 'Ethane'),
            entry(10009, 'Ethene', units='= Ethylene'),
            entry(10010, 'Ethyne', units='= Acetylene'),
            entry(10011, 'Ethanol'),
            entry(10012, 'Acetic acid'),
            entry(10013, 'Peroxyacetyl nitrate'),
            entry(10014, 'Propane'),
            entry(10015, 'Propene'),
            entry(10016, 'Butanes'),
            entry(10017, 'Isoprene'),
            entry(10018, 'Alpha pinene'),
            entry(10019, 'Beta pinene'),
            entry(10020, 'Limonene'),
            entry(10021, 'Benzene'),
            entry(10022, 'Toluene'),
            entry(10023, 'Xylene'),
            entry(10500, 'Dimethyl sulphide'),
            entry(20001, 'Hydrogen chloride'),
            entry(20002, 'CFC-11'),
            entry(20003, 'CFC-12'),
            entry(20004, 'CFC-113'),
            entry(20005, 'CFC-113a'),
            entry(20006, 'CFC-114'),
            entry(20007, 'CFC-115'),
            entry(20008, 'HCFC-22'),
            entry(20009, 'HCFC-141b'),
            entry(20010, 'HCFC-142b'),
            entry(20011, 'Halon-1202'),
            entry(20012, 'Halon-1211'),
            entry(20013, 'Halon-1301'),
            entry(20014, 'Halon-2402'),
            entry(20015, 'Methyl chloride', units='HCC-40'),
            entry(20016, 'Carbon tetrachloride', units='HCC-10'),
            entry(20017, 'HCC-140a'),
            entry(20018, 'Methyl bromide', units='HBC-40B1'),
            entry(20019, 'Hexachlorocyclohexane', units='HCH'),
            entry(20020, 'Alpha hexachlorocyclohexane'),
            entry(20021, 'Hexachlorobiphenyl', units='PCB-153'),
            entry(60000, 'HOx radical', units='OH+HO2'),
            entry(60001, 'Total inorganic and organic peroxy radicals', units='HO2 + RO2'),
            entry(60002, 'Passive Ozone'),
            entry(60003, 'NOx expressed as nitrogen'),
            entry(60004, 'All nitrogen oxides (NOy) expressed as nitrogen'),
            entry(60005, 'Total inorganic chlorine'),
            entry(60006, 'Total inorganic bromine'),
            entry(60007, 'Total inorganic chlorine except HCl, ClONO2: ClOx'),
            entry(60008, 'Total inorganic bromine except HBr, BrONO2: BrOx'),
            entry(60009, 'Lumped Alkanes'),
            entry(60010, 'Lumped Alkenes'),
            entry(60011, 'Lumped Aromatic Compounds'),
            entry(60012, 'Lumped Terpenes'),
            entry(60013, 'Non-methane volatile organic compounds expressed as carbon'),
            entry(60014, 'Anthropogenic non-methane volatile organic compounds expressed as carbon'),
            entry(60015, 'Biogenic non-methane volatile organic compounds expressed as carbon'),
            entry(60016, 'Lumped oxygenated hydrocarbons'),
            entry(62000, 'Total aerosol'),
            entry(62001, 'Dust dry'),
            entry(62002, 'Water in ambient'),
            entry(62003, 'Ammonium dry'),
            entry(62004, 'Nitrate dry'),
            entry(62005, 'Nitric acid trihydrate'),
            entry(62006, 'Sulphate dry'),
            entry(62007, 'Mercury dry'),
            entry(62008, 'Sea salt dry'),
            entry(62009, 'Black carbon dry'),
            entry(62010, 'Particulate organic matter dry'),
            entry(62011, 'Primary particulate organic matter dry'),
            entry(62012, 'Secondary particulate organic matter dry'),
            entry(65535, 'Missing'))
| none | 1 | 2.109496 | 2 | |
accounts/admin.py | asandeep/pseudo-electronics | 0 | 6614108 | <gh_stars>0
import rolepermissions
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from accounts import forms, models
class UserAdmin(
    rolepermissions.admin.RolePermissionsUserAdminMixin, auth_admin.UserAdmin
):
    """Admin configuration for the project's custom ``User`` model.

    Fix: the role-permissions mixin is listed *before* Django's ``UserAdmin``
    so that its overrides take precedence in the MRO; with the previous
    ordering the mixin's behaviour was shadowed by the stock admin class.
    """

    # NOTE(review): ``import rolepermissions`` alone does not guarantee the
    # ``rolepermissions.admin`` submodule is loaded; confirm it is imported
    # elsewhere, or switch to ``from rolepermissions.admin import ...``.
    form = forms.UserChangeForm        # form used when editing an existing user
    add_form = forms.UserCreationForm  # form used on the "add user" page


# Register the custom user model in the Django admin with this configuration.
admin.site.register(models.User, UserAdmin)
| import rolepermissions
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from accounts import forms, models
class UserAdmin(
auth_admin.UserAdmin, rolepermissions.admin.RolePermissionsUserAdminMixin
):
form = forms.UserChangeForm
add_form = forms.UserCreationForm
admin.site.register(models.User, UserAdmin) | none | 1 | 1.841642 | 2 | |
setup.py | xinyushi/SIR.Model | 0 | 6614109 | <filename>setup.py
"""Packaging script for the ``sir`` package (a basic SIR model with variations)."""
import os

from setuptools import setup

# Directory containing this setup.py (kept for reading adjacent files such as
# a README; currently unused by the setup() call itself).
this_dir = os.path.dirname(os.path.realpath(__file__))

__version__ = '0.0.0'

# Note: the unused ``import sys`` and the duplicate ``import setuptools``
# were removed; only ``setup`` (and ``os`` above) are actually needed.
setup(
    name='sir',
    version=__version__,
    author='<NAME>, <NAME>, <NAME>',
    description='a basic SIR model package with some variations',
    python_requires='>=3.6',
    packages=['sir'],  # ,'SIR_continuous_reinfected','R_continuous_mask'],
    zip_safe=True,
)
| <filename>setup.py
from setuptools import setup
import sys, os
import setuptools
this_dir = os.path.dirname(os.path.realpath(__file__))
__version__ = '0.0.0'
setup(
name='sir',
version=__version__,
author='<NAME>, <NAME>, <NAME>',
description='a basic SIR model package with some variations',
python_requires='>=3.6',
packages=['sir'],#,'SIR_continuous_reinfected','R_continuous_mask'],
zip_safe=True,
)
| en | 0.569467 | #,'SIR_continuous_reinfected','R_continuous_mask'], | 1.345215 | 1 |
soduku.py | rodincode/python | 1 | 6614110 | <reponame>rodincode/python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 20:25:09 2020
@author: dell
"""
#Sudoku Generator Algorithm - www.101computing.net/sudoku-generator-algorithm/
import turtle
from random import randint, shuffle
from time import sleep
#initialise empty 9 by 9 grid
# Build the empty 9x9 Sudoku grid (0 = unfilled cell).  A comprehension
# replaces the nine repetitive append() calls; each row is a distinct list,
# just as before, so in-place cell assignment behaves identically.
grid = [[0] * 9 for _ in range(9)]
myPen = turtle.Turtle()
myPen.tracer(0)
myPen.speed(0)
myPen.color("#000000")
myPen.hideturtle()
topLeft_x=-150
topLeft_y=150
def text(message, x, y, size):
    """Write *message* at screen position (x, y) in Arial at point size *size*."""
    myPen.penup()
    myPen.goto(x, y)
    myPen.write(message, align="left", font=("Arial", size, "normal"))
#A procedure to draw the grid on screen using Python Turtle
def drawGrid(grid):
    """Render the 9x9 Sudoku *grid* with turtle graphics.

    Rulings are drawn first (every third line thicker, marking the 3x3
    boxes), then the non-zero cell values are written into the cells.
    """
    cell = 35  # pixel size of one cell
    # Horizontal rulings, top to bottom.
    for row in range(10):
        myPen.pensize(3 if row % 3 == 0 else 1)
        myPen.penup()
        myPen.goto(topLeft_x, topLeft_y - row * cell)
        myPen.pendown()
        myPen.goto(topLeft_x + 9 * cell, topLeft_y - row * cell)
    # Vertical rulings, left to right.
    for col in range(10):
        myPen.pensize(3 if col % 3 == 0 else 1)
        myPen.penup()
        myPen.goto(topLeft_x + col * cell, topLeft_y)
        myPen.pendown()
        myPen.goto(topLeft_x + col * cell, topLeft_y - 9 * cell)
    # Cell values; 0 denotes an empty cell and is left blank.
    for row in range(9):
        for col in range(9):
            if grid[row][col] != 0:
                text(grid[row][col],
                     topLeft_x + col * cell + 9,
                     topLeft_y - row * cell - cell + 8,
                     18)
#A function to check if the grid is full
def checkGrid(grid):
    """Return True when the grid is complete, i.e. no cell contains 0."""
    return all(0 not in row for row in grid)
#A backtracking/recursive function to check all possible combinations of numbers until a solution is found
def solveGrid(grid):
    """Count solutions of *grid* by backtracking, via the global ``counter``.

    Each time a complete, valid assignment is reached the module-level
    ``counter`` is incremented; the puzzle generator uses this to test for
    uniqueness.  NOTE(review): the ``return True`` propagation mirrors
    fillGrid(), but because the base case only increments ``counter`` and
    breaks, the function effectively always returns None -- callers rely
    solely on the ``counter`` side effect (behaviour preserved).
    """
    global counter
    # Find the next empty cell, scanning row-major.
    for i in range(0, 81):
        row = i // 9
        col = i % 9
        if grid[row][col] == 0:
            for value in range(1, 10):
                # Candidate must not already appear in this row ...
                if value not in grid[row]:
                    # ... nor in this column ...
                    if value not in [grid[r][col] for r in range(9)]:
                        # ... nor in the 3x3 box containing (row, col).
                        # (Replaces the original 18-line if/elif ladder.)
                        top, left = 3 * (row // 3), 3 * (col // 3)
                        square = [grid[r][left:left + 3] for r in range(top, top + 3)]
                        if value not in (square[0] + square[1] + square[2]):
                            grid[row][col] = value
                            if checkGrid(grid):
                                counter += 1
                                break
                            else:
                                if solveGrid(grid):
                                    return True
            break
    # Backtrack: reset the cell tried above.  Assumes at least one empty
    # cell existed (as in the original), otherwise this clears cell (8, 8).
    grid[row][col] = 0
# Candidate digits; shuffled before each cell so every run yields a
# different solved grid.
numberList = [1, 2, 3, 4, 5, 6, 7, 8, 9]


def fillGrid(grid):
    """Fill *grid* in place with a valid random Sudoku solution.

    Backtracking search that tries the digits of the module-level
    ``numberList`` in shuffled order.  Returns True once the grid is
    complete.  (The unused ``global counter`` declaration was removed;
    this function never touches ``counter``.)
    """
    # Find the next empty cell, scanning row-major.
    for i in range(0, 81):
        row = i // 9
        col = i % 9
        if grid[row][col] == 0:
            shuffle(numberList)
            for value in numberList:
                # Candidate must not already appear in this row ...
                if value not in grid[row]:
                    # ... nor in this column ...
                    if value not in [grid[r][col] for r in range(9)]:
                        # ... nor in the 3x3 box containing (row, col).
                        # (Replaces the original 18-line if/elif ladder.)
                        top, left = 3 * (row // 3), 3 * (col // 3)
                        square = [grid[r][left:left + 3] for r in range(top, top + 3)]
                        if value not in (square[0] + square[1] + square[2]):
                            grid[row][col] = value
                            if checkGrid(grid):
                                return True
                            else:
                                if fillGrid(grid):
                                    return True
            break
    # Backtrack: no candidate worked for this cell, so clear it.
    grid[row][col] = 0
#Generate a Fully Solved Grid
fillGrid(grid)  # backtracking fill of the empty 9x9 grid (mutates it in place)
drawGrid(grid)  # render the solved grid with turtle graphics
myPen.getscreen().update()  # flush drawing: tracer(0) above defers screen updates
sleep(1)  # keep the finished grid visible briefly
'''
#Start Removing Numbers one by one
#A higher number of attempts will end up removing more numbers from the grid
#Potentially resulting in more difficiult grids to solve!
attempts = 5
counter=1
while attempts>0:
#Select a random cell that is not already empty
row = randint(0,8)
col = randint(0,8)
while grid[row][col]==0:
row = randint(0,8)
col = randint(0,8)
#Remember its cell value in case we need to put it back
backup = grid[row][col]
grid[row][col]=0
#Take a full copy of the grid
copyGrid = []
for r in range(0,9):
copyGrid.append([])
for c in range(0,9):
copyGrid[r].append(grid[r][c])
#Count the number of solutions that this grid has (using a backtracking approach implemented in the solveGrid() function)
counter=0
solveGrid(copyGrid)
#If the number of solution is different from 1 then we need to cancel the change by putting the value we took away back in the grid
if counter!=1:
grid[row][col]=backup
#We could stop here, but we can also have another attempt with a different cell just to try to remove more numbers
attempts -= 1
myPen.clear()
drawGrid(grid)
myPen.getscreen().update()
'''
print("Sudoku Grid Ready") | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 20:25:09 2020
@author: dell
"""
#Sudoku Generator Algorithm - www.101computing.net/sudoku-generator-algorithm/
import turtle
from random import randint, shuffle
from time import sleep
#initialise empty 9 by 9 grid
grid = []
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
myPen = turtle.Turtle()
myPen.tracer(0)
myPen.speed(0)
myPen.color("#000000")
myPen.hideturtle()
topLeft_x=-150
topLeft_y=150
def text(message,x,y,size):
FONT = ('Arial', size, 'normal')
myPen.penup()
myPen.goto(x,y)
myPen.write(message,align="left",font=FONT)
#A procedure to draw the grid on screen using Python Turtle
def drawGrid(grid):
intDim=35
for row in range(0,10):
if (row%3)==0:
myPen.pensize(3)
else:
myPen.pensize(1)
myPen.penup()
myPen.goto(topLeft_x,topLeft_y-row*intDim)
myPen.pendown()
myPen.goto(topLeft_x+9*intDim,topLeft_y-row*intDim)
for col in range(0,10):
if (col%3)==0:
myPen.pensize(3)
else:
myPen.pensize(1)
myPen.penup()
myPen.goto(topLeft_x+col*intDim,topLeft_y)
myPen.pendown()
myPen.goto(topLeft_x+col*intDim,topLeft_y-9*intDim)
for row in range (0,9):
for col in range (0,9):
if grid[row][col]!=0:
text(grid[row][col],topLeft_x+col*intDim+9,topLeft_y-row*intDim-intDim+8,18)
#A function to check if the grid is full
def checkGrid(grid):
for row in range(0,9):
for col in range(0,9):
if grid[row][col]==0:
return False
#We have a complete grid!
return True
#A backtracking/recursive function to check all possible combinations of numbers until a solution is found
def solveGrid(grid):
global counter
#Find next empty cell
for i in range(0,81):
row=i//9
col=i%9
if grid[row][col]==0:
for value in range (1,10):
#Check that this value has not already be used on this row
if not(value in grid[row]):
#Check that this value has not already be used on this column
if not value in (grid[0][col],grid[1][col],grid[2][col],grid[3][col],grid[4][col],grid[5][col],grid[6][col],grid[7][col],grid[8][col]):
#Identify which of the 9 squares we are working on
square=[]
if row<3:
if col<3:
square=[grid[i][0:3] for i in range(0,3)]
elif col<6:
square=[grid[i][3:6] for i in range(0,3)]
else:
square=[grid[i][6:9] for i in range(0,3)]
elif row<6:
if col<3:
square=[grid[i][0:3] for i in range(3,6)]
elif col<6:
square=[grid[i][3:6] for i in range(3,6)]
else:
square=[grid[i][6:9] for i in range(3,6)]
else:
if col<3:
square=[grid[i][0:3] for i in range(6,9)]
elif col<6:
square=[grid[i][3:6] for i in range(6,9)]
else:
square=[grid[i][6:9] for i in range(6,9)]
#Check that this value has not already be used on this 3x3 square
if not value in (square[0] + square[1] + square[2]):
grid[row][col]=value
if checkGrid(grid):
counter+=1
break
else:
if solveGrid(grid):
return True
break
grid[row][col]=0
numberList=[1,2,3,4,5,6,7,8,9]
#shuffle(numberList)
#A backtracking/recursive function to check all possible combinations of numbers until a solution is found
def fillGrid(grid):
global counter
#Find next empty cell
for i in range(0,81):
row=i//9
col=i%9
if grid[row][col]==0:
shuffle(numberList)
for value in numberList:
#Check that this value has not already be used on this row
if not(value in grid[row]):
#Check that this value has not already be used on this column
if not value in (grid[0][col],grid[1][col],grid[2][col],grid[3][col],grid[4][col],grid[5][col],grid[6][col],grid[7][col],grid[8][col]):
#Identify which of the 9 squares we are working on
square=[]
if row<3:
if col<3:
square=[grid[i][0:3] for i in range(0,3)]
elif col<6:
square=[grid[i][3:6] for i in range(0,3)]
else:
square=[grid[i][6:9] for i in range(0,3)]
elif row<6:
if col<3:
square=[grid[i][0:3] for i in range(3,6)]
elif col<6:
square=[grid[i][3:6] for i in range(3,6)]
else:
square=[grid[i][6:9] for i in range(3,6)]
else:
if col<3:
square=[grid[i][0:3] for i in range(6,9)]
elif col<6:
square=[grid[i][3:6] for i in range(6,9)]
else:
square=[grid[i][6:9] for i in range(6,9)]
#Check that this value has not already be used on this 3x3 square
if not value in (square[0] + square[1] + square[2]):
grid[row][col]=value
if checkGrid(grid):
return True
else:
if fillGrid(grid):
return True
break
grid[row][col]=0
#Generate a Fully Solved Grid
fillGrid(grid)
drawGrid(grid)
myPen.getscreen().update()
sleep(1)
'''
#Start Removing Numbers one by one
#A higher number of attempts will end up removing more numbers from the grid
#Potentially resulting in more difficiult grids to solve!
attempts = 5
counter=1
while attempts>0:
#Select a random cell that is not already empty
row = randint(0,8)
col = randint(0,8)
while grid[row][col]==0:
row = randint(0,8)
col = randint(0,8)
#Remember its cell value in case we need to put it back
backup = grid[row][col]
grid[row][col]=0
#Take a full copy of the grid
copyGrid = []
for r in range(0,9):
copyGrid.append([])
for c in range(0,9):
copyGrid[r].append(grid[r][c])
#Count the number of solutions that this grid has (using a backtracking approach implemented in the solveGrid() function)
counter=0
solveGrid(copyGrid)
#If the number of solution is different from 1 then we need to cancel the change by putting the value we took away back in the grid
if counter!=1:
grid[row][col]=backup
#We could stop here, but we can also have another attempt with a different cell just to try to remove more numbers
attempts -= 1
myPen.clear()
drawGrid(grid)
myPen.getscreen().update()
'''
print("Sudoku Grid Ready") | en | 0.867383 | # -*- coding: utf-8 -*- Created on Mon Jul 20 20:25:09 2020
@author: dell #Sudoku Generator Algorithm - www.101computing.net/sudoku-generator-algorithm/ #initialise empty 9 by 9 grid #A procedure to draw the grid on screen using Python Turtle #A function to check if the grid is full #We have a complete grid! #A backtracking/recursive function to check all possible combinations of numbers until a solution is found #Find next empty cell #Check that this value has not already be used on this row #Check that this value has not already be used on this column #Identify which of the 9 squares we are working on #Check that this value has not already be used on this 3x3 square #shuffle(numberList) #A backtracking/recursive function to check all possible combinations of numbers until a solution is found #Find next empty cell #Check that this value has not already be used on this row #Check that this value has not already be used on this column #Identify which of the 9 squares we are working on #Check that this value has not already be used on this 3x3 square #Generate a Fully Solved Grid #Start Removing Numbers one by one
#A higher number of attempts will end up removing more numbers from the grid
#Potentially resulting in more difficiult grids to solve!
attempts = 5
counter=1
while attempts>0:
#Select a random cell that is not already empty
row = randint(0,8)
col = randint(0,8)
while grid[row][col]==0:
row = randint(0,8)
col = randint(0,8)
#Remember its cell value in case we need to put it back
backup = grid[row][col]
grid[row][col]=0
#Take a full copy of the grid
copyGrid = []
for r in range(0,9):
copyGrid.append([])
for c in range(0,9):
copyGrid[r].append(grid[r][c])
#Count the number of solutions that this grid has (using a backtracking approach implemented in the solveGrid() function)
counter=0
solveGrid(copyGrid)
#If the number of solution is different from 1 then we need to cancel the change by putting the value we took away back in the grid
if counter!=1:
grid[row][col]=backup
#We could stop here, but we can also have another attempt with a different cell just to try to remove more numbers
attempts -= 1
myPen.clear()
drawGrid(grid)
myPen.getscreen().update() | 3.970561 | 4 |
python second semester working scripts/eletrode instantaneous.py | pm2111/Heart-Defibrillation-Project | 0 | 6614111 | <filename>python second semester working scripts/eletrode instantaneous.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import os
# Folder holding one simulation snapshot file per time step.
path = "/Users/petermarinov/msci project/preferential 2/fib 2 timesteps 0.2 nu/data/"

# Collect snapshot file names, skipping hidden files such as .DS_Store.
filenames = [f for f in os.listdir(path) if not f.startswith('.')]

total = []          # electrode signal: one sample per processed snapshot
i1, j1 = 100, 100   # electrode position on the 200x200 lattice
k = 3               # electrode height above the tissue plane
V = np.zeros((200, 200))  # membrane-voltage map for the current snapshot

# Process every second snapshot to halve the run time.
for z in range(0, np.size(filenames), 2):
    data = np.genfromtxt(path + filenames[z])
    x = 0.0  # accumulated electrode potential for this snapshot
    for i in range(0, 200):
        for j in range(0, 200):
            # Per-cell record: column 0 is the excitation state, column 1 the
            # refractory period (assumed from usage -- TODO confirm against
            # the simulation writer).  Map state to a membrane voltage by
            # linear interpolation between +20 mV (just fired) and -90 mV
            # (resting); states in (0, 1) leave V[i, j] unchanged, as before.
            state = data[j + 200 * i][0]
            if state == 0:
                V[i, j] = -90.0
            if state > 1:
                V[i, j] = 20. - (110. / data[j + 200 * i][1]) * (state - 1)
            if state == 1:
                V[i, j] = 20.
            if (i > 0) and (j > 0):
                # Dipole-layer kernel: (r . grad V) / |r|**3 with
                # |r|**2 = (i-i1)**2 + (j-j1)**2 + k**2.
                # Bug fix: the original wrote `** 3 / 2`, which Python parses
                # as (|r|**2)**3 / 2 instead of the intended (|r|**2)**1.5.
                # Also replaced the np.float alias (removed in NumPy 1.24+)
                # with plain floats; Python 3 `/` is true division anyway.
                num = (i - i1) * (V[i, j] - V[i - 1, j]) + (j - j1) * (V[i, j] - V[i, j - 1])
                den = ((i - i1) ** 2 + (j - j1) ** 2 + k ** 2) ** 1.5
                x += num / den
    total.append(x)

plt.figure()
plt.plot(total)
plt.xlabel("time [dimentionless]", fontsize = 18)
plt.ylabel("Voltage [mV]" , fontsize = 18)
plt.title("Electrode measurement obtained by simulating an eletrode and linearly interpolating a wavefront", fontsize = 10)
plt.grid()
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import os
path = "/Users/petermarinov/msci project/preferential 2/fib 2 timesteps 0.2 nu/data/"
filenames = []
for f in os.listdir(path):
if not f.startswith('.'):
filenames.append(f)
total = []
j1 = 100 #position of electrode
i1 = 100
k= 3
V = np.zeros((200,200))
for z in range(0,np.size(filenames),2):
data = np.genfromtxt(path + filenames[z])
x=0 #dummy
for i in range (0,200):
for j in range (0,200):
if data[j+200*i][0] == 0:
V[i,j] = -90.0
if data[j+200*i][0] >1:
V[i,j] = 20.-(110./data[j+200*i][1])*(data[j+200*i][0]-1)
if data[j+200*i][0] ==1:
V[i,j] = 20.
if (i >0) and (j >0):
x += np.float((i-i1)*(V[i,j]-V[i-1,j])+(j-j1)*(V[i,j]-V[i,j-1]))/np.float(((i-i1)**2+(j-j1)**2+k**2)**3/2)
total.append(x)
#for i in range (1,200):
#for j in range (1,200):
#x += np.float((i-i1)*(V[i,j]-V[i-1,j])+(j-j1)*(V[i,j]-V[i,j-1]))/np.float(((i-i1)**2+(j-j1)**2+k**2)**3/2)
plt.figure()
plt.plot(total)
plt.xlabel("time [dimentionless]", fontsize = 18)
plt.ylabel("Voltage [mV]" , fontsize = 18)
plt.title("Electrode measurement obtained by simulating an eletrode and linearly interpolating a wavefront", fontsize = 10)
plt.grid()
plt.show() | en | 0.203866 | #position of electrode #dummy #for i in range (1,200): #for j in range (1,200): #x += np.float((i-i1)*(V[i,j]-V[i-1,j])+(j-j1)*(V[i,j]-V[i,j-1]))/np.float(((i-i1)**2+(j-j1)**2+k**2)**3/2) | 2.465333 | 2 |
test/draw.py | abidalrekab/blocksWorldv1.0 | 1 | 6614112 | <gh_stars>1-10
#!/usr/bin/env python
import os
import unittest
import sys
from dataSet import drawPoints
from PathsModule import fileName, root
from PathsModule import drawOutputPath
from PathsModule import getImage, getPath, validate
"""
This module tests for Valid Image with the given boundaries
TODO drawPattern
"""
# Ensure the directory for the generated result images exists.
if not os.path.exists(drawOutputPath):
    os.makedirs(drawOutputPath)
try:
    # try using the installed blocksWorld if available
    from blocksWorld import *
except ImportError:
    # blocksWorld not installed: add the parent of ``root`` to sys.path
    # (presumably a sibling checkout of blocksWorld) and retry the import.
    blocksWorldPath = os.path.join(root, "..")
    blocksWorldPath = os.path.abspath(blocksWorldPath)
    sys.path.append(blocksWorldPath)
    from blocksWorld import *
class TestDraw(unittest.TestCase):
    """Image-comparison tests for draw, drawWire and drawSolid."""

    # Result image for draw
    def test_draw(self):
        """Draw consecutive vertex pairs, each labelled 'A'."""
        # Result path and stored reference derive from this test's name.
        result_file, reference_file = getPath(
            fileName(sys._getframe().f_code.co_name))
        # Blank greyscale canvas with a white background.
        image, canvas = getImage('L', (15, 90), 'white')
        for first, second in zip(drawPoints, drawPoints[1:]):
            draw(canvas, (first, second), 'A')
        # Persist the result and release the image handle.
        image.save(result_file)
        image.close()
        # Compare the produced image against the stored reference.
        validate(reference_file, result_file)

    # Result image for drawWire
    def test_drawWire(self):
        """Draw the outlines of several regular polygons."""
        result_file, reference_file = getPath(
            fileName(sys._getframe().f_code.co_name))
        image, canvas = getImage('L', (640, 480), 'white')
        # (corner count, centre, radius) for each wireframe polygon.
        outlines = [
            (3, (160, 120), 50),
            (4, (480, 120), 90),
            (5, (420, 360), 60),
            (6, (160, 360), 80),
            (7, (320, 160), 70),
        ]
        for corners, centre, radius in outlines:
            drawWire(canvas, regularPolygon(corners, np.array(centre), radius))
        image.save(result_file)
        image.close()
        validate(reference_file, result_file)

    # Result image for drawSolid
    def test_drawSolid(self):
        """Draw several regular polygons filled with a particular colour."""
        result_file, reference_file = getPath(
            fileName(sys._getframe().f_code.co_name))
        image, canvas = getImage('RGB', (640, 480), 'white')
        # (corner count, centre, radius, fill colour) for each solid polygon.
        shapes = [
            (3, (160, 120), 50, 'red'),
            (4, (480, 120), 90, 'blue'),
            (5, (420, 360), 60, 'green'),
            (6, (160, 360), 80, 'black'),
            (7, (320, 160), 70, 'brown'),
        ]
        for corners, centre, radius, colour in shapes:
            drawSolid(canvas,
                      regularPolygon(corners, np.array(centre), radius),
                      colour)
        image.save(result_file)
        image.close()
        validate(reference_file, result_file)


# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| #!/usr/bin/env python
import os
import unittest
import sys
from dataSet import drawPoints
from PathsModule import fileName, root
from PathsModule import drawOutputPath
from PathsModule import getImage, getPath, validate
"""
This module tests for Valid Image with the given boundaries
TODO drawPattern
"""
if not os.path.exists(drawOutputPath):
os.makedirs(drawOutputPath)
try:
# try using the installed blocksWorld if available
from blocksWorld import *
except ImportError:
# blocksWorld not installed
blocksWorldPath = os.path.join(root, "..")
blocksWorldPath = os.path.abspath(blocksWorldPath)
sys.path.append(blocksWorldPath)
from blocksWorld import *
class TestDraw(unittest.TestCase):
"""
Plotting images for draw , drawWire and drawSolid
"""
# Result image for draw
def test_draw(self):
"""
This test is to draw a number of vertices labeled as 'A'
"""
# Create the file name and its saving path, and specify the reference file to compare to.
image_name = fileName(sys._getframe().f_code.co_name)
resultFile, referenceFile = getPath(image_name)
# create an empty image with a specific dimension with white background, and black/white colored
image, canvas = getImage('L', (15, 90), 'white')
for i in range(len(drawPoints) - 1):
draw(canvas, (drawPoints[i + 0], drawPoints[i + 1]), 'A')
# saving the file and closing it
image.save(resultFile)
image.close()
# validate the resultant file against the reference images
validate(referenceFile, resultFile)
# Result image for drawWire
def test_drawWire(self):
"""
This test is to draw a line between two vertices
"""
# Create the file name and its saving path, and specify the reference file to compare to.
imageName = fileName(sys._getframe().f_code.co_name)
resultFile, referenceFile = getPath(imageName)
# create an empty image with a specific dimension with white background, and black/white colored
image, canvas = getImage('L', (640, 480), 'white')
drawWire(canvas, regularPolygon(3, np.array([160, 120]), 50))
drawWire(canvas, regularPolygon(4, np.array([480, 120]), 90))
drawWire(canvas, regularPolygon(5, np.array([420, 360]), 60))
drawWire(canvas, regularPolygon(6, np.array([160, 360]), 80))
drawWire(canvas, regularPolygon(7, np.array([320, 160]), 70))
# saving the file and closing it
image.save(resultFile)
image.close()
# validate the resultant file against the reference images
validate(referenceFile, resultFile)
# Result image for drawSolid
def test_drawSolid(self):
"""
This test is to create polygons filled with a particular color: red, blue,...ect.
"""
# first, Create the file name and its saving path, and specify the reference file to compare to.
imageName = fileName(sys._getframe().f_code.co_name)
resultFile, referenceFile = getPath(imageName)
# Next, create an empty image with a specific dimension with white background, and black/white colored
image, canvas = getImage('RGB', (640, 480), 'white')
drawSolid(canvas, regularPolygon(3, np.array([160, 120]), 50), 'red')
drawSolid(canvas, regularPolygon(4, np.array([480, 120]), 90), 'blue')
drawSolid(canvas, regularPolygon(5, np.array([420, 360]), 60), 'green')
drawSolid(canvas, regularPolygon(6, np.array([160, 360]), 80), 'black')
drawSolid(canvas, regularPolygon(7, np.array([320, 160]), 70), 'brown')
# saving the file and closing it
image.save(resultFile)
image.close()
# validate the resultant file against the reference images
validate(referenceFile, resultFile)
if __name__ == '__main__':
unittest.main() | en | 0.84884 | #!/usr/bin/env python This module tests for Valid Image with the given boundaries TODO drawPattern # try using the installed blocksWorld if available # blocksWorld not installed Plotting images for draw , drawWire and drawSolid # Result image for draw This test is to draw a number of vertices labeled as 'A' # Create the file name and its saving path, and specify the reference file to compare to. # create an empty image with a specific dimension with white background, and black/white colored # saving the file and closing it # validate the resultant file against the reference images # Result image for drawWire This test is to draw a line between two vertices # Create the file name and its saving path, and specify the reference file to compare to. # create an empty image with a specific dimension with white background, and black/white colored # saving the file and closing it # validate the resultant file against the reference images # Result image for drawSolid This test is to create polygons filled with a particular color: red, blue,...ect. # first, Create the file name and its saving path, and specify the reference file to compare to. # Next, create an empty image with a specific dimension with white background, and black/white colored # saving the file and closing it # validate the resultant file against the reference images | 2.721447 | 3 |
experimental/scratch/test_3.py | infer-actively/pymdp | 108 | 6614113 | # import sys
# import pathlib
# import numpy as np
# sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import numpy as np  # np.zeros / scene arrays are used below; the import was commented out

from pymdp.agent import Agent
from pymdp.envs import VisualForagingEnv

print(
    """Initializing scene configuration with two scenes that share one feature
    in common. Therefore each scene has only one disambiguating feature\n"""
)

# Two 2x2 scenes encoded as feature indices; scenes[0] and scenes[1] share
# the feature "2" at position (1, 1), so only the other cell disambiguates.
scenes = np.zeros((2, 2, 2))
scenes[0][0, 0] = 1
scenes[0][1, 1] = 2
scenes[1][1, 1] = 2
scenes[1][1, 0] = 3

env = VisualForagingEnv(scenes=scenes, n_features=3)
# Only the first hidden-state factor (eye location) is controllable.
agent = Agent(A=env.get_likelihood_dist(), B=env.get_transition_dist(), control_fac_idx=[0])
T = 10
obs = env.reset()
msg = """ === Starting experiment === \n True scene: {} Initial observation {} """
print(msg.format(env.true_scene, obs))
for t in range(T):
    qx = agent.infer_states(obs)
    msg = """[{}] Inference [location {} / scene {}] """
    # NOTE(review): format() receives five arguments but msg has only three
    # placeholders, so obs[0]/obs[1] are silently ignored here.
    print(msg.format(t, qx[0].sample(), qx[1].sample(), obs[0], obs[1]))
    q_pi, efe = agent.infer_policies()
    action = agent.sample_action()
    msg = """[Step {}] Action: [Saccade to location {}]"""
    print(msg.format(t, action[0]))
    obs = env.step(action)
    msg = """[Step {}] Observation: [Location {}, Feature {}]"""
    print(msg.format(t, obs[0], obs[1]))
| # import sys
# import pathlib
# import numpy as np
# sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
from pymdp.agent import Agent
from pymdp.envs import VisualForagingEnv
print(
"""Initializing scene configuration with two scenes that share one feature
in common. Therefore each scene has only one disambiguating feature\n"""
)
scenes = np.zeros((2, 2, 2))
scenes[0][0, 0] = 1
scenes[0][1, 1] = 2
scenes[1][1, 1] = 2
scenes[1][1, 0] = 3
env = VisualForagingEnv(scenes=scenes, n_features=3)
agent = Agent(A=env.get_likelihood_dist(), B=env.get_transition_dist(), control_fac_idx=[0])
T = 10
obs = env.reset()
msg = """ === Starting experiment === \n True scene: {} Initial observation {} """
print(msg.format(env.true_scene, obs))
for t in range(T):
qx = agent.infer_states(obs)
msg = """[{}] Inference [location {} / scene {}] """
print(msg.format(t, qx[0].sample(), qx[1].sample(), obs[0], obs[1]))
q_pi, efe = agent.infer_policies()
action = agent.sample_action()
msg = """[Step {}] Action: [Saccade to location {}]"""
print(msg.format(t, action[0]))
obs = env.step(action)
msg = """[Step {}] Observation: [Location {}, Feature {}]"""
print(msg.format(t, obs[0], obs[1]))
| en | 0.760572 | # import sys # import pathlib # import numpy as np # sys.path.append(str(pathlib.Path(__file__).parent.parent.parent)) Initializing scene configuration with two scenes that share one feature in common. Therefore each scene has only one disambiguating feature\n === Starting experiment === \n True scene: {} Initial observation {} [{}] Inference [location {} / scene {}] [Step {}] Action: [Saccade to location {}] [Step {}] Observation: [Location {}, Feature {}] | 2.411522 | 2 |
analytix/features.py | Jonxslays/analytix | 0 | 6614114 | # Copyright (c) 2021-present, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import typing as t
from analytix import abc, data, errors
class CompareMixin:
    """Equality, inequality, and hashing keyed on the ``values`` set.

    Instances of the same class compare equal when their ``values`` sets are
    equal; comparisons against other types defer to the other operand by
    returning ``NotImplemented``.
    """

    values: set[str]

    def __eq__(self, other: object) -> bool:
        if isinstance(other, self.__class__):
            return self.values == other.values
        return NotImplemented

    def __ne__(self, other: object) -> bool:
        if isinstance(other, self.__class__):
            return self.values != other.values
        return NotImplemented

    def __hash__(self) -> int:
        # All instances of a given class share one hash (the class name's).
        return hash(self.__class__.__name__)
class NestedCompareMixin:
    """Equality/hash mixin for feature types whose ``values`` hold set types.

    Same contract as ``CompareMixin``: same-class instances compare by their
    ``values``, everything else yields ``NotImplemented``.
    """

    values: "set[abc.SetType]"

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.values == other.values

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.values != other.values

    def __hash__(self) -> int:
        return hash(type(self).__name__)
class Metrics(abc.FeatureType, CompareMixin):
    """Validator for the set of metrics a report type supports."""

    def validate(self, inputs: t.Collection[str]) -> None:
        """Raise unless every metric in *inputs* is known and supported here.

        Raises MissingMetrics for an empty input, InvalidMetrics for names
        the API does not know at all, and UnsupportedMetrics for known names
        this report type does not accept.
        """
        if not inputs:
            raise errors.MissingMetrics()
        wanted = inputs if isinstance(inputs, set) else set(inputs)
        unknown = wanted - data.ALL_METRICS
        if unknown:
            raise errors.InvalidMetrics(unknown)
        unsupported = wanted - self.values
        if unsupported:
            raise errors.UnsupportedMetrics(unsupported)
class SortOptions(abc.FeatureType, CompareMixin):
    """Validator for sort options (metric names, optionally "-"-prefixed).

    When *descending_only* is set, every option must carry the "-" prefix.
    """

    def __init__(self, *args: str, descending_only: bool = False) -> None:
        super().__init__(*args)
        self.descending_only = descending_only

    def validate(self, inputs: t.Collection[str]) -> None:
        """Raise unless every option names a known, supported metric."""
        bare = {option.strip("-") for option in inputs}
        unknown = bare - data.ALL_METRICS
        if unknown:
            raise errors.InvalidSortOptions(unknown)
        unsupported = bare - self.values
        if unsupported:
            raise errors.UnsupportedSortOptions(unsupported)
        if self.descending_only:
            ascending = {option for option in inputs if not option.startswith("-")}
            if ascending:
                raise errors.UnsupportedSortOptions(ascending, descending_only=True)
class Dimensions(abc.SegmentedFeatureType, NestedCompareMixin):
    """Validator for report dimensions, including per-set constraints."""

    def validate(self, inputs: t.Collection[str]) -> None:
        """Raise unless *inputs* are known, supported, and satisfy each set rule."""
        wanted = set(inputs)
        invalid = wanted - data.ALL_DIMENSIONS
        if invalid:
            # Report deprecated names separately so the error can call them out.
            deprecated = wanted & data.DEPRECATED_DIMENSIONS
            raise errors.InvalidDimensions(invalid, deprecated)
        unsupported = wanted - self.every
        if unsupported:
            raise errors.UnsupportedDimensions(unsupported)
        for constraint in self.values:
            constraint.validate_dimensions(wanted)
class Filters(abc.MappingFeatureType, NestedCompareMixin):
    """Validator for filter key/value pairs supported by a report type."""

    @property
    def every_key(self) -> set[str]:
        # Strip the "==value" suffix from locked filters so only keys remain.
        return {v[: v.index("=")] if "==" in v else v for v in self.every}

    @property
    def locked(self) -> dict[str, str]:
        # Filters declared as "key==value" may only be used with that exact
        # value; collect them into a key -> required-value mapping.
        locked = {}
        for set_type in self.values:
            for value in filter(lambda v: "==" in v, set_type.values):
                k, v = value.split("==")
                locked.update({k: v})
        return locked

    def validate(self, inputs: dict[str, str]) -> None:
        """Raise unless every filter key and value in *inputs* is acceptable.

        Checks, in order: keys are known to the API at all, each value is in
        the per-key allowed list (when one exists), locked keys carry their
        required value, keys are supported by this report type, and finally
        each set-type constraint (Required/ExactlyOne/...) holds.
        """
        keys = set(inputs.keys())
        locked = self.locked
        diff = keys - data.ALL_FILTERS
        if diff:
            raise errors.InvalidFilters(diff)
        for k, v in inputs.items():
            valid = data.VALID_FILTER_OPTIONS[k]
            # An empty allowed-list means any value is accepted for this key.
            if valid and (v not in valid):
                raise errors.InvalidFilterValue(k, v)
            if k in locked.keys():
                if v != locked[k]:
                    raise errors.UnsupportedFilterValue(k, v)
        diff = keys - self.every_key
        if diff:
            raise errors.UnsupportedFilters(diff)
        for set_type in self.values:
            set_type.validate_filters(keys)
class Required(abc.SetType, CompareMixin):
    """Constraint: every member of this set must be present."""

    def validate_dimensions(self, inputs: set[str]) -> None:
        missing = self.values - inputs
        if missing:
            matched = len(inputs & self.values)
            raise errors.InvalidSetOfDimensions("all", matched, self.values)

    def validate_filters(self, keys: set[str]) -> None:
        missing = self.expd_keys - keys
        if missing:
            matched = len(keys & self.expd_keys)
            raise errors.InvalidSetOfFilters("all", matched, self.values)
class ExactlyOne(abc.SetType, CompareMixin):
    """Constraint: exactly one member of this set must be present."""

    def validate_dimensions(self, inputs: set[str]) -> None:
        matched = len(self.values & inputs)
        if matched != 1:
            raise errors.InvalidSetOfDimensions("1", matched, self.values)

    def validate_filters(self, keys: set[str]) -> None:
        matched = len(self.expd_keys & keys)
        if matched != 1:
            raise errors.InvalidSetOfFilters("1", matched, self.values)
class OneOrMore(abc.SetType, CompareMixin):
    """Constraint: at least one member of this set must be present."""

    def validate_dimensions(self, inputs: set[str]) -> None:
        matched = len(self.values & inputs)
        if not matched:
            raise errors.InvalidSetOfDimensions("at least 1", matched, self.values)

    def validate_filters(self, keys: set[str]) -> None:
        matched = len(self.expd_keys & keys)
        if not matched:
            raise errors.InvalidSetOfFilters("at least 1", matched, self.values)
class Optional(abc.SetType, CompareMixin):
    """Constraint: members of this set may appear freely; nothing to check."""

    def validate_dimensions(self, inputs: set[str]) -> None:
        # No verification required.
        ...

    def validate_filters(self, keys: set[str]) -> None:
        # No verification required.
        ...
class ZeroOrOne(abc.SetType, CompareMixin):
    """Constraint: at most one member of this set may be present."""

    def validate_dimensions(self, inputs: set[str]) -> None:
        matched = len(self.values & inputs)
        if matched > 1:
            raise errors.InvalidSetOfDimensions("0 or 1", matched, self.values)

    def validate_filters(self, keys: set[str]) -> None:
        matched = len(self.expd_keys & keys)
        if matched > 1:
            raise errors.InvalidSetOfFilters("0 or 1", matched, self.values)
class ZeroOrMore(abc.SetType, CompareMixin):
    """Constraint: any number of members (including none) may be present."""

    def validate_dimensions(self, inputs: set[str]) -> None:
        # No verification required.
        ...

    def validate_filters(self, keys: set[str]) -> None:
        # No verification required.
        ...
| # Copyright (c) 2021-present, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import typing as t
from analytix import abc, data, errors
class CompareMixin:
values: set[str]
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values == other.values
def __ne__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values != other.values
def __hash__(self) -> int:
return hash(self.__class__.__name__)
class NestedCompareMixin:
values: set[abc.SetType]
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values == other.values
def __ne__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.values != other.values
def __hash__(self) -> int:
return hash(self.__class__.__name__)
class Metrics(abc.FeatureType, CompareMixin):
def validate(self, inputs: t.Collection[str]) -> None:
if not len(inputs):
raise errors.MissingMetrics()
if not isinstance(inputs, set):
inputs = set(inputs)
diff = inputs - data.ALL_METRICS
if diff:
raise errors.InvalidMetrics(diff)
diff = inputs - self.values
if diff:
raise errors.UnsupportedMetrics(diff)
class SortOptions(abc.FeatureType, CompareMixin):
def __init__(self, *args: str, descending_only: bool = False) -> None:
super().__init__(*args)
self.descending_only = descending_only
def validate(self, inputs: t.Collection[str]) -> None:
raw_inputs = set(i.strip("-") for i in inputs)
if not isinstance(inputs, set):
inputs = set(inputs)
diff = raw_inputs - data.ALL_METRICS
if diff:
raise errors.InvalidSortOptions(diff)
diff = raw_inputs - self.values
if diff:
raise errors.UnsupportedSortOptions(diff)
if self.descending_only:
diff = {i for i in inputs if not i.startswith("-")}
if diff:
raise errors.UnsupportedSortOptions(diff, descending_only=True)
class Dimensions(abc.SegmentedFeatureType, NestedCompareMixin):
def validate(self, inputs: t.Collection[str]) -> None:
if not isinstance(inputs, set):
inputs = set(inputs)
diff = inputs - data.ALL_DIMENSIONS
if diff:
depr = inputs & data.DEPRECATED_DIMENSIONS
raise errors.InvalidDimensions(diff, depr)
diff = inputs - self.every
if diff:
raise errors.UnsupportedDimensions(diff)
for set_type in self.values:
set_type.validate_dimensions(inputs)
class Filters(abc.MappingFeatureType, NestedCompareMixin):
@property
def every_key(self) -> set[str]:
return {v[: v.index("=")] if "==" in v else v for v in self.every}
@property
def locked(self) -> dict[str, str]:
locked = {}
for set_type in self.values:
for value in filter(lambda v: "==" in v, set_type.values):
k, v = value.split("==")
locked.update({k: v})
return locked
def validate(self, inputs: dict[str, str]) -> None:
keys = set(inputs.keys())
locked = self.locked
diff = keys - data.ALL_FILTERS
if diff:
raise errors.InvalidFilters(diff)
for k, v in inputs.items():
valid = data.VALID_FILTER_OPTIONS[k]
if valid and (v not in valid):
raise errors.InvalidFilterValue(k, v)
if k in locked.keys():
if v != locked[k]:
raise errors.UnsupportedFilterValue(k, v)
diff = keys - self.every_key
if diff:
raise errors.UnsupportedFilters(diff)
for set_type in self.values:
set_type.validate_filters(keys)
class Required(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if self.values & inputs == self.values:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("all", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if self.expd_keys & keys == self.expd_keys:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("all", common, self.values)
class ExactlyOne(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if len(self.values & inputs) == 1:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("1", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if len(self.expd_keys & keys) == 1:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("1", common, self.values)
class OneOrMore(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if len(self.values & inputs) > 0:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("at least 1", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if len(self.expd_keys & keys) > 0:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("at least 1", common, self.values)
class Optional(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
# No verifiction required.
...
def validate_filters(self, keys: set[str]) -> None:
# No verifiction required.
...
class ZeroOrOne(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
if len(self.values & inputs) < 2:
return
common = len(inputs & self.values)
raise errors.InvalidSetOfDimensions("0 or 1", common, self.values)
def validate_filters(self, keys: set[str]) -> None:
if len(self.expd_keys & keys) < 2:
return
common = len(keys & self.expd_keys)
raise errors.InvalidSetOfFilters("0 or 1", common, self.values)
class ZeroOrMore(abc.SetType, CompareMixin):
def validate_dimensions(self, inputs: set[str]) -> None:
# No verifiction required.
...
def validate_filters(self, keys: set[str]) -> None:
# No verifiction required.
...
| en | 0.736234 | # Copyright (c) 2021-present, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # No verifiction required. # No verifiction required. # No verifiction required. # No verifiction required. | 1.599452 | 2 |
recommender_app/views.py | laraib-sidd/Recommender | 1 | 6614115 | from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import HttpResponse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login, authenticate
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the landing page; a POST redirects to the anime recommender."""
    if request.method == "POST":
        return redirect(reverse("anime"))
    return render(request, "recommender_app/index.html", {})
def anime(request):
    """Render the anime recommendation form page.

    The original built a ``context`` dict but never passed it to ``render``,
    and imported ``anime_recommendation`` without using it; the dead code is
    removed and the context is now forwarded.
    """
    context = {}
    if request.method == "POST":
        print("Method is POST")
    return render(request, "recommender_app/anime.html", context)
def anime_results(request):
    """Run the anime recommender for an authenticated user and show results.

    Unauthenticated users are redirected to the index with a warning. On a
    POST with a non-empty ``anime_field`` the recommendation script runs and
    its DataFrame is returned as raw HTML; any failure — including a GET or
    missing input, which previously raised UnboundLocalError because
    ``error`` was never assigned — falls through to the error template.
    """
    if not request.user.is_authenticated:
        messages.warning(request, "Sorry, you dont have access to that page.")
        return redirect(reverse("index"))
    error = True  # assume failure until the script completes successfully
    df = None
    if request.method == "POST":
        inp = request.POST.get("anime_field", None)
        if inp:
            from scripts.Anime.anime import anime_recommendation
            try:
                df = anime_recommendation(inp)
                error = False
            except Exception:  # script may raise anything; show the error page
                error = True
    if not error:
        return HttpResponse(df.to_html())
    return render(request, "recommender_app/anime_results.html", {"error": error})
def music(request):
    """Render the music recommendation page."""
    return render(request, "recommender_app/music.html")
def signup(request):
    """Register a new user via UserCreationForm and log them in on success.

    On an invalid form the bound form is re-rendered so field errors show;
    the original's ``print("error!")`` after the return was unreachable and
    the ``errors`` local was unused, so both are removed. The password is
    read from ``password1``, UserCreationForm's first password field.
    """
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get("username")
            raw_password = form.cleaned_data.get("password1")
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect("index")
        return render(request, "recommender_app/signup.html", {"form": form})
    form = UserCreationForm()
    return render(request, "recommender_app/signup.html", {"form": form})
| from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import HttpResponse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login, authenticate
from django.contrib import messages
# Create your views here.
def index(request):
context = dict()
if request.method == "POST":
return redirect(reverse("anime"))
return render(request, "recommender_app/index.html", context)
def anime(request):
context = {}
if request.method == "POST":
print("Method is POST")
inp = request.POST.get("anime_field", None)
if inp:
from scripts.Anime.anime import anime_recommendation
# res = anime_recommendation(inp)
# print(res)
context = dict()
return render(request, "recommender_app/anime.html")
def anime_results(request):
if not request.user.is_authenticated:
messages.warning(request, "Sorry, you dont have access to that page.")
return redirect(reverse("index"))
context = {}
success = False
if request.method == "POST":
print("Anime results view reached, method is post!")
inp = request.POST.get("anime_field", None)
if inp:
from scripts.Anime.anime import anime_recommendation
print("About to run the anime script")
try:
df = anime_recommendation(inp)
error = False
except:
error = True
print("Script successfully run")
success = True
context["success"] = success
# context['df' : df]
# return render(request, 'recommender_app/anime_results.html', context)
# return render(request, df.to_html(), context)
if not error:
return HttpResponse(df.to_html())
else:
return render(request, "recommender_app/anime_results.html", {"error": error})
def music(request):
context = dict()
return render(request, "recommender_app/music.html")
def signup(request):
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get("username")
raw_password = form.cleaned_data.get("<PASSWORD>")
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect("index")
else:
errors = form.errors
return render(request, "recommender_app/signup.html", {"form": form})
print("error!")
else:
form = UserCreationForm()
return render(request, "recommender_app/signup.html", {"form": form})
| en | 0.321983 | # Create your views here. # res = anime_recommendation(inp) # print(res) # context['df' : df] # return render(request, 'recommender_app/anime_results.html', context) # return render(request, df.to_html(), context) | 2.458428 | 2 |
src/proposals/migrations/0012_auto_20151222_1745.py | kaka-lin/pycon.tw | 47 | 6614116 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-22 17:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``cancelled`` flag to talk and tutorial proposals."""

    dependencies = [
        ('proposals', '0011_auto_20151222_1743'),
    ]
    operations = [
        # db_index=True so cancelled proposals can be filtered efficiently;
        # existing rows default to not-cancelled.
        migrations.AddField(
            model_name='talkproposal',
            name='cancelled',
            field=models.BooleanField(db_index=True, default=False, verbose_name='cancelled'),
        ),
        migrations.AddField(
            model_name='tutorialproposal',
            name='cancelled',
            field=models.BooleanField(db_index=True, default=False, verbose_name='cancelled'),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-22 17:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0011_auto_20151222_1743'),
]
operations = [
migrations.AddField(
model_name='talkproposal',
name='cancelled',
field=models.BooleanField(db_index=True, default=False, verbose_name='cancelled'),
),
migrations.AddField(
model_name='tutorialproposal',
name='cancelled',
field=models.BooleanField(db_index=True, default=False, verbose_name='cancelled'),
),
]
| en | 0.853449 | # -*- coding: utf-8 -*- # Generated by Django 1.9 on 2015-12-22 17:45 | 1.558527 | 2 |
app/simulation/PlatoonSimulation.py | alinaciuysal/SimplaPlatoon | 2 | 6614117 | import json
import traci
from app.streaming import KafkaPublisher, KafkaConnector
from app.logging import *
import os.path
import app.Config as Config
simulationEnded = False
class PlatoonSimulation(object):
    """Drives the SUMO platoon simulation and applies runtime configuration
    from a local JSON file and from Kafka."""

    # Maps an incoming parameter name to the Config.parameters section it
    # belongs to; replaces the previous long if/elif chain.
    _PARAMETER_SECTIONS = {
        "maxVehiclesInPlatoon": "changeable",
        "catchupDistance": "changeable",
        "maxPlatoonGap": "changeable",
        "platoonSplitTime": "changeable",
        "joinDistance": "changeable",
        "switchImpatienceFactor": "contextual",
        "totalCarCounter": "contextual",
        "platoonCarCounter": "contextual",
        "extended_simpla_logic": "contextual",
    }

    @classmethod
    def applyFileConfig(cls):
        """ reads configs from a json and applies it at realtime to the simulation """
        try:
            file_path = os.path.join("parameters.json")  # relative path to Run.py
            with open(file_path) as f:
                parameters = json.load(f)
            cls.changeVariables(parameters=parameters)
            info("# New parameters -> " + str(Config.parameters), Fore.GREEN)
        except Exception as e:
            # Best effort: a missing or malformed file keeps the current config.
            print(e)

    @classmethod
    def applyKafkaConfig(cls):
        """ Gets a new configuration value from Kafka"""
        new_conf = KafkaConnector.checkForNewConfiguration()
        if new_conf is not None:
            info("new configuration arrived" + str(new_conf), Fore.GREEN)
            cls.changeVariables(parameters=new_conf)

    @classmethod
    def start(cls, platoon_mgr):
        """ start the simulation """
        info("# Applying file config")
        cls.applyFileConfig()
        info("# Started adding initial cars to the simulation", Fore.GREEN)
        platoon_mgr.applyCarCounter()
        kafkaIndex = 1
        while 1:
            # let the cars process this step via platoonmgr
            traci.simulationStep()
            # apply kafka config in 10 ticks
            if kafkaIndex % 10 == 0:
                cls.applyKafkaConfig()
            kafkaIndex += 1

    @classmethod
    def changeVariables(cls, parameters):
        '''
        Apply each entry of *parameters* to the matching Config section.

        :param parameters: dict that contains parameters to be changed
        :return:
        '''
        for variable_name in parameters:
            value = parameters[variable_name]
            section = cls._PARAMETER_SECTIONS.get(variable_name)
            if section is None:
                warn(str(variable_name) + " does not exist in Config.py", Fore.RED)
                continue
            Config.parameters[section][variable_name] = value
            if variable_name == "catchupDistance":
                # set same value of catchupDist, as we did in experiments
                Config.parameters["contextual"]["lookAheadDistance"] = value
| import json
import traci
from app.streaming import KafkaPublisher, KafkaConnector
from app.logging import *
import os.path
import app.Config as Config
simulationEnded = False
class PlatoonSimulation(object):
@classmethod
def applyFileConfig(cls):
""" reads configs from a json and applies it at realtime to the simulation """
try:
file_path = os.path.join("parameters.json") # relative path to Run.py
with open(file_path) as f:
parameters = json.load(f)
cls.changeVariables(parameters=parameters)
info("# New parameters -> " + str(Config.parameters), Fore.GREEN)
except Exception as e:
print(e)
pass
@classmethod
def applyKafkaConfig(cls):
""" Gets a new configuration value from Kafka"""
new_conf = KafkaConnector.checkForNewConfiguration()
if new_conf is not None:
info("new configuration arrived" + str(new_conf), Fore.GREEN)
cls.changeVariables(parameters=new_conf)
@classmethod
def start(cls, platoon_mgr):
""" start the simulation """
info("# Applying file config")
cls.applyFileConfig()
info("# Started adding initial cars to the simulation", Fore.GREEN)
platoon_mgr.applyCarCounter()
kafkaIndex = 1
while 1:
# let the cars process this step via platoonmgr
traci.simulationStep()
# apply kafka config in 10 ticks
if kafkaIndex % 10 == 0:
cls.applyKafkaConfig()
kafkaIndex += 1
@classmethod
def changeVariables(cls, parameters):
'''
:param parameters: dict that contains parameters to be changed
:return:
'''
for variable_name in parameters:
value = parameters[variable_name]
# there should be a better way instead of these statements
if variable_name == "maxVehiclesInPlatoon":
Config.parameters["changeable"]["maxVehiclesInPlatoon"] = value
elif variable_name == "catchupDistance":
Config.parameters["changeable"]["catchupDistance"] = value
Config.parameters["contextual"]["lookAheadDistance"] = value # set same value of catchupDist, as we did in experiments
elif variable_name == "maxPlatoonGap":
Config.parameters["changeable"]["maxPlatoonGap"] = value
elif variable_name == "platoonSplitTime":
Config.parameters["changeable"]["platoonSplitTime"] = value
elif variable_name == "joinDistance":
Config.parameters["changeable"]["joinDistance"] = value
elif variable_name == "switchImpatienceFactor":
Config.parameters["contextual"]["switchImpatienceFactor"] = value
elif variable_name == "totalCarCounter":
Config.parameters["contextual"]["totalCarCounter"] = value
elif variable_name == "platoonCarCounter":
Config.parameters["contextual"]["platoonCarCounter"] = value
elif variable_name == "extended_simpla_logic":
Config.parameters["contextual"]["extended_simpla_logic"] = value
else:
warn(str(variable_name) + " does not exist in Config.py", Fore.RED)
| en | 0.814124 | reads configs from a json and applies it at realtime to the simulation # relative path to Run.py Gets a new configuration value from Kafka start the simulation # let the cars process this step via platoonmgr # apply kafka config in 10 ticks :param parameters: dict that contains parameters to be changed :return: # there should be a better way instead of these statements # set same value of catchupDist, as we did in experiments | 2.575971 | 3 |
temporal-cluster-matching/experiments/run_parameter_sweep_color.py | microsoft/solar-farms-mapping | 12 | 6614118 | '''
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
'''
import time
import subprocess
from multiprocessing import Process, Queue
def do_work(work):
    """Drain shell commands from the *work* queue, running each to completion.

    NOTE(review): checking ``work.empty()`` and then calling ``work.get()``
    is racy with multiple worker processes — another worker can drain the
    queue between the two calls, leaving this ``get()`` blocked forever;
    confirm whether a non-blocking get with queue.Empty handling is needed.
    """
    while not work.empty():
        experiment = work.get()
        print(experiment)
        subprocess.call(experiment.split(" "))
    return True
NUM_PROCESSES = 5
work = Queue()


def _enqueue_experiments(datasets, buffer_options):
    """Queue one `run_algorithm.py --algorithm color` command per
    (dataset, buffer) pair; replaces the two previously duplicated loops."""
    for dataset in datasets:
        for buffer in buffer_options[dataset]:
            work.put(
                f"python run_algorithm.py --dataset {dataset} --buffer {buffer} "
                f"--output_dir results/color/{dataset}-0-{buffer}/ --algorithm color"
            )


################################################
# Run the algorithm with the dataset footprints
################################################
_enqueue_experiments(
    ["poultry_barns", "solar_farms_reduced"],
    {
        "poultry_barns": [400, 200, 100],
        "solar_farms_reduced": [0.024, 0.016],
    },
)

################################################
# Run the algorithm with the random polygons
################################################
_enqueue_experiments(
    ["poultry_barns_random", "solar_farms_reduced_random"],
    {
        "poultry_barns_random": [400, 200, 100],
        "solar_farms_reduced_random": [0.024, 0.016],
    },
)
## Start experiments
processes = []
start_time = time.time()
for i in range(NUM_PROCESSES):
p = Process(target=do_work, args=(work,))
processes.append(p)
p.start()
for p in processes:
p.join() | '''
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
'''
import time
import subprocess
from multiprocessing import Process, Queue
def do_work(work):
    """Worker loop: drain command strings from `work` and run each as a subprocess.

    Args:
        work: multiprocessing.Queue of shell-free command strings.

    Returns:
        True once the queue is exhausted.
    """
    from queue import Empty  # exception raised by Queue.get_nowait()
    while True:
        try:
            # BUGFIX: `while not work.empty(): work.get()` is racy with
            # several worker processes — another worker can drain the queue
            # between the empty() check and the get(), blocking this worker
            # forever. get_nowait() + Empty makes draining atomic.
            experiment = work.get_nowait()
        except Empty:
            break
        print(experiment)
        # Naive split on single spaces — fine here because the generated
        # commands contain no quoted or space-containing arguments.
        subprocess.call(experiment.split(" "))
    return True
NUM_PROCESSES = 5


def main():
    """Enqueue every (dataset, buffer) sweep command and run them with a worker pool."""
    work = Queue()

    ################################################
    # Run the algorithm with the dataset footprints
    ################################################
    buffer_options = {
        "poultry_barns": [400, 200, 100],
        "solar_farms_reduced": [0.024, 0.016],
    }
    for dataset, buffers in buffer_options.items():
        for buffer in buffers:
            work.put(f"python run_algorithm.py --dataset {dataset} --buffer {buffer} --output_dir results/color/{dataset}-0-{buffer}/ --algorithm color")

    ################################################
    # Run the algorithm with the random polygons
    ################################################
    buffer_options = {
        "poultry_barns_random": [400, 200, 100],
        "solar_farms_reduced_random": [0.024, 0.016],
    }
    for dataset, buffers in buffer_options.items():
        for buffer in buffers:
            work.put(f"python run_algorithm.py --dataset {dataset} --buffer {buffer} --output_dir results/color/{dataset}-0-{buffer}/ --algorithm color")

    ## Start experiments
    start_time = time.time()
    processes = []
    for _ in range(NUM_PROCESSES):
        p = Process(target=do_work, args=(work,))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
    # BUGFIX: start_time was recorded but never reported.
    print("All experiments finished in {:.1f}s".format(time.time() - start_time))


if __name__ == "__main__":
    # BUGFIX: the original spawned Process objects at module top level.
    # On spawn-based start methods (Windows, macOS default) children
    # re-import the module, which without this guard re-spawns workers.
    main()
python/climate_ae/models/ae/train_linear_model.py | christinaheinze/latent-linear-adjustment-autoencoders- | 0 | 6614119 | import datetime
import json
import numpy as np
import os
import pickle
import tensorflow as tf
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
from absl import logging
logging.set_verbosity(logging.INFO)
from sklearn.linear_model import LinearRegression
import local_settings
from climate_ae.models import utils
from climate_ae.data_generator.datahandler import input_fn
import climate_ae.models.ae.eval_utils as eval_utils
import climate_ae.models.ae.climate_utils as climate_utils
import climate_ae.models.ae.train as train
DEBUG = False
process_additional_holdout_members = False
def load_data(inputs, model, subset=False, debug=False):
    """Run the autoencoder over a dataset and accumulate evaluation arrays.

    Args:
        inputs: iterable of feature dicts with keys "inputs", "year",
            "month", "day" (plus whatever train.get_annotations reads).
        model: autoencoder exposing autoencode() and mean_encode().
        subset: if True, only the first 11 batches of the memory-heavy
            arrays (inputs, reconstructions, dates) are kept; latents and
            annotations are always accumulated in full.
        debug: if True, stop after 20 batches.

    Returns:
        Tuple (inputs, recons, latents, annos, years, months, days),
        each stacked along the first axis with np.r_.
    """
    # Accumulators are renamed (inputs_all) so the parameter being
    # iterated is no longer shadowed mid-loop.
    for b, features in enumerate(inputs):
        if debug and b % 20 == 0 and b > 0:
            break
        if b % 100 == 0:
            print(b)
        input_ = features["inputs"]
        recon_ = model.autoencode(input_, training=False)["output"]
        anno_ = train.get_annotations(features)
        year_ = features["year"]
        month_ = features["month"]
        day_ = features["day"]
        encodings_ = model.mean_encode(input_, training=False)['z'].numpy()
        if b == 0:
            inputs_all = input_
            recons = recon_
            latents = encodings_
            annos = anno_
            years = year_
            months = month_
            days = day_
        else:
            # Latents and annotations are cheap: always keep all of them
            # (they are what the linear model is fitted on).
            latents = np.r_[latents, encodings_]
            annos = np.r_[annos, anno_]
            # BUGFIX: in the original, the `if subset and b <= 10:` and
            # `else:` branches were byte-identical, so `subset` had no
            # effect and the full image arrays were always accumulated.
            # Only keep the heavy arrays for the first batches when a
            # subset was requested, as the original comment intended.
            if not subset or b <= 10:
                inputs_all = np.r_[inputs_all, input_]
                recons = np.r_[recons, recon_]
                years = np.r_[years, year_]
                months = np.r_[months, month_]
                days = np.r_[days, day_]
    return inputs_all, recons, latents, annos, years, months, days
def predict_latents_and_decode(model, reg_model, annos, out_shape):
    """Map annotations to latent codes via the linear model, then decode them.

    Args:
        model: autoencoder exposing decode().
        reg_model: fitted regressor with a predict() method.
        annos: annotation matrix, one row per example.
        out_shape: shape of the decoded output array (first axis = examples).

    Returns:
        numpy array of shape `out_shape` with the decoded fields.
    """
    predicted_latents = reg_model.predict(annos)
    decoded = np.zeros(out_shape)
    # Decode one example at a time; the model expects a leading batch axis.
    for idx in range(decoded.shape[0]):
        batched_latent = np.expand_dims(predicted_latents[idx, ...], axis=0)
        decoded[idx, ...] = model.decode(batched_latent, training=False)["output"]
    return decoded
def process_holdout(holdout_datasets, model, reg_model, save_nc_files, out_dir):
    """Encode/predict every additional holdout member and optionally dump NetCDFs.

    Args:
        holdout_datasets: mapping from member name to its dataset.
        model: trained autoencoder.
        reg_model: fitted linear latent-space regressor.
        save_nc_files: if True, write input/prediction NetCDF files.
        out_dir: directory the NetCDF files are written to.

    Returns:
        Dict mapping member name to (inputs, recons, annos, xhatexp).
    """
    results = {}
    for name, dataset in holdout_datasets.items():
        print(name)
        (member_inputs, member_recons, _, member_annos,
         member_years, member_months, member_days) = load_data(
            dataset, model, debug=DEBUG)
        # Predict latents from annotations and decode them back to fields.
        member_pred = predict_latents_and_decode(
            model, reg_model, member_annos, np.shape(member_inputs))
        results[name] = member_inputs, member_recons, member_annos, member_pred
        if save_nc_files:
            # Persist raw inputs and predictions for offline analysis.
            climate_utils.save_ncdf_file_high_res_prec(
                member_inputs, member_years, member_months, member_days,
                "ho_{}_input.nc".format(name), out_dir)
            climate_utils.save_ncdf_file_high_res_prec(
                member_pred, member_years, member_months, member_days,
                "ho_{}_pred.nc".format(name), out_dir)
    return results
def holdout_plots(results, model, reg, label, precip, offset, out_dir, out_dir_orig):
    """Evaluate one holdout ensemble member: R2/MSE maps, example plots, metrics JSON.

    Args:
        results: tuple (inputs, recons, annos, xhatexp) for this member,
            as produced by process_holdout.
        model: trained autoencoder (used for the visualizations).
        reg: fitted linear latent-space regressor.
        label: member name; embedded in file names and log output.
        precip: if True, additionally evaluate on the back-transformed
            (squared) precipitation scale.
        offset: if True, subtract 25 after squaring (inverse of the offset
            applied before the sqrt transform — presumably; confirm against
            the preprocessing pipeline).
        out_dir: output directory for transformed-scale artifacts.
        out_dir_orig: output directory for original-scale artifacts
            (only used when precip is True).
    """
    ho_inputs, ho_recons, ho_annos, ho_xhatexp = results
    # Index 0 of each map tuple compares x vs. reconstruction (xhat),
    # index 1 compares x vs. annotation-driven prediction (xhatexp).
    r2_maps_ho = eval_utils.plot_r2_map(ho_inputs, ho_recons,
        ho_xhatexp, out_dir, "holdout_{}".format(label))
    mse_map_ho = eval_utils.plot_mse_map(ho_inputs, ho_recons, ho_xhatexp,
        out_dir, "holdout_{}".format(label))
    mean_mse_x_xhat = np.mean(mse_map_ho[0])
    mean_mse_x_xhatexp = np.mean(mse_map_ho[1])
    mean_r2_x_xhat = np.mean(r2_maps_ho[0])
    mean_r2_x_xhatexp = np.mean(r2_maps_ho[1])
    eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
        "holdout_{}".format(label))
    print("\n#### Holdout ensemble: {}".format(label))
    print("Mean MSE(x, xhat): {}".format(mean_mse_x_xhat))
    print("Mean MSE(x, xhatexp): {}".format(mean_mse_x_xhatexp))
    print("Mean R2(x, xhat): {}".format(mean_r2_x_xhat))
    print("Mean R2(x, xhatexp): {}".format(mean_r2_x_xhatexp))
    # save metrics again in checkpoint dir
    save_path = os.path.join(out_dir, "metrics_{}.json".format(label))
    metrics = {'mean_mse_x_xhat': mean_mse_x_xhat,
        'mean_mse_x_xhatexp': mean_mse_x_xhatexp,
        'mean_r2_x_xhat': mean_r2_x_xhat,
        'mean_r2_x_xhatexp': mean_r2_x_xhatexp}
    with open(save_path, 'w') as result_file:
        json.dump(metrics, result_file, sort_keys=True, indent=4)
    if precip:
        # Back-transform to the original precipitation scale: squaring
        # undoes the sqrt applied during preprocessing.
        ho_inputs_2 = ho_inputs ** 2
        ho_recons_2 = ho_recons ** 2
        ho_xhatexp_2 = ho_xhatexp ** 2
        if offset:
            ho_inputs_2 = ho_inputs_2 - 25
            ho_recons_2 = ho_recons_2 - 25
            ho_xhatexp_2 = ho_xhatexp_2 - 25
        r2_maps_ho_orig = eval_utils.plot_r2_map(ho_inputs_2, ho_recons_2,
            ho_xhatexp_2, out_dir_orig, "holdout_orig_{}".format(label))
        mse_map_ho_orig = eval_utils.plot_mse_map(ho_inputs_2, ho_recons_2,
            ho_xhatexp_2, out_dir_orig, "holdout_orig_{}".format(label))
        # NOTE: the transformed-scale means above are intentionally
        # overwritten with the original-scale values from here on.
        mean_mse_x_xhat = np.mean(mse_map_ho_orig[0])
        mean_mse_x_xhatexp = np.mean(mse_map_ho_orig[1])
        mean_r2_x_xhat = np.mean(r2_maps_ho_orig[0])
        mean_r2_x_xhatexp = np.mean(r2_maps_ho_orig[1])
        eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir_orig,
            "holdout_orig_{}".format(label), transform_back=True, offset=offset)
        print("\n# Orig: {}".format(label))
        print("Mean MSE(x, xhat): {}".format(mean_mse_x_xhat))
        print("Mean MSE(x, xhatexp): {}".format(mean_mse_x_xhatexp))
        print("Mean R2(x, xhat): {}".format(mean_r2_x_xhat))
        print("Mean R2(x, xhatexp): {}".format(mean_r2_x_xhatexp))
        # save metrics again in checkpoint dir
        save_path = os.path.join(out_dir_orig, "metrics_orig_{}.json".format(label))
        metrics = {'mean_mse_x_xhat': mean_mse_x_xhat,
            'mean_mse_x_xhatexp': mean_mse_x_xhatexp,
            'mean_r2_x_xhat': mean_r2_x_xhat,
            'mean_r2_x_xhatexp': mean_r2_x_xhatexp}
        with open(save_path, 'w') as result_file:
            json.dump(metrics, result_file, sort_keys=True, indent=4)
def train_linear_model(checkpoint_path, load_json, results_path, precip, offset, save_nc_files):
    """Fit and evaluate a linear annotation-to-latent model on a trained autoencoder.

    Restores the autoencoder stored under `checkpoint_path`, encodes the
    training split, fits an OLS regression from annotations to latent codes,
    and evaluates both plain reconstructions (xhat) and annotation-driven
    predictions (xhatexp) on the test and holdout splits: R2/MSE maps,
    example visualizations, optional NetCDF dumps, and JSON metric files.

    Args:
        checkpoint_path: directory containing hparams.pkl and TF checkpoints.
        load_json: if True, merge the computed metrics into an existing
            per-experiment JSON file found under `results_path`.
        results_path: directory with per-experiment JSON result files
            (only read when load_json is True).
        precip: if True, inputs are sqrt-transformed precipitation and
            metrics are additionally computed on the squared-back scale.
        offset: if True, subtract 25 after squaring when back-transforming.
        save_nc_files: if True, also write inputs/predictions as NetCDF.
    """
    # get configs from model
    with open(os.path.join(checkpoint_path, "hparams.pkl"), 'rb') as f:
        config = pickle.load(f)
    config = utils.config_to_namedtuple(config)
    model, _ = train.get_models(config)

    # input function: wraps input_fn and assembles the "anno" vector from
    # the circulation annotations plus (optionally) ensemble-mean pressure
    # and/or temperature columns, depending on which index lists are non-empty.
    def input_anno(params, mode, repeat, n_repeat=None):
        dataset = input_fn(params=params, mode=mode, repeat=repeat,
            n_repeat=n_repeat, shuffle=False)
        if len(params.temp_indices) == 0 and len(params.psl_indices) == 0:
            # Annotations only.
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                "anno": tf.gather(x["anno"], params.anno_indices, axis=1),
                "year": x["year"],
                "month": x["month"],
                "day": x["day"]
                })
        elif len(params.temp_indices) == 0:
            # Annotations + ensemble-mean sea-level pressure columns.
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                "anno": tf.concat(
                    (tf.gather(x["anno"], params.anno_indices, axis=1),
                    tf.gather(x["psl_mean_ens"], params.psl_indices, axis=1)),
                    axis=1),
                "year": x["year"],
                "month": x["month"],
                "day": x["day"]
                })
        elif len(params.psl_indices) == 0:
            # Annotations + ensemble-mean temperature columns.
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                "anno": tf.concat(
                    (tf.gather(x["anno"], params.anno_indices, axis=1),
                    tf.gather(x["temp_mean_ens"], params.temp_indices, axis=1)),
                    axis=1),
                "year": x["year"],
                "month": x["month"],
                "day": x["day"]
                })
        else:
            # Annotations + pressure + temperature columns.
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                "anno": tf.concat(
                    (tf.gather(x["anno"], params.anno_indices, axis=1),
                    tf.gather(x["psl_mean_ens"], params.psl_indices, axis=1),
                    tf.gather(x["temp_mean_ens"], params.temp_indices, axis=1)),
                    axis=1),
                "year": x["year"],
                "month": x["month"],
                "day": x["day"]
                })
        return dataset

    global_step = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False,
        name="global_step")

    # Splits: "train" fits the linear model; "test1" is the test set;
    # "test2" is the holdout ensemble member.
    train_inputs = input_anno(params=config, mode="train",
        repeat=False)
    test_inputs = input_anno(params=config, mode="test1",
        repeat=False)
    holdout_inputs = input_anno(params=config, mode="test2",
        repeat=False)

    # dummy run - otherwise, the model wouldn't be fully built, and
    # checkpoint restoring below could not map all variables
    show_inputs = iter(train_inputs)
    _ = model(next(show_inputs)["inputs"])

    # restore model from checkpoint
    checkpoint = tf.train.Checkpoint(model=model, global_step=global_step)
    manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=3)
    status = checkpoint.restore(manager.latest_checkpoint)
    status.assert_consumed()

    # get training data for linear latent space model
    tr_inputs, _, tr_latents, tr_annos, _, _, _ = load_data(train_inputs, model,
        subset=True, debug=DEBUG)
    # fit linear model: OLS from annotations to latent codes
    reg = LinearRegression().fit(tr_annos, tr_latents)

    # get test data
    te_inputs, te_recons, _, te_annos, te_years, te_months, te_days = \
        load_data(test_inputs, model, debug=DEBUG)
    # predict latents for test set and decode
    te_xhatexp = predict_latents_and_decode(model, reg, te_annos,
        np.shape(te_inputs))

    # get holdout data
    ho_inputs, ho_recons, _, ho_annos, ho_years, ho_months, ho_days = \
        load_data(holdout_inputs, model, debug=DEBUG)
    # predict latents for holdout set and decode
    ho_xhatexp = predict_latents_and_decode(model, reg, ho_annos,
        np.shape(ho_inputs))

    # setup folder to save results (timestamped per evaluation run)
    current_time = datetime.datetime.now().strftime(r"%y%m%d_%H%M")
    out_dir = os.path.join(checkpoint_path, "eval_{}".format(current_time))
    os.makedirs(out_dir, exist_ok=True)

    # save inputs and predictions as NetCDF files
    if save_nc_files:
        climate_utils.save_ncdf_file_high_res_prec(te_inputs, te_years,
            te_months, te_days, "te_input.nc", out_dir)
        climate_utils.save_ncdf_file_high_res_prec(te_xhatexp, te_years,
            te_months, te_days, "te_pred.nc", out_dir)
        climate_utils.save_ncdf_file_high_res_prec(ho_inputs, ho_years,
            ho_months, ho_days, "ho_input.nc", out_dir)
        climate_utils.save_ncdf_file_high_res_prec(ho_xhatexp, ho_years,
            ho_months, ho_days, "ho_pred.nc", out_dir)

    #################
    # plots
    #################
    # R2 map: index 0 compares x vs. xhat, index 1 x vs. xhatexp
    r2_maps_test = eval_utils.plot_r2_map(te_inputs, te_recons, te_xhatexp,
        out_dir, "test")
    np.save(os.path.join(out_dir, "r2map_test_xxhat.npy"), r2_maps_test[0])
    np.save(os.path.join(out_dir, "r2map_test_xxhatexp.npy"), r2_maps_test[1])
    r2_maps_ho = eval_utils.plot_r2_map(ho_inputs, ho_recons, ho_xhatexp,
        out_dir, "holdout")
    np.save(os.path.join(out_dir, "r2map_ho_xxhat.npy"), r2_maps_ho[0])
    np.save(os.path.join(out_dir, "r2map_ho_xxhatexp.npy"), r2_maps_ho[1])

    # MSE map
    mse_map_test = eval_utils.plot_mse_map(te_inputs, te_recons, te_xhatexp,
        out_dir, "test")
    np.save(os.path.join(out_dir, "mse_map_test_xxhat.npy"), mse_map_test[0])
    np.save(os.path.join(out_dir, "mse_map_test_xxhatexp.npy"), mse_map_test[1])
    mse_map_ho = eval_utils.plot_mse_map(ho_inputs, ho_recons, ho_xhatexp,
        out_dir, "holdout")
    np.save(os.path.join(out_dir, "mse_map_ho_xxhat.npy"), mse_map_ho[0])
    np.save(os.path.join(out_dir, "mse_map_ho_xxhatexp.npy"), mse_map_ho[1])

    # visualize reconstructions and interventions -- random examples
    imgs_test = eval_utils.visualize(te_inputs, te_annos, model, reg, out_dir,
        "test")
    np.save(os.path.join(out_dir, "te_x.npy"), imgs_test[0])
    np.save(os.path.join(out_dir, "te_xhat.npy"), imgs_test[1])
    np.save(os.path.join(out_dir, "te_xhatexp.npy"), imgs_test[2])
    imgs_ho = eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
        "holdout")
    np.save(os.path.join(out_dir, "ho_x.npy"), imgs_ho[0])
    np.save(os.path.join(out_dir, "ho_xhat.npy"), imgs_ho[1])
    np.save(os.path.join(out_dir, "ho_xhatexp.npy"), imgs_ho[2])
    imgs_tr = eval_utils.visualize(tr_inputs, tr_annos, model, reg, out_dir,
        "train")
    np.save(os.path.join(out_dir, "tr_x.npy"), imgs_tr[0])
    np.save(os.path.join(out_dir, "tr_xhat.npy"), imgs_tr[1])
    np.save(os.path.join(out_dir, "tr_xhatexp.npy"), imgs_tr[2])

    # visualize reconstructions and interventions -- examples picked at
    # MSE/R2 quantiles (worst/median/best cases)
    idx_quantiles_te = eval_utils.get_field_mse_quantile_idx(te_inputs, te_xhatexp)
    idx_quantiles_ho = eval_utils.get_field_mse_quantile_idx(ho_inputs, ho_xhatexp)
    print("Indices test:")
    print(idx_quantiles_te)
    print("Indices ho:")
    print(idx_quantiles_ho)
    imgs_test = eval_utils.visualize(te_inputs, te_annos, model, reg, out_dir,
        "test_q_mse", random=False, idx=idx_quantiles_te[0])
    np.save(os.path.join(out_dir, "te_x_q_mse.npy"), imgs_test[0])
    np.save(os.path.join(out_dir, "te_xhat_q_mse.npy"), imgs_test[1])
    np.save(os.path.join(out_dir, "te_xhatexp_q_mse.npy"), imgs_test[2])
    imgs_ho = eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
        "holdout_q_mse", random=False, idx=idx_quantiles_ho[0])
    np.save(os.path.join(out_dir, "ho_x_q_mse.npy"), imgs_ho[0])
    np.save(os.path.join(out_dir, "ho_xhat_q_mse.npy"), imgs_ho[1])
    np.save(os.path.join(out_dir, "ho_xhatexp_q_mse.npy"), imgs_ho[2])
    imgs_test = eval_utils.visualize(te_inputs, te_annos, model, reg, out_dir,
        "test_q_r2", random=False, idx=idx_quantiles_te[1])
    np.save(os.path.join(out_dir, "te_x_q_r2.npy"), imgs_test[0])
    np.save(os.path.join(out_dir, "te_xhat_q_r2.npy"), imgs_test[1])
    np.save(os.path.join(out_dir, "te_xhatexp_q_r2.npy"), imgs_test[2])
    imgs_ho = eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
        "holdout_q_r2", random=False, idx=idx_quantiles_ho[1])
    np.save(os.path.join(out_dir, "ho_x_q_r2.npy"), imgs_ho[0])
    np.save(os.path.join(out_dir, "ho_xhat_q_r2.npy"), imgs_ho[1])
    np.save(os.path.join(out_dir, "ho_xhatexp_q_r2.npy"), imgs_ho[2])

    #################
    # save summaries of metrics maps
    #################
    # mean MSE over entire field
    test_metrics = {}
    test_metrics.update({"mean_mse_x_xhat": np.mean(mse_map_test[0])})
    test_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_map_test[1])})
    # mean R2 over entire field
    test_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_test[0])})
    test_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_test[1])})
    # mean MSE over entire field
    ho_metrics = {}
    ho_metrics.update({"mean_mse_x_xhat": np.mean(mse_map_ho[0])})
    ho_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_map_ho[1])})
    # mean R2 over entire field
    ho_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_ho[0])})
    ho_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_ho[1])})
    metrics = {'test': test_metrics, 'ho': ho_metrics}
    # print
    print("Metrics:")
    for entry in metrics:
        print(entry)
        print(metrics[entry])
    # save metrics again in checkpoint dir
    save_path = os.path.join(out_dir, "metrics.json")
    with open(save_path, 'w') as result_file:
        json.dump(metrics, result_file, sort_keys=True, indent=4)

    if load_json:
        # save metrics in json file
        # NOTE(review): assumes exactly one JSON file under results_path
        # matches config.id -- [0] raises IndexError otherwise; confirm.
        exp_jsons = os.listdir(results_path)
        exp_json = [f for f in exp_jsons if config.id in f][0]
        exp_json_path = os.path.join(results_path, exp_json)
        results = utils.load_json(exp_json_path)
        results[config.id]['linear_model_test'] = test_metrics
        results[config.id]['linear_model_ho'] = ho_metrics
        with open(exp_json_path, 'w') as result_file:
            json.dump(results, result_file, sort_keys=True, indent=4)

    # if precipitation data, transform back to original scale
    # (squaring undoes the sqrt transform applied during preprocessing)
    if precip:
        out_dir_orig = "{}_orig".format(out_dir)
        te_inputs_2 = te_inputs ** 2
        te_recons_2 = te_recons ** 2
        te_xhatexp_2 = te_xhatexp ** 2
        if offset:
            te_inputs_2 = te_inputs_2 - 25
            te_recons_2 = te_recons_2 - 25
            te_xhatexp_2 = te_xhatexp_2 - 25
        r2_maps_test_orig = eval_utils.plot_r2_map(te_inputs_2, te_recons_2,
            te_xhatexp_2, out_dir_orig, "test_orig")
        np.save(os.path.join(out_dir_orig, "r2map_test_orig_xxhat.npy"),
            r2_maps_test_orig[0])
        np.save(os.path.join(out_dir_orig, "r2map_test_orig_xxhatexp.npy"),
            r2_maps_test_orig[1])
        mse_maps_test_orig = eval_utils.plot_mse_map(te_inputs_2, te_recons_2,
            te_xhatexp_2, out_dir_orig, "test_orig")
        np.save(os.path.join(out_dir_orig, "mse_map_test_orig_xxhat.npy"),
            mse_maps_test_orig[0])
        np.save(os.path.join(out_dir_orig, "mse_map_test_orig_xxhatexp.npy"),
            mse_maps_test_orig[1])
        ho_inputs_2 = ho_inputs ** 2
        ho_recons_2 = ho_recons ** 2
        ho_xhatexp_2 = ho_xhatexp ** 2
        if offset:
            ho_inputs_2 = ho_inputs_2 - 25
            ho_recons_2 = ho_recons_2 - 25
            ho_xhatexp_2 = ho_xhatexp_2 - 25
        r2_maps_ho_orig = eval_utils.plot_r2_map(ho_inputs_2, ho_recons_2, ho_xhatexp_2,
            out_dir_orig, "holdout_orig")
        np.save(os.path.join(out_dir_orig, "r2map_ho_orig_xxhat.npy"),
            r2_maps_ho_orig[0])
        np.save(os.path.join(out_dir_orig, "r2map_ho_orig_xxhatexp.npy"),
            r2_maps_ho_orig[1])
        mse_maps_ho_orig = eval_utils.plot_mse_map(ho_inputs_2, ho_recons_2,
            ho_xhatexp_2, out_dir_orig, "holdout_orig")
        np.save(os.path.join(out_dir_orig, "mse_map_ho_orig_xxhat.npy"),
            mse_maps_ho_orig[0])
        np.save(os.path.join(out_dir_orig, "mse_map_ho_orig_xxhatexp.npy"),
            mse_maps_ho_orig[1])

        # visualize reconstructions and interventions
        imgs_te_orig = eval_utils.visualize(te_inputs, te_annos, model, reg,
            out_dir_orig, "test_orig", transform_back=True, offset=offset)
        np.save(os.path.join(out_dir_orig, "te_orig_x.npy"), imgs_te_orig[0])
        np.save(os.path.join(out_dir_orig, "te_orig_xhat.npy"), imgs_te_orig[1])
        np.save(os.path.join(out_dir_orig, "te_orig_xhatexp.npy"), imgs_te_orig[2])
        imgs_ho_orig = eval_utils.visualize(ho_inputs, ho_annos, model, reg,
            out_dir_orig, "holdout_orig", transform_back=True, offset=offset)
        np.save(os.path.join(out_dir_orig, "ho_orig_x.npy"), imgs_ho_orig[0])
        np.save(os.path.join(out_dir_orig, "ho_orig_xhat.npy"), imgs_ho_orig[1])
        np.save(os.path.join(out_dir_orig, "ho_orig_xhatexp.npy"), imgs_ho_orig[2])
        imgs_tr_orig = eval_utils.visualize(tr_inputs, tr_annos, model, reg,
            out_dir_orig, "train_orig", transform_back=True, offset=offset)
        np.save(os.path.join(out_dir_orig, "tr_orig_x.npy"), imgs_tr_orig[0])
        np.save(os.path.join(out_dir_orig, "tr_orig_xhat.npy"), imgs_tr_orig[1])
        np.save(os.path.join(out_dir_orig, "tr_orig_xhatexp.npy"), imgs_tr_orig[2])

        #################
        # save summaries of metrics maps
        #################
        # mean MSE over entire field
        test_metrics = {}
        test_metrics.update({"mean_mse_x_xhat": np.mean(mse_maps_test_orig[0])})
        test_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_maps_test_orig[1])})
        # mean R2 over entire field
        test_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_test_orig[0])})
        test_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_test_orig[1])})
        # mean MSE over entire field
        ho_metrics = {}
        ho_metrics.update({"mean_mse_x_xhat": np.mean(mse_maps_ho_orig[0])})
        ho_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_maps_ho_orig[1])})
        # mean R2 over entire field
        ho_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_ho_orig[0])})
        ho_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_ho_orig[1])})
        # save metrics again in checkpoint dir
        save_path = os.path.join(out_dir_orig, "metrics_orig.json")
        metrics = {'test': test_metrics, 'ho': ho_metrics}
        # print
        print("Metrics:")
        for entry in metrics:
            print(entry)
            print(metrics[entry])
        with open(save_path, 'w') as result_file:
            json.dump(metrics, result_file, sort_keys=True, indent=4)
        if load_json:
            # save metrics in json file
            exp_jsons = os.listdir(results_path)
            exp_json = [f for f in exp_jsons if config.id in f][0]
            exp_json_path = os.path.join(results_path, exp_json)
            results = utils.load_json(exp_json_path)
            results[config.id]['linear_model_test_orig'] = test_metrics
            results[config.id]['linear_model_ho_orig'] = ho_metrics
            with open(exp_json_path, 'w') as result_file:
                json.dump(results, result_file, sort_keys=True, indent=4)

    if process_additional_holdout_members:
        # Additional CRCM ensemble members; the commented-out names below
        # are further members that were deliberately excluded from this run.
        holdout_names = ["kbd", "kbf", "kbh", "kbj",
            "kbl", "kbn", "kbo", "kbp", "kbr"]
            # "kbt", "kbu", "kbv", "kbw", "kbx",
            # "kby", "kbz", "kca", "kcb", "kcc",
            # "kcd", "kce", "kcf", "kcg", "kch",
            # "kci", "kcj", "kck", "kcl", "kcm",
            # "kcn", "kco", "kcp", "kcq", "kcr",
            # "kcs", "kct", "kcu", "kcv", "kcw", "kcx"]
        holdout_datasets = {}
        for ho in holdout_names:
            holdout_datasets[ho] = input_anno(params=config,
                mode="test_{}".format(ho),
                repeat=False)
        # process and save predictions for additional holdout datasets
        # NOTE(review): out_dir_orig is only defined inside the `if precip:`
        # branch above -- this call would raise NameError when precip is
        # False; presumably these flags are only ever used together. Confirm.
        results = process_holdout(holdout_datasets, model, reg, save_nc_files, out_dir)
        for ho in results:
            holdout_plots(results[ho], model, reg, ho, precip, offset, out_dir, out_dir_orig)
| import datetime
import json
import numpy as np
import os
import pickle
import tensorflow as tf
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
from absl import logging
logging.set_verbosity(logging.INFO)
from sklearn.linear_model import LinearRegression
import local_settings
from climate_ae.models import utils
from climate_ae.data_generator.datahandler import input_fn
import climate_ae.models.ae.eval_utils as eval_utils
import climate_ae.models.ae.climate_utils as climate_utils
import climate_ae.models.ae.train as train
DEBUG = False
process_additional_holdout_members = False
def load_data(inputs, model, subset=False, debug=False):
    """Run the autoencoder over a dataset and accumulate evaluation arrays.

    Args:
        inputs: iterable of feature dicts with keys "inputs", "year",
            "month", "day" (plus whatever train.get_annotations reads).
        model: autoencoder exposing autoencode() and mean_encode().
        subset: if True, only the first 11 batches of the memory-heavy
            arrays (inputs, reconstructions, dates) are kept; latents and
            annotations are always accumulated in full.
        debug: if True, stop after 20 batches.

    Returns:
        Tuple (inputs, recons, latents, annos, years, months, days),
        each stacked along the first axis with np.r_.
    """
    # Accumulators are renamed (inputs_all) so the parameter being
    # iterated is no longer shadowed mid-loop.
    for b, features in enumerate(inputs):
        if debug and b % 20 == 0 and b > 0:
            break
        if b % 100 == 0:
            print(b)
        input_ = features["inputs"]
        recon_ = model.autoencode(input_, training=False)["output"]
        anno_ = train.get_annotations(features)
        year_ = features["year"]
        month_ = features["month"]
        day_ = features["day"]
        encodings_ = model.mean_encode(input_, training=False)['z'].numpy()
        if b == 0:
            inputs_all = input_
            recons = recon_
            latents = encodings_
            annos = anno_
            years = year_
            months = month_
            days = day_
        else:
            # Latents and annotations are cheap: always keep all of them
            # (they are what the linear model is fitted on).
            latents = np.r_[latents, encodings_]
            annos = np.r_[annos, anno_]
            # BUGFIX: in the original, the `if subset and b <= 10:` and
            # `else:` branches were byte-identical, so `subset` had no
            # effect and the full image arrays were always accumulated.
            # Only keep the heavy arrays for the first batches when a
            # subset was requested, as the original comment intended.
            if not subset or b <= 10:
                inputs_all = np.r_[inputs_all, input_]
                recons = np.r_[recons, recon_]
                years = np.r_[years, year_]
                months = np.r_[months, month_]
                days = np.r_[days, day_]
    return inputs_all, recons, latents, annos, years, months, days
def predict_latents_and_decode(model, reg_model, annos, out_shape):
    """Map annotations to latent codes via the linear model, then decode them.

    Args:
        model: autoencoder exposing decode().
        reg_model: fitted regressor with a predict() method.
        annos: annotation matrix, one row per example.
        out_shape: shape of the decoded output array (first axis = examples).

    Returns:
        numpy array of shape `out_shape` with the decoded fields.
    """
    predicted_latents = reg_model.predict(annos)
    decoded = np.zeros(out_shape)
    # Decode one example at a time; the model expects a leading batch axis.
    for idx in range(decoded.shape[0]):
        batched_latent = np.expand_dims(predicted_latents[idx, ...], axis=0)
        decoded[idx, ...] = model.decode(batched_latent, training=False)["output"]
    return decoded
def process_holdout(holdout_datasets, model, reg_model, save_nc_files, out_dir):
    """Encode/predict every additional holdout member and optionally dump NetCDFs.

    Args:
        holdout_datasets: mapping from member name to its dataset.
        model: trained autoencoder.
        reg_model: fitted linear latent-space regressor.
        save_nc_files: if True, write input/prediction NetCDF files.
        out_dir: directory the NetCDF files are written to.

    Returns:
        Dict mapping member name to (inputs, recons, annos, xhatexp).
    """
    results = {}
    for name, dataset in holdout_datasets.items():
        print(name)
        (member_inputs, member_recons, _, member_annos,
         member_years, member_months, member_days) = load_data(
            dataset, model, debug=DEBUG)
        # Predict latents from annotations and decode them back to fields.
        member_pred = predict_latents_and_decode(
            model, reg_model, member_annos, np.shape(member_inputs))
        results[name] = member_inputs, member_recons, member_annos, member_pred
        if save_nc_files:
            # Persist raw inputs and predictions for offline analysis.
            climate_utils.save_ncdf_file_high_res_prec(
                member_inputs, member_years, member_months, member_days,
                "ho_{}_input.nc".format(name), out_dir)
            climate_utils.save_ncdf_file_high_res_prec(
                member_pred, member_years, member_months, member_days,
                "ho_{}_pred.nc".format(name), out_dir)
    return results
def holdout_plots(results, model, reg, label, precip, offset, out_dir, out_dir_orig):
    """Evaluate one holdout ensemble member: R2/MSE maps, example plots, metrics JSON.

    Args:
        results: tuple (inputs, recons, annos, xhatexp) for this member,
            as produced by process_holdout.
        model: trained autoencoder (used for the visualizations).
        reg: fitted linear latent-space regressor.
        label: member name; embedded in file names and log output.
        precip: if True, additionally evaluate on the back-transformed
            (squared) precipitation scale.
        offset: if True, subtract 25 after squaring (inverse of the offset
            applied before the sqrt transform — presumably; confirm against
            the preprocessing pipeline).
        out_dir: output directory for transformed-scale artifacts.
        out_dir_orig: output directory for original-scale artifacts
            (only used when precip is True).
    """
    ho_inputs, ho_recons, ho_annos, ho_xhatexp = results
    # Index 0 of each map tuple compares x vs. reconstruction (xhat),
    # index 1 compares x vs. annotation-driven prediction (xhatexp).
    r2_maps_ho = eval_utils.plot_r2_map(ho_inputs, ho_recons,
        ho_xhatexp, out_dir, "holdout_{}".format(label))
    mse_map_ho = eval_utils.plot_mse_map(ho_inputs, ho_recons, ho_xhatexp,
        out_dir, "holdout_{}".format(label))
    mean_mse_x_xhat = np.mean(mse_map_ho[0])
    mean_mse_x_xhatexp = np.mean(mse_map_ho[1])
    mean_r2_x_xhat = np.mean(r2_maps_ho[0])
    mean_r2_x_xhatexp = np.mean(r2_maps_ho[1])
    eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
        "holdout_{}".format(label))
    print("\n#### Holdout ensemble: {}".format(label))
    print("Mean MSE(x, xhat): {}".format(mean_mse_x_xhat))
    print("Mean MSE(x, xhatexp): {}".format(mean_mse_x_xhatexp))
    print("Mean R2(x, xhat): {}".format(mean_r2_x_xhat))
    print("Mean R2(x, xhatexp): {}".format(mean_r2_x_xhatexp))
    # save metrics again in checkpoint dir
    save_path = os.path.join(out_dir, "metrics_{}.json".format(label))
    metrics = {'mean_mse_x_xhat': mean_mse_x_xhat,
        'mean_mse_x_xhatexp': mean_mse_x_xhatexp,
        'mean_r2_x_xhat': mean_r2_x_xhat,
        'mean_r2_x_xhatexp': mean_r2_x_xhatexp}
    with open(save_path, 'w') as result_file:
        json.dump(metrics, result_file, sort_keys=True, indent=4)
    if precip:
        # Back-transform to the original precipitation scale: squaring
        # undoes the sqrt applied during preprocessing.
        ho_inputs_2 = ho_inputs ** 2
        ho_recons_2 = ho_recons ** 2
        ho_xhatexp_2 = ho_xhatexp ** 2
        if offset:
            ho_inputs_2 = ho_inputs_2 - 25
            ho_recons_2 = ho_recons_2 - 25
            ho_xhatexp_2 = ho_xhatexp_2 - 25
        r2_maps_ho_orig = eval_utils.plot_r2_map(ho_inputs_2, ho_recons_2,
            ho_xhatexp_2, out_dir_orig, "holdout_orig_{}".format(label))
        mse_map_ho_orig = eval_utils.plot_mse_map(ho_inputs_2, ho_recons_2,
            ho_xhatexp_2, out_dir_orig, "holdout_orig_{}".format(label))
        # NOTE: the transformed-scale means above are intentionally
        # overwritten with the original-scale values from here on.
        mean_mse_x_xhat = np.mean(mse_map_ho_orig[0])
        mean_mse_x_xhatexp = np.mean(mse_map_ho_orig[1])
        mean_r2_x_xhat = np.mean(r2_maps_ho_orig[0])
        mean_r2_x_xhatexp = np.mean(r2_maps_ho_orig[1])
        eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir_orig,
            "holdout_orig_{}".format(label), transform_back=True, offset=offset)
        print("\n# Orig: {}".format(label))
        print("Mean MSE(x, xhat): {}".format(mean_mse_x_xhat))
        print("Mean MSE(x, xhatexp): {}".format(mean_mse_x_xhatexp))
        print("Mean R2(x, xhat): {}".format(mean_r2_x_xhat))
        print("Mean R2(x, xhatexp): {}".format(mean_r2_x_xhatexp))
        # save metrics again in checkpoint dir
        save_path = os.path.join(out_dir_orig, "metrics_orig_{}.json".format(label))
        metrics = {'mean_mse_x_xhat': mean_mse_x_xhat,
            'mean_mse_x_xhatexp': mean_mse_x_xhatexp,
            'mean_r2_x_xhat': mean_r2_x_xhat,
            'mean_r2_x_xhatexp': mean_r2_x_xhatexp}
        with open(save_path, 'w') as result_file:
            json.dump(metrics, result_file, sort_keys=True, indent=4)
def train_linear_model(checkpoint_path, load_json, results_path, precip, offset, save_nc_files):
    """Fit and evaluate a linear latent-space model on top of a trained autoencoder.

    Restores the model whose hyper-parameters (``hparams.pkl``) and weights
    live in ``checkpoint_path``, fits a ``LinearRegression`` mapping
    annotations to latent codes on the training split, and evaluates both the
    plain reconstructions (``xhat``) and the annotation-predicted
    reconstructions (``xhatexp``) on the test and holdout splits: R2/MSE maps,
    example visualisations, ``.npy`` dumps and JSON metric summaries, all
    written to a timestamped ``eval_*`` directory under ``checkpoint_path``.

    Args:
        checkpoint_path: directory containing ``hparams.pkl`` and the TF
            checkpoints to restore.
        load_json: if True, merge the computed metrics into the experiment
            JSON file found in ``results_path`` (matched by ``config.id``).
        results_path: directory holding per-experiment result JSON files.
        precip: if True, inputs are sqrt-transformed precipitation; metrics
            and plots are additionally produced on the original (squared)
            scale in an ``eval_*_orig`` directory.
        offset: if True, an offset of 25 was added before the sqrt transform
            and is subtracted again when transforming back.
        save_nc_files: if True, also write inputs/predictions of the test and
            holdout splits as netCDF files.

    NOTE(review): relies on module-level names ``DEBUG`` and
    ``process_additional_holdout_members`` — confirm they are defined at
    import time.  When ``precip`` is False, ``out_dir_orig`` is never bound,
    yet the additional-holdout branch passes it to ``holdout_plots`` —
    presumably that branch is only enabled for precipitation runs; verify.
    """
    # get configs from model
    with open(os.path.join(checkpoint_path, "hparams.pkl"), 'rb') as f:
        config = pickle.load(f)
    config = utils.config_to_namedtuple(config)
    model, _ = train.get_models(config)
    # input function
    def input_anno(params, mode, repeat, n_repeat=None):
        # Build the (unshuffled) dataset for `mode` and assemble the "anno"
        # vector: the selected annotation columns, optionally concatenated
        # (along axis 1) with ensemble-mean psl and/or temp columns when the
        # corresponding index lists are non-empty.
        dataset = input_fn(params=params, mode=mode, repeat=repeat,
                           n_repeat=n_repeat, shuffle=False)
        if len(params.temp_indices) == 0 and len(params.psl_indices) == 0:
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                 "anno": tf.gather(x["anno"], params.anno_indices, axis=1),
                 "year": x["year"],
                 "month": x["month"],
                 "day": x["day"]
                 })
        elif len(params.temp_indices) == 0:
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                 "anno": tf.concat(
                     (tf.gather(x["anno"], params.anno_indices, axis=1),
                      tf.gather(x["psl_mean_ens"], params.psl_indices, axis=1)),
                     axis=1),
                 "year": x["year"],
                 "month": x["month"],
                 "day": x["day"]
                 })
        elif len(params.psl_indices) == 0:
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                 "anno": tf.concat(
                     (tf.gather(x["anno"], params.anno_indices, axis=1),
                      tf.gather(x["temp_mean_ens"], params.temp_indices, axis=1)),
                     axis=1),
                 "year": x["year"],
                 "month": x["month"],
                 "day": x["day"]
                 })
        else:
            dataset = dataset.map(lambda x:
                {"inputs": x["inputs"],
                 "anno": tf.concat(
                     (tf.gather(x["anno"], params.anno_indices, axis=1),
                      tf.gather(x["psl_mean_ens"], params.psl_indices, axis=1),
                      tf.gather(x["temp_mean_ens"], params.temp_indices, axis=1)),
                     axis=1),
                 "year": x["year"],
                 "month": x["month"],
                 "day": x["day"]
                 })
        return dataset
    global_step = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False,
                              name="global_step")
    train_inputs = input_anno(params=config, mode="train",
                              repeat=False)
    test_inputs = input_anno(params=config, mode="test1",
                             repeat=False)
    holdout_inputs = input_anno(params=config, mode="test2",
                                repeat=False)
    # dummy run - otherwise, the model wouldn't be fully built
    show_inputs = iter(train_inputs)
    _ = model(next(show_inputs)["inputs"])
    # restore model from checkpoint
    checkpoint = tf.train.Checkpoint(model=model, global_step=global_step)
    manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=3)
    status = checkpoint.restore(manager.latest_checkpoint)
    status.assert_consumed()
    # get training data for linear latent space model
    tr_inputs, _, tr_latents, tr_annos, _, _, _ = load_data(train_inputs, model,
                                                            subset=True, debug=DEBUG)
    # fit linear model: predicts latent codes from annotation vectors
    reg = LinearRegression().fit(tr_annos, tr_latents)
    # get test data
    te_inputs, te_recons, _, te_annos, te_years, te_months, te_days = \
        load_data(test_inputs, model, debug=DEBUG)
    # predict latents for test set and decode
    te_xhatexp = predict_latents_and_decode(model, reg, te_annos,
                                            np.shape(te_inputs))
    # get holdout data
    ho_inputs, ho_recons, _, ho_annos, ho_years, ho_months, ho_days = \
        load_data(holdout_inputs, model, debug=DEBUG)
    # predict latents for holdout set and decode
    ho_xhatexp = predict_latents_and_decode(model, reg, ho_annos,
                                            np.shape(ho_inputs))
    # setup folder to save results (timestamped so runs don't overwrite)
    current_time = datetime.datetime.now().strftime(r"%y%m%d_%H%M")
    out_dir = os.path.join(checkpoint_path, "eval_{}".format(current_time))
    os.makedirs(out_dir, exist_ok=True)
    # save
    if save_nc_files:
        climate_utils.save_ncdf_file_high_res_prec(te_inputs, te_years,
                                                   te_months, te_days, "te_input.nc", out_dir)
        climate_utils.save_ncdf_file_high_res_prec(te_xhatexp, te_years,
                                                   te_months, te_days, "te_pred.nc", out_dir)
        climate_utils.save_ncdf_file_high_res_prec(ho_inputs, ho_years,
                                                   ho_months, ho_days, "ho_input.nc", out_dir)
        climate_utils.save_ncdf_file_high_res_prec(ho_xhatexp, ho_years,
                                                   ho_months, ho_days, "ho_pred.nc", out_dir)
    #################
    # plots
    #################
    # R2 map: element [0] compares x vs xhat, [1] compares x vs xhatexp
    r2_maps_test = eval_utils.plot_r2_map(te_inputs, te_recons, te_xhatexp,
                                          out_dir, "test")
    np.save(os.path.join(out_dir, "r2map_test_xxhat.npy"), r2_maps_test[0])
    np.save(os.path.join(out_dir, "r2map_test_xxhatexp.npy"), r2_maps_test[1])
    r2_maps_ho = eval_utils.plot_r2_map(ho_inputs, ho_recons, ho_xhatexp,
                                        out_dir, "holdout")
    np.save(os.path.join(out_dir, "r2map_ho_xxhat.npy"), r2_maps_ho[0])
    np.save(os.path.join(out_dir, "r2map_ho_xxhatexp.npy"), r2_maps_ho[1])
    # MSE map
    mse_map_test = eval_utils.plot_mse_map(te_inputs, te_recons, te_xhatexp,
                                           out_dir, "test")
    np.save(os.path.join(out_dir, "mse_map_test_xxhat.npy"), mse_map_test[0])
    np.save(os.path.join(out_dir, "mse_map_test_xxhatexp.npy"), mse_map_test[1])
    mse_map_ho = eval_utils.plot_mse_map(ho_inputs, ho_recons, ho_xhatexp,
                                         out_dir, "holdout")
    np.save(os.path.join(out_dir, "mse_map_ho_xxhat.npy"), mse_map_ho[0])
    np.save(os.path.join(out_dir, "mse_map_ho_xxhatexp.npy"), mse_map_ho[1])
    # visualize reconstructions and interventions -- random
    imgs_test = eval_utils.visualize(te_inputs, te_annos, model, reg, out_dir,
                                     "test")
    np.save(os.path.join(out_dir, "te_x.npy"), imgs_test[0])
    np.save(os.path.join(out_dir, "te_xhat.npy"), imgs_test[1])
    np.save(os.path.join(out_dir, "te_xhatexp.npy"), imgs_test[2])
    imgs_ho = eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
                                   "holdout")
    np.save(os.path.join(out_dir, "ho_x.npy"), imgs_ho[0])
    np.save(os.path.join(out_dir, "ho_xhat.npy"), imgs_ho[1])
    np.save(os.path.join(out_dir, "ho_xhatexp.npy"), imgs_ho[2])
    imgs_tr = eval_utils.visualize(tr_inputs, tr_annos, model, reg, out_dir,
                                   "train")
    np.save(os.path.join(out_dir, "tr_x.npy"), imgs_tr[0])
    np.save(os.path.join(out_dir, "tr_xhat.npy"), imgs_tr[1])
    np.save(os.path.join(out_dir, "tr_xhatexp.npy"), imgs_tr[2])
    # visualize reconstructions and interventions -- quantiles
    # (samples picked at fixed MSE/R2 quantiles instead of at random)
    idx_quantiles_te = eval_utils.get_field_mse_quantile_idx(te_inputs, te_xhatexp)
    idx_quantiles_ho = eval_utils.get_field_mse_quantile_idx(ho_inputs, ho_xhatexp)
    print("Indices test:")
    print(idx_quantiles_te)
    print("Indices ho:")
    print(idx_quantiles_ho)
    imgs_test = eval_utils.visualize(te_inputs, te_annos, model, reg, out_dir,
                                     "test_q_mse", random=False, idx=idx_quantiles_te[0])
    np.save(os.path.join(out_dir, "te_x_q_mse.npy"), imgs_test[0])
    np.save(os.path.join(out_dir, "te_xhat_q_mse.npy"), imgs_test[1])
    np.save(os.path.join(out_dir, "te_xhatexp_q_mse.npy"), imgs_test[2])
    imgs_ho = eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
                                   "holdout_q_mse", random=False, idx=idx_quantiles_ho[0])
    np.save(os.path.join(out_dir, "ho_x_q_mse.npy"), imgs_ho[0])
    np.save(os.path.join(out_dir, "ho_xhat_q_mse.npy"), imgs_ho[1])
    np.save(os.path.join(out_dir, "ho_xhatexp_q_mse.npy"), imgs_ho[2])
    imgs_test = eval_utils.visualize(te_inputs, te_annos, model, reg, out_dir,
                                     "test_q_r2", random=False, idx=idx_quantiles_te[1])
    np.save(os.path.join(out_dir, "te_x_q_r2.npy"), imgs_test[0])
    np.save(os.path.join(out_dir, "te_xhat_q_r2.npy"), imgs_test[1])
    np.save(os.path.join(out_dir, "te_xhatexp_q_r2.npy"), imgs_test[2])
    imgs_ho = eval_utils.visualize(ho_inputs, ho_annos, model, reg, out_dir,
                                   "holdout_q_r2", random=False, idx=idx_quantiles_ho[1])
    np.save(os.path.join(out_dir, "ho_x_q_r2.npy"), imgs_ho[0])
    np.save(os.path.join(out_dir, "ho_xhat_q_r2.npy"), imgs_ho[1])
    np.save(os.path.join(out_dir, "ho_xhatexp_q_r2.npy"), imgs_ho[2])
    #################
    # save summaries of metrics maps
    #################
    # mean MSE over entire field
    test_metrics = {}
    test_metrics.update({"mean_mse_x_xhat": np.mean(mse_map_test[0])})
    test_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_map_test[1])})
    # mean R2 over entire field
    test_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_test[0])})
    test_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_test[1])})
    # mean MSE over entire field
    ho_metrics = {}
    ho_metrics.update({"mean_mse_x_xhat": np.mean(mse_map_ho[0])})
    ho_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_map_ho[1])})
    # mean R2 over entire field
    ho_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_ho[0])})
    ho_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_ho[1])})
    metrics = {'test': test_metrics, 'ho': ho_metrics}
    # print
    print("Metrics:")
    for entry in metrics:
        print(entry)
        print(metrics[entry])
    # save metrics again in checkpoint dir
    save_path = os.path.join(out_dir, "metrics.json")
    with open(save_path, 'w') as result_file:
        json.dump(metrics, result_file, sort_keys=True, indent=4)
    if load_json:
        # save metrics in json file (experiment file matched by config.id;
        # IndexError here would mean no matching file exists)
        exp_jsons = os.listdir(results_path)
        exp_json = [f for f in exp_jsons if config.id in f][0]
        exp_json_path = os.path.join(results_path, exp_json)
        results = utils.load_json(exp_json_path)
        results[config.id]['linear_model_test'] = test_metrics
        results[config.id]['linear_model_ho'] = ho_metrics
        with open(exp_json_path, 'w') as result_file:
            json.dump(results, result_file, sort_keys=True, indent=4)
    # if precipitation data, transform back to original scale
    if precip:
        # Undo the sqrt transform (square the fields) and, if requested, the
        # +25 offset, then redo maps/plots/metrics on the original scale.
        out_dir_orig = "{}_orig".format(out_dir)
        te_inputs_2 = te_inputs ** 2
        te_recons_2 = te_recons ** 2
        te_xhatexp_2 = te_xhatexp ** 2
        if offset:
            te_inputs_2 = te_inputs_2 - 25
            te_recons_2 = te_recons_2 - 25
            te_xhatexp_2 = te_xhatexp_2 - 25
        r2_maps_test_orig = eval_utils.plot_r2_map(te_inputs_2, te_recons_2,
                                                   te_xhatexp_2, out_dir_orig, "test_orig")
        np.save(os.path.join(out_dir_orig, "r2map_test_orig_xxhat.npy"),
                r2_maps_test_orig[0])
        np.save(os.path.join(out_dir_orig, "r2map_test_orig_xxhatexp.npy"),
                r2_maps_test_orig[1])
        mse_maps_test_orig = eval_utils.plot_mse_map(te_inputs_2, te_recons_2,
                                                     te_xhatexp_2, out_dir_orig, "test_orig")
        np.save(os.path.join(out_dir_orig, "mse_map_test_orig_xxhat.npy"),
                mse_maps_test_orig[0])
        np.save(os.path.join(out_dir_orig, "mse_map_test_orig_xxhatexp.npy"),
                mse_maps_test_orig[1])
        ho_inputs_2 = ho_inputs ** 2
        ho_recons_2 = ho_recons ** 2
        ho_xhatexp_2 = ho_xhatexp ** 2
        if offset:
            ho_inputs_2 = ho_inputs_2 - 25
            ho_recons_2 = ho_recons_2 - 25
            ho_xhatexp_2 = ho_xhatexp_2 - 25
        r2_maps_ho_orig = eval_utils.plot_r2_map(ho_inputs_2, ho_recons_2, ho_xhatexp_2,
                                                 out_dir_orig, "holdout_orig")
        np.save(os.path.join(out_dir_orig, "r2map_ho_orig_xxhat.npy"),
                r2_maps_ho_orig[0])
        np.save(os.path.join(out_dir_orig, "r2map_ho_orig_xxhatexp.npy"),
                r2_maps_ho_orig[1])
        mse_maps_ho_orig = eval_utils.plot_mse_map(ho_inputs_2, ho_recons_2,
                                                   ho_xhatexp_2, out_dir_orig, "holdout_orig")
        np.save(os.path.join(out_dir_orig, "mse_map_ho_orig_xxhat.npy"),
                mse_maps_ho_orig[0])
        np.save(os.path.join(out_dir_orig, "mse_map_ho_orig_xxhatexp.npy"),
                mse_maps_ho_orig[1])
        # visualize reconstructions and interventions
        imgs_te_orig = eval_utils.visualize(te_inputs, te_annos, model, reg,
                                            out_dir_orig, "test_orig", transform_back=True, offset=offset)
        np.save(os.path.join(out_dir_orig, "te_orig_x.npy"), imgs_te_orig[0])
        np.save(os.path.join(out_dir_orig, "te_orig_xhat.npy"), imgs_te_orig[1])
        np.save(os.path.join(out_dir_orig, "te_orig_xhatexp.npy"), imgs_te_orig[2])
        imgs_ho_orig = eval_utils.visualize(ho_inputs, ho_annos, model, reg,
                                            out_dir_orig, "holdout_orig", transform_back=True, offset=offset)
        np.save(os.path.join(out_dir_orig, "ho_orig_x.npy"), imgs_ho_orig[0])
        np.save(os.path.join(out_dir_orig, "ho_orig_xhat.npy"), imgs_ho_orig[1])
        np.save(os.path.join(out_dir_orig, "ho_orig_xhatexp.npy"), imgs_ho_orig[2])
        imgs_tr_orig = eval_utils.visualize(tr_inputs, tr_annos, model, reg,
                                            out_dir_orig, "train_orig", transform_back=True, offset=offset)
        np.save(os.path.join(out_dir_orig, "tr_orig_x.npy"), imgs_tr_orig[0])
        np.save(os.path.join(out_dir_orig, "tr_orig_xhat.npy"), imgs_tr_orig[1])
        np.save(os.path.join(out_dir_orig, "tr_orig_xhatexp.npy"), imgs_tr_orig[2])
        #################
        # save summaries of metrics maps
        #################
        # mean MSE over entire field
        test_metrics = {}
        test_metrics.update({"mean_mse_x_xhat": np.mean(mse_maps_test_orig[0])})
        test_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_maps_test_orig[1])})
        # mean R2 over entire field
        test_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_test_orig[0])})
        test_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_test_orig[1])})
        # mean MSE over entire field
        ho_metrics = {}
        ho_metrics.update({"mean_mse_x_xhat": np.mean(mse_maps_ho_orig[0])})
        ho_metrics.update({"mean_mse_x_xhatexp": np.mean(mse_maps_ho_orig[1])})
        # mean R2 over entire field
        ho_metrics.update({"mean_r2_x_xhat": np.mean(r2_maps_ho_orig[0])})
        ho_metrics.update({"mean_r2_x_xhatexp": np.mean(r2_maps_ho_orig[1])})
        # save metrics again in checkpoint dir
        save_path = os.path.join(out_dir_orig, "metrics_orig.json")
        metrics = {'test': test_metrics, 'ho': ho_metrics}
        # print
        print("Metrics:")
        for entry in metrics:
            print(entry)
            print(metrics[entry])
        with open(save_path, 'w') as result_file:
            json.dump(metrics, result_file, sort_keys=True, indent=4)
        if load_json:
            # save metrics in json file
            exp_jsons = os.listdir(results_path)
            exp_json = [f for f in exp_jsons if config.id in f][0]
            exp_json_path = os.path.join(results_path, exp_json)
            results = utils.load_json(exp_json_path)
            results[config.id]['linear_model_test_orig'] = test_metrics
            results[config.id]['linear_model_ho_orig'] = ho_metrics
            with open(exp_json_path, 'w') as result_file:
                json.dump(results, result_file, sort_keys=True, indent=4)
    if process_additional_holdout_members:
        # NOTE(review): `out_dir_orig` below is only bound when `precip` is
        # True — presumably this flag is only enabled for precipitation runs;
        # confirm with the caller.
        holdout_names = ["kbd", "kbf", "kbh", "kbj",
                         "kbl", "kbn", "kbo", "kbp", "kbr"]
        # "kbt", "kbu", "kbv", "kbw", "kbx",
        # "kby", "kbz", "kca", "kcb", "kcc",
        # "kcd", "kce", "kcf", "kcg", "kch",
        # "kci", "kcj", "kck", "kcl", "kcm",
        # "kcn", "kco", "kcp", "kcq", "kcr",
        # "kcs", "kct", "kcu", "kcv", "kcw", "kcx"]
        holdout_datasets = {}
        for ho in holdout_names:
            holdout_datasets[ho] = input_anno(params=config,
                                              mode="test_{}".format(ho),
                                              repeat=False)
        # process and save predictions for additional holdout datasets
        results = process_holdout(holdout_datasets, model, reg, save_nc_files, out_dir)
        for ho in results:
            holdout_plots(results[ho], model, reg, ho, precip, offset, out_dir, out_dir_orig)
| en | 0.658688 | # get training data for linear latent space model # encodings_z = encodings['z'].numpy() # just keep a subset in memory # predict latents # decode predicted latents # predict latents for holdout set and decode # save #### Holdout ensemble: {}".format(label)) # save metrics again in checkpoint dir # Orig: {}".format(label)) # save metrics again in checkpoint dir # get configs from model # input function # dummy run - otherwise, the model wouldn't be fully build # restore model from checkpoint # get training data for linear latent space model # fit linear model # get test data # predict latents for test set and decode # get holdout data # predict latents for holdout set and decode # setup folder to save results # save ################# # plots ################# # R2 map # MSE map # visualize reconstructions and interventions -- random # visualize reconstructions and interventions -- quantiles ################# # save summaries of metrics maps ################# # mean MSE over entire field # mean R2 over entire field # mean MSE over entire field # mean R2 over entire field # print # save metrics again in checkpoint dir # save metrics in json file # if precipitation data, transform back to original scale # visualize reconstructions and interventions ################# # save summaries of metrics maps ################# # mean MSE over entire field # mean R2 over entire field # mean MSE over entire field # mean R2 over entire field # save metrics again in checkpoint dir # print # save metrics in json file # "kbt", "kbu", "kbv", "kbw", "kbx", # "kby", "kbz", "kca", "kcb", "kcc", # "kcd", "kce", "kcf", "kcg", "kch", # "kci", "kcj", "kck", "kcl", "kcm", # "kcn", "kco", "kcp", "kcq", "kcr", # "kcs", "kct", "kcu", "kcv", "kcw", "kcx"] # process and save predictions for additional holdout datasets | 2.262873 | 2 |
setup_scripts/enable_bungee_ip_forward.py | EniMiniGames/MC-Server-VPS-Scripts | 1 | 6614120 | <filename>setup_scripts/enable_bungee_ip_forward.py
#!/usr/bin/env python3
import yaml
from utils import validate_file_path
# Breaks yml file?
def set_bungee_mode(file_path: str) -> None:
    """Enable BungeeCord IP forwarding in the Spigot YAML config at *file_path*.

    Reads the YAML file, sets ``ip_forward: true`` and writes the file back
    in place.

    NOTE: round-tripping through PyYAML drops comments and, with the default
    ``sort_keys=True``, also re-orders keys — the likely cause of the
    "Breaks yml file?" concern above.  ``sort_keys=False`` preserves the key
    order; keeping comments would require a comment-preserving library such
    as ruamel.yaml.
    """
    with open(file_path, 'r') as cfg:
        spig_yml = yaml.safe_load(cfg)
    # safe_load returns None for an empty file; start from an empty mapping.
    if spig_yml is None:
        spig_yml = {}
    spig_yml['ip_forward'] = True
    with open(file_path, 'w') as cfg:
        # safe_dump (vs dump) refuses arbitrary Python objects;
        # sort_keys=False keeps the original key order instead of sorting.
        yaml.safe_dump(spig_yml, cfg, sort_keys=False, default_flow_style=False)
if __name__ == '__main__':
    # Resolve and validate the Spigot config path, then enable ip_forward.
    path = validate_file_path('config.yml')
    set_bungee_mode(path)
| <filename>setup_scripts/enable_bungee_ip_forward.py
#!/usr/bin/env python3
import yaml
from utils import validate_file_path
# Breaks yml file?
def set_bungee_mode(file_path: str):
    """Enable BungeeCord ip_forward in the YAML config at *file_path* (rewrites the file in place)."""
    with open(file_path, 'r') as cfg:
        spig_yml = yaml.safe_load(cfg)
    # except yaml.YAMLError as exc:
    spig_yml['ip_forward'] = True
    with open(file_path, 'w') as cfg:
        # NOTE(review): yaml.dump sorts keys by default and drops comments —
        # likely the "Breaks yml file?" concern noted above.
        yaml.dump(spig_yml, cfg)
if __name__ == '__main__':
    # Resolve and validate the Spigot config path, then enable ip_forward.
    path = validate_file_path('config.yml')
    set_bungee_mode(path)
| en | 0.58485 | #!/usr/bin/env python3 # Breaks yml file? # except yaml.YAMLError as exc: | 2.326229 | 2 |
Python/notebooks/code/10-Multiprocessing.py | zhongyangynag/code-study | 0 | 6614121 | # ---
# jupyter:
# jupytext:
# cell_metadata_json: true
# comment_magics: false
# formats: py:light,notebooks//ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# # Multiprocessing
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.951632Z", "iopub.execute_input": "2020-09-12T14:00:18.953819Z", "iopub.status.idle": "2020-09-12T14:00:18.957722Z", "shell.execute_reply": "2020-09-12T14:00:18.958270Z"}}
from multiprocessing import cpu_count
cpu_count()
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Map reduce example
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.964025Z", "iopub.execute_input": "2020-09-12T14:00:18.964908Z", "shell.execute_reply": "2020-09-12T14:00:18.967311Z", "iopub.status.idle": "2020-09-12T14:00:18.967841Z"}}
from time import sleep
def delayed_square(x, delay=1):
    """Return ``x`` squared after sleeping for ``delay`` seconds.

    The artificial delay stands in for an expensive computation so that the
    benefit of running calls in parallel is visible.  ``delay`` defaults to
    1 second, matching the original behaviour; pass ``delay=0`` in tests.
    """
    sleep(delay)
    return x * x
data = list(range(8))
data
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.972191Z", "iopub.execute_input": "2020-09-12T14:00:18.972938Z", "iopub.status.idle": "2020-09-12T14:00:27.011091Z", "shell.execute_reply": "2020-09-12T14:00:27.011676Z"}}
%time sum(delayed_square(x) for x in data)
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:27.016032Z", "iopub.execute_input": "2020-09-12T14:00:27.016783Z", "shell.execute_reply": "2020-09-12T14:00:35.051096Z", "iopub.status.idle": "2020-09-12T14:00:35.051765Z"}}
%time sum(map(delayed_square,data))
# + [markdown] {"slideshow": {"slide_type": "fragment"}}
# We can process each `delayed_square` calls independently and in parallel. To accomplish this we'll apply that function across all list items in parallel using multiple processes.
#
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Thread and Process: Differences
#
# - A Process is an instance of a running program.
# - Process may contain one or more threads, but a thread cannot contain a process.
# - Process has a self-contained execution environment. It has its own memory space.
# - Application running on your computer may be a set of cooperating processes.
#
# - A Thread is made of and exist within a Process; every process has at least one.
# - Multiple threads in a process share resources, which helps in efficient communication between threads.
# - Threads can be concurrent on a multi-core system, with every core executing the separate threads simultaneously.
#
#
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Multi-Processing vs Multi-Threading
#
# ### Memory
# - Each process has its own copy of the data segment of the parent process.
# - Each thread has direct access to the data segment of its process.
# - A process runs in separate memory spaces.
# - A thread runs in shared memory spaces.
#
# ### Communication
# - Processes must use inter-process communication to communicate with sibling processes.
# - Threads can directly communicate with other threads of its process.
#
# ### Overheads
# - Processes have considerable overhead.
# - Threads have almost no overhead.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Multi-Processing vs Multi-Threading
#
# ### Creation
# - New processes require duplication of the parent process.
# - New threads are easily created.
#
# ### Control
# - Processes can only exercise control over child processes.
# - Threads can exercise considerable control over threads of the same process.
#
# ### Changes
# - Any change in the parent process does not affect child processes.
# - Any change in the main thread may affect the behavior of the other threads of the process.
#
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## The Global Interpreter Lock (GIL)
#
# - The Python interpreter is not thread safe.
# - A few critical internal data structures may only be accessed by one thread at a time. Access to them is protected by the GIL.
# - Attempts at removing the GIL from Python have failed until now. The main difficulty is maintaining the C API for extension modules.
# - Multiprocessing avoids the GIL by having separate processes which each have an independent copy of the interpreter data structures.
# - The price to pay: serialization of tasks, arguments, and results.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Multiprocessing (history)
#
# - The multiprocessing allows the programmer to fully leverage multiple processors.
# - The `Pool` object parallelizes the execution of a function across multiple input values.
# - The if `__name__ == '__main__'` part is necessary.
# <p><font color=red> The next program does not work in a cell you need to save it and run with python in a terminal </font></p>
#
# ```bash
# python3 pool.py
# ```
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:35.056974Z", "iopub.execute_input": "2020-09-12T14:00:35.058212Z", "iopub.status.idle": "2020-09-12T14:00:35.060786Z", "shell.execute_reply": "2020-09-12T14:00:35.061358Z"}}
%%file pool.py
from time import time, sleep
from multiprocessing import Pool
def delayed_square(x):
    # Stand-in for an expensive computation: 1 s of sleep per call.
    sleep(1)
    return x*x
if __name__ == '__main__': # Executed only on main process.
    start = time()
    data = list(range(8))
    # Pool() starts one worker process per CPU by default; map() distributes
    # the eight 1-second calls, so they run concurrently instead of serially.
    with Pool() as p:
        result = sum(p.map(delayed_square, data))
    stop = time()
    print(f"result = {result} - Elapsed time {stop - start}")
# + {"execution": {"iopub.status.busy": "2020-09-12T14:00:35.065862Z", "iopub.execute_input": "2020-09-12T14:00:35.067034Z", "iopub.status.idle": "2020-09-12T14:00:37.354115Z", "shell.execute_reply": "2020-09-12T14:00:37.354706Z"}}
import sys
!{sys.executable} pool.py
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Futures
#
# The `concurrent.futures` module provides a high-level interface for asynchronously executing callables.
#
# The asynchronous execution can be performed with threads, using ThreadPoolExecutor, or separate processes, using ProcessPoolExecutor. Both implement the same interface, which is defined by the abstract Executor class.
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:37.359908Z", "iopub.execute_input": "2020-09-12T14:00:37.360930Z", "iopub.status.idle": "2020-09-12T14:00:37.363397Z", "shell.execute_reply": "2020-09-12T14:00:37.364052Z"}}
%%file process_pool.py
import os
from time import time, sleep

# On Windows pick loky's executor as a drop-in replacement for the stdlib
# ProcessPoolExecutor (per the original "for Windows users" note).
if os.name == "nt":
    from loky import ProcessPoolExecutor  # for Windows users
else:
    from concurrent.futures import ProcessPoolExecutor


def delayed_square(x):
    """Return x*x after a 1 s artificial delay (stands in for real work)."""
    sleep(1)
    return x*x


if __name__ == "__main__":
    start = time()
    data = list(range(8))
    # The executor fans the eight calls out over worker processes, so the
    # wall-clock time stays well below the 8 s a serial loop would take.
    with ProcessPoolExecutor() as pool:
        result = sum(pool.map(delayed_square, data))
    stop = time()
    print(f" result : {result} - elapsed time {stop - start}")
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:37.368389Z", "iopub.execute_input": "2020-09-12T14:00:37.369466Z", "iopub.status.idle": "2020-09-12T14:00:39.614789Z", "shell.execute_reply": "2020-09-12T14:00:39.615379Z"}}
!{sys.executable} process_pool.py
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:39.620798Z", "iopub.execute_input": "2020-09-12T14:00:39.621801Z", "iopub.status.idle": "2020-09-12T14:00:40.633249Z", "shell.execute_reply": "2020-09-12T14:00:40.634198Z"}}
%%time
from concurrent.futures import ThreadPoolExecutor
# Threads work here because sleep() releases the GIL while waiting, so the
# eight 1-second calls overlap.
e = ThreadPoolExecutor()
results = list(e.map(delayed_square, range(8)))
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Asynchronous Future
# While many parallel applications can be described as maps, some can be more complex. In this section we look at the asynchronous Future interface, which provides a simple API for ad-hoc parallelism. This is useful for when your computations don't fit a regular pattern.
#
# + [markdown] {"slideshow": {"slide_type": "fragment"}}
# ### Executor.submit
#
# The `submit` method starts a computation in a separate thread or process and immediately gives us a `Future` object that refers to the result. At first, the future is pending. Once the function completes the future is finished.
#
# We collect the result of the task with the `.result()` method,
# which does not return until the results are available.
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.639327Z", "iopub.execute_input": "2020-09-12T14:00:40.640320Z", "iopub.status.idle": "2020-09-12T14:00:40.642032Z", "shell.execute_reply": "2020-09-12T14:00:40.642728Z"}}
from time import sleep
def slowadd(a, b, delay=1):
    """Add ``a`` and ``b``, pausing for ``delay`` seconds first.

    The pause stands in for a slow computation so that submitting many calls
    to an executor shows a visible speed-up.
    """
    sleep(delay)
    total = a + b
    return total
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.650795Z", "iopub.execute_input": "2020-09-12T14:00:40.651924Z", "iopub.status.idle": "2020-09-12T14:00:40.654265Z", "shell.execute_reply": "2020-09-12T14:00:40.654828Z"}}
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(4)
# submit() schedules the call in the background and immediately returns a
# pending Future; slowadd runs on one of the 4 worker threads.
future = e.submit(slowadd, 1, 2)
future
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.658819Z", "iopub.execute_input": "2020-09-12T14:00:40.659582Z", "iopub.status.idle": "2020-09-12T14:00:41.655950Z", "shell.execute_reply": "2020-09-12T14:00:41.656512Z"}}
future.result()
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# Submit many tasks all at once and they be will executed in parallel.
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:41.660798Z", "iopub.execute_input": "2020-09-12T14:00:41.661553Z", "shell.execute_reply": "2020-09-12T14:00:49.683871Z", "iopub.status.idle": "2020-09-12T14:00:49.684457Z"}}
%%time
results = [slowadd(i, i, delay=1) for i in range(8)]
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:49.691017Z", "iopub.execute_input": "2020-09-12T14:00:49.691871Z", "iopub.status.idle": "2020-09-12T14:00:51.702264Z", "shell.execute_reply": "2020-09-12T14:00:51.702861Z"}}
%%time
# Fire off all eight calls first, then collect; f.result() blocks until
# that particular future has completed.
futures = [e.submit(slowadd, 1, 1, delay=1) for i in range(8)]
results = [f.result() for f in futures]
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# * Submit fires off a single function call in the background, returning a future.
# * When you combine submit with a single for loop we recover the functionality of map.
# * To collect your results, replace each of futures, `f`, with a call to `f.result()`
# * Combine submit with multiple for loops and other general programming to get something more general than map.
# * Sometimes, it did not speed up the code very much
# * Threads and processes show some performance differences
# * Use threads carefully, you can break your Python session.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# Today most library designers are coordinating around the concurrent.futures interface, so it's wise to move over.
#
# * Profile your code
# * Used concurrent.futures.ProcessPoolExecutor for simple parallelism
# * Gained some speed boost (but not as much as expected)
# * Lost ability to diagnose performance within parallel code
# * Describing each task as a function call helps use tools like map for parallelism
# * Making your tasks fast is often at least as important as parallelizing your tasks.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ### Exercise: Pi computation
#
# Parallelize this computation with a ProcessPoolExecutor. ThreadPoolExecutor is not usable because of `random` function calls.
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:51.746931Z", "iopub.execute_input": "2020-09-12T14:00:51.789213Z", "iopub.status.idle": "2020-09-12T14:01:09.629723Z", "shell.execute_reply": "2020-09-12T14:01:09.630334Z"}}
import time
import random
def compute_pi(n):
    """Count how many of ``n`` uniform points in the unit square fall inside
    the quarter disc x^2 + y^2 <= 1.

    The caller turns this count into a Monte-Carlo estimate of pi via
    ``4 * count / n``.
    """
    def _point_in_quarter_disc():
        # Draw one point; keep the exact draw order (x first, then y) so the
        # stream of random numbers matches the straightforward loop version.
        px = random.random()
        py = random.random()
        return px * px + py * py <= 1
    return sum(1 for _ in range(n) if _point_in_quarter_disc())
elapsed_time = time.time()
nb_simulations = 4
n = 10**7
result = [compute_pi(n) for i in range(nb_simulations)]
pi = 4 * sum(result) / (n*nb_simulations)
print(f"Estimated value of Pi : {pi:.8f} time : {time.time()-elapsed_time:.8f}")
# -
# ### Exercise
#
# - Do the same computation using asynchronous future
# - Implement a joblib version (see example below)
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Parallel tools for Python
#
# The parallel tools from standard library are very limited. You will have more powerful features with:
#
# - [Joblib](https://joblib.readthedocs.io/en/latest/) provides a simple helper class to write parallel for loops using multiprocessing.
# - [Dask](https://dask.org)
# - [PySpark](https://spark.apache.org/docs/latest/api/python/index.html)
# - [mpi4py](https://mpi4py.readthedocs.io)
#
| # ---
# jupyter:
# jupytext:
# cell_metadata_json: true
# comment_magics: false
# formats: py:light,notebooks//ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# # Multiprocessing
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.951632Z", "iopub.execute_input": "2020-09-12T14:00:18.953819Z", "iopub.status.idle": "2020-09-12T14:00:18.957722Z", "shell.execute_reply": "2020-09-12T14:00:18.958270Z"}}
from multiprocessing import cpu_count
cpu_count()
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Map reduce example
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.964025Z", "iopub.execute_input": "2020-09-12T14:00:18.964908Z", "shell.execute_reply": "2020-09-12T14:00:18.967311Z", "iopub.status.idle": "2020-09-12T14:00:18.967841Z"}}
from time import sleep
def delayed_square(x):
    """Square *x* after a one-second artificial delay (simulates slow work)."""
    sleep(1)
    squared = x * x
    return squared
data = list(range(8))
data
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.972191Z", "iopub.execute_input": "2020-09-12T14:00:18.972938Z", "iopub.status.idle": "2020-09-12T14:00:27.011091Z", "shell.execute_reply": "2020-09-12T14:00:27.011676Z"}}
%time sum(delayed_square(x) for x in data)
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:27.016032Z", "iopub.execute_input": "2020-09-12T14:00:27.016783Z", "shell.execute_reply": "2020-09-12T14:00:35.051096Z", "iopub.status.idle": "2020-09-12T14:00:35.051765Z"}}
%time sum(map(delayed_square,data))
# + [markdown] {"slideshow": {"slide_type": "fragment"}}
# We can process each `delayed_square` calls independently and in parallel. To accomplish this we'll apply that function across all list items in parallel using multiple processes.
#
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Thread and Process: Differences
#
# - A Process is an instance of a running program.
# - Process may contain one or more threads, but a thread cannot contain a process.
# - Process has a self-contained execution environment. It has its own memory space.
# - Application running on your computer may be a set of cooperating processes.
#
# - A Thread is part of, and exists within, a Process; every process has at least one.
# - Multiple threads in a process share resources, which helps in efficient communication between threads.
# - Threads can be concurrent on a multi-core system, with every core executing the separate threads simultaneously.
#
#
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Multi-Processing vs Multi-Threading
#
# ### Memory
# - Each process has its own copy of the data segment of the parent process.
# - Each thread has direct access to the data segment of its process.
# - A process runs in separate memory spaces.
# - A thread runs in shared memory spaces.
#
# ### Communication
# - Processes must use inter-process communication to communicate with sibling processes.
# - Threads can directly communicate with other threads of its process.
#
# ### Overheads
# - Processes have considerable overhead.
# - Threads have almost no overhead.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Multi-Processing vs Multi-Threading
#
# ### Creation
# - New processes require duplication of the parent process.
# - New threads are easily created.
#
# ### Control
# - Processes can only exercise control over child processes.
# - Threads can exercise considerable control over threads of the same process.
#
# ### Changes
# - Any change in the parent process does not affect child processes.
# - Any change in the main thread may affect the behavior of the other threads of the process.
#
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## The Global Interpreter Lock (GIL)
#
# - The Python interpreter is not thread safe.
# - A few critical internal data structures may only be accessed by one thread at a time. Access to them is protected by the GIL.
# - Attempts at removing the GIL from Python have failed until now. The main difficulty is maintaining the C API for extension modules.
# - Multiprocessing avoids the GIL by having separate processes which each have an independent copy of the interpreter data structures.
# - The price to pay: serialization of tasks, arguments, and results.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Multiprocessing (history)
#
# - The multiprocessing allows the programmer to fully leverage multiple processors.
# - The `Pool` object parallelizes the execution of a function across multiple input values.
# - The if `__name__ == '__main__'` part is necessary.
# <p><font color=red> The next program does not work in a cell you need to save it and run with python in a terminal </font></p>
#
# ```bash
# python3 pool.py
# ```
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:35.056974Z", "iopub.execute_input": "2020-09-12T14:00:35.058212Z", "iopub.status.idle": "2020-09-12T14:00:35.060786Z", "shell.execute_reply": "2020-09-12T14:00:35.061358Z"}}
%%file pool.py
from time import time, sleep
from multiprocessing import Pool
def delayed_square(x):
sleep(1)
return x*x
if __name__ == '__main__': # Executed only on main process.
start = time()
data = list(range(8))
with Pool() as p:
result = sum(p.map(delayed_square, data))
stop = time()
print(f"result = {result} - Elapsed time {stop - start}")
# + {"execution": {"iopub.status.busy": "2020-09-12T14:00:35.065862Z", "iopub.execute_input": "2020-09-12T14:00:35.067034Z", "iopub.status.idle": "2020-09-12T14:00:37.354115Z", "shell.execute_reply": "2020-09-12T14:00:37.354706Z"}}
import sys
!{sys.executable} pool.py
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Futures
#
# The `concurrent.futures` module provides a high-level interface for asynchronously executing callables.
#
# The asynchronous execution can be performed with threads, using ThreadPoolExecutor, or separate processes, using ProcessPoolExecutor. Both implement the same interface, which is defined by the abstract Executor class.
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:37.359908Z", "iopub.execute_input": "2020-09-12T14:00:37.360930Z", "iopub.status.idle": "2020-09-12T14:00:37.363397Z", "shell.execute_reply": "2020-09-12T14:00:37.364052Z"}}
%%file process_pool.py
import os
from time import time, sleep
if os.name == "nt":
from loky import ProcessPoolExecutor # for Windows users
else:
from concurrent.futures import ProcessPoolExecutor
from time import time, sleep
def delayed_square(x):
sleep(1)
return x*x
if __name__ == "__main__":
start = time()
data = list(range(8))
with ProcessPoolExecutor() as pool:
result = sum(pool.map(delayed_square, data))
stop = time()
print(f" result : {result} - elapsed time {stop - start}")
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:37.368389Z", "iopub.execute_input": "2020-09-12T14:00:37.369466Z", "iopub.status.idle": "2020-09-12T14:00:39.614789Z", "shell.execute_reply": "2020-09-12T14:00:39.615379Z"}}
!{sys.executable} process_pool.py
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:39.620798Z", "iopub.execute_input": "2020-09-12T14:00:39.621801Z", "iopub.status.idle": "2020-09-12T14:00:40.633249Z", "shell.execute_reply": "2020-09-12T14:00:40.634198Z"}}
%%time
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor()
results = list(e.map(delayed_square, range(8)))
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Asynchronous Future
# While many parallel applications can be described as maps, some can be more complex. In this section we look at the asynchronous Future interface, which provides a simple API for ad-hoc parallelism. This is useful for when your computations don't fit a regular pattern.
#
# + [markdown] {"slideshow": {"slide_type": "fragment"}}
# ### Executor.submit
#
# The `submit` method starts a computation in a separate thread or process and immediately gives us a `Future` object that refers to the result. At first, the future is pending. Once the function completes the future is finished.
#
# We collect the result of the task with the `.result()` method,
# which does not return until the results are available.
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.639327Z", "iopub.execute_input": "2020-09-12T14:00:40.640320Z", "iopub.status.idle": "2020-09-12T14:00:40.642032Z", "shell.execute_reply": "2020-09-12T14:00:40.642728Z"}}
from time import sleep
def slowadd(a, b, delay=1):
    """Return a + b after pausing for *delay* seconds."""
    sleep(delay)
    total = a + b
    return total
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.650795Z", "iopub.execute_input": "2020-09-12T14:00:40.651924Z", "iopub.status.idle": "2020-09-12T14:00:40.654265Z", "shell.execute_reply": "2020-09-12T14:00:40.654828Z"}}
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(4)
future = e.submit(slowadd, 1, 2)
future
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.658819Z", "iopub.execute_input": "2020-09-12T14:00:40.659582Z", "iopub.status.idle": "2020-09-12T14:00:41.655950Z", "shell.execute_reply": "2020-09-12T14:00:41.656512Z"}}
future.result()
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# Submit many tasks all at once and they will be executed in parallel.
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:41.660798Z", "iopub.execute_input": "2020-09-12T14:00:41.661553Z", "shell.execute_reply": "2020-09-12T14:00:49.683871Z", "iopub.status.idle": "2020-09-12T14:00:49.684457Z"}}
%%time
results = [slowadd(i, i, delay=1) for i in range(8)]
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:49.691017Z", "iopub.execute_input": "2020-09-12T14:00:49.691871Z", "iopub.status.idle": "2020-09-12T14:00:51.702264Z", "shell.execute_reply": "2020-09-12T14:00:51.702861Z"}}
%%time
futures = [e.submit(slowadd, 1, 1, delay=1) for i in range(8)]
results = [f.result() for f in futures]
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# * Submit fires off a single function call in the background, returning a future.
# * When you combine submit with a single for loop we recover the functionality of map.
# * To collect your results, replace each of futures, `f`, with a call to `f.result()`
# * Combine submit with multiple for loops and other general programming to get something more general than map.
# * Sometimes, it did not speed up the code very much
# * Threads and processes show some performance differences
# * Use threads carefully, you can break your Python session.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# Today most library designers are coordinating around the concurrent.futures interface, so it's wise to move over.
#
# * Profile your code
# * Used concurrent.futures.ProcessPoolExecutor for simple parallelism
# * Gained some speed boost (but not as much as expected)
# * Lost ability to diagnose performance within parallel code
# * Describing each task as a function call helps use tools like map for parallelism
# * Making your tasks fast is often at least as important as parallelizing your tasks.
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ### Exercise: Pi computation
#
# Parallelize this computation with a ProcessPoolExecutor. ThreadPoolExecutor is not usable because of `random` function calls.
# + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:51.746931Z", "iopub.execute_input": "2020-09-12T14:00:51.789213Z", "iopub.status.idle": "2020-09-12T14:01:09.629723Z", "shell.execute_reply": "2020-09-12T14:01:09.630334Z"}}
import time
import random
def compute_pi(n):
    """Count how many of *n* uniformly random points in the unit square
    land inside the unit quarter-circle (Monte-Carlo estimate of pi/4 * n).
    """
    hits = 0
    for _ in range(n):
        px = random.random()
        py = random.random()
        if px * px + py * py <= 1:
            hits += 1
    return hits
elapsed_time = time.time()
nb_simulations = 4
n = 10**7
result = [compute_pi(n) for i in range(nb_simulations)]
pi = 4 * sum(result) / (n*nb_simulations)
print(f"Estimated value of Pi : {pi:.8f} time : {time.time()-elapsed_time:.8f}")
# -
# ### Exercise
#
# - Do the same computation using asynchronous future
# - Implement a joblib version (see example below)
# + [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Parallel tools for Python
#
# The parallel tools from the standard library are very limited. You will have more powerful features with:
#
# - [Joblib](https://joblib.readthedocs.io/en/latest/) provides a simple helper class to write parallel for loops using multiprocessing.
# - [Dask](https://dask.org)
# - [PySpark](https://spark.apache.org/docs/latest/api/python/index.html)
# - [mpi4py](https://mpi4py.readthedocs.io)
#
| en | 0.702321 | # --- # jupyter: # jupytext: # cell_metadata_json: true # comment_magics: false # formats: py:light,notebooks//ipynb # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.6.0 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] {"slideshow": {"slide_type": "slide"}} # # Multiprocessing # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.951632Z", "iopub.execute_input": "2020-09-12T14:00:18.953819Z", "iopub.status.idle": "2020-09-12T14:00:18.957722Z", "shell.execute_reply": "2020-09-12T14:00:18.958270Z"}} # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Map reduce example # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.964025Z", "iopub.execute_input": "2020-09-12T14:00:18.964908Z", "shell.execute_reply": "2020-09-12T14:00:18.967311Z", "iopub.status.idle": "2020-09-12T14:00:18.967841Z"}} # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:18.972191Z", "iopub.execute_input": "2020-09-12T14:00:18.972938Z", "iopub.status.idle": "2020-09-12T14:00:27.011091Z", "shell.execute_reply": "2020-09-12T14:00:27.011676Z"}} # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:27.016032Z", "iopub.execute_input": "2020-09-12T14:00:27.016783Z", "shell.execute_reply": "2020-09-12T14:00:35.051096Z", "iopub.status.idle": "2020-09-12T14:00:35.051765Z"}} # + [markdown] {"slideshow": {"slide_type": "fragment"}} # We can process each `delayed_square` calls independently and in parallel. To accomplish this we'll apply that function across all list items in parallel using multiple processes. # # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Thread and Process: Differences # # - A Process is an instance of a running program. 
# - Process may contain one or more threads, but a thread cannot contain a process. # - Process has a self-contained execution environment. It has its own memory space. # - Application running on your computer may be a set of cooperating processes. # # - A Thread is made of and exist within a Process; every process has at least one. # - Multiple threads in a process share resources, which helps in efficient communication between threads. # - Threads can be concurrent on a multi-core system, with every core executing the separate threads simultaneously. # # # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Multi-Processing vs Multi-Threading # # ### Memory # - Each process has its own copy of the data segment of the parent process. # - Each thread has direct access to the data segment of its process. # - A process runs in separate memory spaces. # - A thread runs in shared memory spaces. # # ### Communication # - Processes must use inter-process communication to communicate with sibling processes. # - Threads can directly communicate with other threads of its process. # # ### Overheads # - Processes have considerable overhead. # - Threads have almost no overhead. # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Multi-Processing vs Multi-Threading # # ### Creation # - New processes require duplication of the parent process. # - New threads are easily created. # # ### Control # - Processes can only exercise control over child processes. # - Threads can exercise considerable control over threads of the same process. # # ### Changes # - Any change in the parent process does not affect child processes. # - Any change in the main thread may affect the behavior of the other threads of the process. # # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## The Global Interpreter Lock (GIL) # # - The Python interpreter is not thread safe. # - A few critical internal data structures may only be accessed by one thread at a time. 
Access to them is protected by the GIL. # - Attempts at removing the GIL from Python have failed until now. The main difficulty is maintaining the C API for extension modules. # - Multiprocessing avoids the GIL by having separate processes which each have an independent copy of the interpreter data structures. # - The price to pay: serialization of tasks, arguments, and results. # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Multiprocessing (history) # # - The multiprocessing allows the programmer to fully leverage multiple processors. # - The `Pool` object parallelizes the execution of a function across multiple input values. # - The if `__name__ == '__main__'` part is necessary. # <p><font color=red> The next program does not work in a cell you need to save it and run with python in a terminal </font></p> # # ```bash # python3 pool.py # ``` # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:35.056974Z", "iopub.execute_input": "2020-09-12T14:00:35.058212Z", "iopub.status.idle": "2020-09-12T14:00:35.060786Z", "shell.execute_reply": "2020-09-12T14:00:35.061358Z"}} # Executed only on main process. # + {"execution": {"iopub.status.busy": "2020-09-12T14:00:35.065862Z", "iopub.execute_input": "2020-09-12T14:00:35.067034Z", "iopub.status.idle": "2020-09-12T14:00:37.354115Z", "shell.execute_reply": "2020-09-12T14:00:37.354706Z"}} # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Futures # # The `concurrent.futures` module provides a high-level interface for asynchronously executing callables. # # The asynchronous execution can be performed with threads, using ThreadPoolExecutor, or separate processes, using ProcessPoolExecutor. Both implement the same interface, which is defined by the abstract Executor class. 
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:37.359908Z", "iopub.execute_input": "2020-09-12T14:00:37.360930Z", "iopub.status.idle": "2020-09-12T14:00:37.363397Z", "shell.execute_reply": "2020-09-12T14:00:37.364052Z"}} # for Windows users # + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:37.368389Z", "iopub.execute_input": "2020-09-12T14:00:37.369466Z", "iopub.status.idle": "2020-09-12T14:00:39.614789Z", "shell.execute_reply": "2020-09-12T14:00:39.615379Z"}} # + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:39.620798Z", "iopub.execute_input": "2020-09-12T14:00:39.621801Z", "iopub.status.idle": "2020-09-12T14:00:40.633249Z", "shell.execute_reply": "2020-09-12T14:00:40.634198Z"}} # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Asynchronous Future # While many parallel applications can be described as maps, some can be more complex. In this section we look at the asynchronous Future interface, which provides a simple API for ad-hoc parallelism. This is useful for when your computations don't fit a regular pattern. # # + [markdown] {"slideshow": {"slide_type": "fragment"}} # ### Executor.submit # # The `submit` method starts a computation in a separate thread or process and immediately gives us a `Future` object that refers to the result. At first, the future is pending. Once the function completes the future is finished. # # We collect the result of the task with the `.result()` method, # which does not return until the results are available. 
# + {"slideshow": {"slide_type": "slide"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.639327Z", "iopub.execute_input": "2020-09-12T14:00:40.640320Z", "iopub.status.idle": "2020-09-12T14:00:40.642032Z", "shell.execute_reply": "2020-09-12T14:00:40.642728Z"}} # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.650795Z", "iopub.execute_input": "2020-09-12T14:00:40.651924Z", "iopub.status.idle": "2020-09-12T14:00:40.654265Z", "shell.execute_reply": "2020-09-12T14:00:40.654828Z"}} # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:40.658819Z", "iopub.execute_input": "2020-09-12T14:00:40.659582Z", "iopub.status.idle": "2020-09-12T14:00:41.655950Z", "shell.execute_reply": "2020-09-12T14:00:41.656512Z"}} # + [markdown] {"slideshow": {"slide_type": "slide"}} # Submit many tasks all at once and they be will executed in parallel. # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:41.660798Z", "iopub.execute_input": "2020-09-12T14:00:41.661553Z", "shell.execute_reply": "2020-09-12T14:00:49.683871Z", "iopub.status.idle": "2020-09-12T14:00:49.684457Z"}} # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:49.691017Z", "iopub.execute_input": "2020-09-12T14:00:49.691871Z", "iopub.status.idle": "2020-09-12T14:00:51.702264Z", "shell.execute_reply": "2020-09-12T14:00:51.702861Z"}} # + [markdown] {"slideshow": {"slide_type": "slide"}} # * Submit fires off a single function call in the background, returning a future. # * When you combine submit with a single for loop we recover the functionality of map. # * To collect your results, replace each of futures, `f`, with a call to `f.result()` # * Combine submit with multiple for loops and other general programming to get something more general than map. 
# * Sometimes, it did not speed up the code very much # * Threads and processes show some performance differences # * Use threads carefully, you can break your Python session. # + [markdown] {"slideshow": {"slide_type": "slide"}} # Today most library designers are coordinating around the concurrent.futures interface, so it's wise to move over. # # * Profile your code # * Used concurrent.futures.ProcessPoolExecutor for simple parallelism # * Gained some speed boost (but not as much as expected) # * Lost ability to diagnose performance within parallel code # * Describing each task as a function call helps use tools like map for parallelism # * Making your tasks fast is often at least as important as parallelizing your tasks. # + [markdown] {"slideshow": {"slide_type": "slide"}} # ### Exercise: Pi computation # # Parallelize this computation with a ProcessPoolExecutor. ThreadPoolExecutor is not usable because of `random` function calls. # + {"slideshow": {"slide_type": "fragment"}, "execution": {"iopub.status.busy": "2020-09-12T14:00:51.746931Z", "iopub.execute_input": "2020-09-12T14:00:51.789213Z", "iopub.status.idle": "2020-09-12T14:01:09.629723Z", "shell.execute_reply": "2020-09-12T14:01:09.630334Z"}} # - # ### Exercise # # - Do the same computation using asynchronous future # - Implement a joblib version (see example below) # + [markdown] {"slideshow": {"slide_type": "slide"}} # ## Parallel tools for Python # # The parallel tools from standard library are very limited. You will have more powerful features with: # # - [Joblib](https://joblib.readthedocs.io/en/latest/) provides a simple helper class to write parallel for loops using multiprocessing. # - [Dask](https://dask.org) # - [PySpark](https://spark.apache.org/docs/latest/api/python/index.html) # - [mpi4py](https://mpi4py.readthedocs.io) # | 2.316445 | 2 |
trial.py | pradyunsg/CSE2003-Project | 0 | 6614122 | <reponame>pradyunsg/CSE2003-Project
"""
"""
import satsolver
FILENAME = "trail1.sat.txt"
def main():
    """Load a SAT instance from FILENAME, enumerate its satisfying
    assignments, and print each one.

    Prints "SAT" if at least one satisfying assignment was found,
    "UNSAT" otherwise.
    """
    with open(FILENAME) as f:
        instance = satsolver.Instance.from_file(f)
    solver = satsolver.SATSolver(instance)
    # recursive_solve() yields assignments lazily; we detect emptiness with a flag.
    solutions = solver.recursive_solve()
    found_solution = False
    for satisfying_assignments in solutions:
        found_solution = True
        print(instance.assignment_to_string(
            satisfying_assignments, brief=False, starting_with=''
        ))
    if found_solution:
        print("SAT")
    else:
        print("UNSAT")
if __name__ == '__main__':
main()
| """
"""
import satsolver
FILENAME = "trail1.sat.txt"
def main():
with open(FILENAME) as f:
instance = satsolver.Instance.from_file(f)
solver = satsolver.SATSolver(instance)
solutions = solver.recursive_solve()
found_solution = False
for satisfying_assignments in solutions:
found_solution = True
print(instance.assignment_to_string(
satisfying_assignments, brief=False, starting_with=''
))
if found_solution:
print("SAT")
else:
print("UNSAT")
if __name__ == '__main__':
main() | none | 1 | 3.371318 | 3 | |
sparse_causal_model_learner_rl/trainable/test_combined.py | sergeivolodin/causality-disentanglement-rl | 2 | 6614123 | <reponame>sergeivolodin/causality-disentanglement-rl
import os
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from tqdm.auto import tqdm
import pytest
import torch
from torch import nn
import gin
from sparse_causal_model_learner_rl.trainable.combined import FCCombinedModel, CombinedQuadraticLayer
from sparse_causal_model_learner_rl.trainable.quadratic_neuron import Quadratic
use_cuda = False
def get_model():
    """Build a small fully-connected Tanh network: 24 -> 60 -> 60 -> 60 -> 1."""
    layers = [torch.nn.Linear(24, 60), torch.nn.Tanh()]
    layers += [torch.nn.Linear(60, 60), torch.nn.Tanh()]
    layers += [torch.nn.Linear(60, 60), torch.nn.Tanh()]
    layers.append(torch.nn.Linear(60, 1))
    return torch.nn.Sequential(*layers)
class AllModels(nn.Module):
    """A bag of n_models independent copies of get_model(), registered as
    attributes named 'm00', 'm01', ... so nn.Module tracks their parameters."""

    def __init__(self, n_models):
        super(AllModels, self).__init__()
        self.models = []      # attribute names of the submodels
        self.model_list = []  # the submodules themselves, in creation order
        for idx in range(n_models):
            name = 'm%02d' % idx
            submodel = get_model()
            setattr(self, name, submodel)
            self.models.append(name)
            self.model_list.append(submodel)

    def forward(self, data, do_parallel=False):
        """Run every submodel on *data* and concatenate the outputs along dim 1."""
        outputs = []
        for name in self.models:
            outputs.append(getattr(self, name)(data))
        return torch.cat(outputs, dim=1)
@pytest.mark.parametrize('n_models,in_f,out_f,batch', [(3, 5, 6, 100), (3, 3, 3, 3), (1, 1, 1, 1)])
def test_combined_quadratic(n_models, in_f, out_f, batch):
    """CombinedQuadraticLayer must reproduce n_models independent Quadratic layers."""
    separate = [Quadratic(in_features=in_f, out_features=out_f) for _ in range(n_models)]
    stacked = CombinedQuadraticLayer(in_features=in_f, out_features=out_f, n_models=n_models)
    # Randomize each independent layer's parameters.
    for layer in separate:
        layer.w.data = torch.randn(*layer.w.shape)
        layer.W.data = torch.randn(*layer.W.shape)
        layer.b.data = torch.randn(*layer.b.shape)
    # Copy those parameters into the corresponding model slot of the combined layer.
    for slot, layer in enumerate(separate):
        stacked.bias.data[:, slot] = layer.b
        stacked.weight.data[:, :, slot] = layer.w
        stacked.qweight.data[:, :, :, slot] = layer.W
    batch_inp = torch.randn(batch, in_f, n_models)
    expected = [layer(batch_inp[:, :, slot]) for slot, layer in enumerate(separate)]
    actual = stacked(batch_inp)
    assert actual.shape == (batch, out_f, n_models)
    for slot in range(n_models):
        assert torch.allclose(actual[:, :, slot], expected[slot], atol=1e-5)
@pytest.mark.parametrize("n_models", [10, 20, 30])
def test_combined_inp_outp(n_models):
data = torch.randn(1000, 24)
M = AllModels(n_models)
C = FCCombinedModel(hidden_sizes=[60, 60, 60], input_shape=(24,), n_models=n_models, output_shape=(1,),
activation_cls=torch.nn.Tanh)
def print_with_shape(dct):
dct = dict(dct)
dct_shape = {x: y.shape for x, y in dct.items()}
print(dct_shape)
print_with_shape(M.named_parameters())
print_with_shape(C.named_parameters())
for n, p in M.named_parameters():
model_n = int(n[1:3])
layer_id = int(n.split('.')[1]) // 2
is_bias = 'bias' in n
print(n, model_n, is_bias, layer_id)
target_param = C.fc[layer_id]
if is_bias:
target_param.bias.data[:, model_n] = p
else:
target_param.weight.data[:, :, model_n] = p
outC = C(data.view(-1, 24, 1).expand(-1, -1, n_models))
outM = M(data)
outC = outC.detach().cpu().numpy()
outM = outM.detach().cpu().numpy()
print(outC.mean(), outM.mean())
delta = np.abs(outC - outM)
delta = np.mean(delta)
print(delta)
assert delta < 1e-7
| import os
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from tqdm.auto import tqdm
import pytest
import torch
from torch import nn
import gin
from sparse_causal_model_learner_rl.trainable.combined import FCCombinedModel, CombinedQuadraticLayer
from sparse_causal_model_learner_rl.trainable.quadratic_neuron import Quadratic
use_cuda = False
def get_model():
m = torch.nn.Sequential(
torch.nn.Linear(in_features=24, out_features=60,),
torch.nn.Tanh(),
torch.nn.Linear(in_features=60, out_features=60),
torch.nn.Tanh(),
torch.nn.Linear(in_features=60, out_features=60),
torch.nn.Tanh(),
torch.nn.Linear(in_features=60, out_features=1),
)
return m
class AllModels(nn.Module):
def __init__(self, n_models):
super(AllModels, self).__init__()
self.models = []
self.model_list = []
for m in range(n_models):
m_name = 'm%02d' % m
model = get_model()
setattr(self, m_name, model)
self.models.append(m_name)
self.model_list.append(model)
def forward(self, data, do_parallel=False):
results = [getattr(self, m)(data) for m in self.models]
return torch.cat(results, dim=1)
@pytest.mark.parametrize('n_models,in_f,out_f,batch', [(3, 5, 6, 100), (3, 3, 3, 3), (1, 1, 1, 1)])
def test_combined_quadratic(n_models, in_f, out_f, batch):
ind_models = [Quadratic(in_features=in_f, out_features=out_f) for _ in range(n_models)]
combined = CombinedQuadraticLayer(in_features=in_f, out_features=out_f, n_models=n_models)
for m in ind_models:
m.w.data = torch.randn(*m.w.shape)
m.W.data = torch.randn(*m.W.shape)
m.b.data = torch.randn(*m.b.shape)
for i, m in enumerate(ind_models):
combined.bias.data[:, i] = m.b
combined.weight.data[:, :, i] = m.w
combined.qweight.data[:, :, :, i] = m.W
inp = torch.randn(batch, in_f, n_models)
ind_out = [m(inp[:, :, idx]) for idx, m in enumerate(ind_models)]
comb_out = combined(inp)
assert comb_out.shape == (batch, out_f, n_models)
for i in range(n_models):
assert torch.allclose(comb_out[:, :, i], ind_out[i], atol=1e-5)
@pytest.mark.parametrize("n_models", [10, 20, 30])
def test_combined_inp_outp(n_models):
data = torch.randn(1000, 24)
M = AllModels(n_models)
C = FCCombinedModel(hidden_sizes=[60, 60, 60], input_shape=(24,), n_models=n_models, output_shape=(1,),
activation_cls=torch.nn.Tanh)
def print_with_shape(dct):
dct = dict(dct)
dct_shape = {x: y.shape for x, y in dct.items()}
print(dct_shape)
print_with_shape(M.named_parameters())
print_with_shape(C.named_parameters())
for n, p in M.named_parameters():
model_n = int(n[1:3])
layer_id = int(n.split('.')[1]) // 2
is_bias = 'bias' in n
print(n, model_n, is_bias, layer_id)
target_param = C.fc[layer_id]
if is_bias:
target_param.bias.data[:, model_n] = p
else:
target_param.weight.data[:, :, model_n] = p
outC = C(data.view(-1, 24, 1).expand(-1, -1, n_models))
outM = M(data)
outC = outC.detach().cpu().numpy()
outM = outM.detach().cpu().numpy()
print(outC.mean(), outM.mean())
delta = np.abs(outC - outM)
delta = np.mean(delta)
print(delta)
assert delta < 1e-7 | none | 1 | 2.135485 | 2 | |
latexconvertmd/LaTeX.py | DavidCouronne/LaTeXConvertMD | 0 | 6614124 | #!/usr/local/bin/python
# -*- coding:utf-8 -*-
# Auteur: <NAME>
# Convertion automatique de LaTeX en Markdown
import codecs
import os
import re
from latexconvertmd import LaTeXCommands, config
from slugify import slugify
from TexSoup import TexSoup
class Source:
def __init__(self, original="", exportFolder=config.outputFolder, file=False):
self.original = original # On garde l'original pour développement
self.contenu = original
self.manipFiles = True # Manipulation de fichiers
self.lines = self.contenu.splitlines()
self.exportFolder = exportFolder
self.nbfigure = 0
self.exo = 0
if file != False:
self.outputFolder = file
def collapseLines(self):
    """Rejoin self.lines into self.contenu (newline-separated)."""
    self.contenu = "\n".join(self.lines)
def cleanSpace(self):
    """Operate on the lines.

    Strip leading and trailing whitespace from every line of self.lines.
    """
    # Idiomatic single-pass comprehension instead of a manual append loop.
    self.lines = [line.strip() for line in self.lines]
def cleanRem(self):
    """Operate on the lines.

    Remove every '%' LaTeX comment while preserving literal '\\%'.
    """
    self.collapseLines()  # start by protecting the escaped \% sequences
    # Sentinel swap: hide literal \% so they survive comment stripping.
    self.contenu = self.contenu.replace("\\%", "!!rem!!")
    self.lines = self.contenu.splitlines()
    new_lines = []
    for line in self.lines:
        remarque = line.find("%")
        if remarque != -1:
            # Drop everything from the first (now unescaped) % onward.
            line = line[:remarque]
        new_lines.append(line)
    self.lines = new_lines
    self.collapseLines()
    # Restore the literal \% sequences.
    self.contenu = self.contenu.replace("!!rem!!", "\\%")
    self.lines = self.contenu.splitlines()
def cleanLines(self):
    """Operate on the content.

    Collapse every run of three or more consecutive newlines down to two,
    i.e. squeeze repeated blank lines into a single blank line.
    """
    # One regex pass replaces the original repeated-replace while loop
    # (which rescanned the whole string on every iteration).
    self.contenu = re.sub(r"\n{3,}", "\n\n", self.contenu)
def cleanCommand(self):
"""Agit sur le contenu.
Supprime toutes les commandes de listeCommandesClean et delCommand du fichier config.py
Et au passage gère les sections et subsections"""
soup = TexSoup(self.contenu)
for section in soup.find_all('section'):
section.replace('## '+section.string)
for subsection in soup.find_all('subsection'):
subsection.replace('### '+subsection.string)
for command in config.delCommands:
for include in soup.find_all(command):
include.delete()
self.contenu = repr(soup)
for command in config.listeCommandesClean:
self.contenu = re.sub(command.regex, "", self.contenu)
self.lines = self.contenu.splitlines()
def cleanLayout(self):
"""Agit sur le contenu.
Supprime toutes les commandes de listeLayout du fichier config.py"""
for command in config.listeCommandesLayout:
self.contenu = command.cleanCommand(self.contenu)
def replaceCommandSimple(self):
"""Agit sur le contenu.
Remplace les commandes sans arguments"""
for command, replace in config.listeReplaceSimple:
self.contenu = re.sub(command.regex, replace, self.contenu)
def replaceCommand(self):
"""Agit sur le contenu.
Remplace toutes les commande de listeReplace de config.py"""
for command, arg in config.listeReplace:
self.contenu = command.replaceCommand(self.contenu, arg)
def replaceText(self):
"""Agit sur le contenu.
Remplacement simple sans regex"""
for texaremplacer, textederemplacement in config.listeReplaceText:
self.contenu = self.contenu.replace(
texaremplacer, textederemplacement)
def convertEnumerate(self):
"""Agit sur les lignes.
Converti les environnements enumerate en listes html"""
level_enumerate = 0
enumi = 0
enumii = 0
new_lines = []
arabic = "abcdefghijklmnopqrstuvwxz"
for line in self.lines:
if r"\begin{enumerate}" in line or r"\begin{colenumerate}" in line:
level_enumerate = level_enumerate + 1
line = ""
elif r"\end{enumerate}" in line or r"\end{colenumerate}" in line:
if level_enumerate == 2:
enumii = 0
else:
enumi = 0
level_enumerate = level_enumerate - 1
line = ""
elif r"\item" in line and level_enumerate != 0:
if level_enumerate == 1:
enumi = enumi + 1
line = line.replace(r"\item", str(enumi)+". ")
line = "\n\n" + line
else:
line = line.replace(r"\item", arabic[enumii]+") ")
enumii = enumii + 1
line = "\n\n" + line
new_lines.append(line)
self.lines = new_lines
def convertItemize(self):
"""Agit sur les lignes.
Converti les environnements itemize en listes html"""
new_lines = []
for line in self.lines:
if r"\begin{itemize}" in line:
line = "\n\n"
elif r"\end{itemize}" in line:
line = "\n\n"
elif r"\item" in line:
line = line.replace(r"\item", "\n\n+ ")
new_lines.append(line)
self.lines = new_lines
def findPstricks(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements Pstricks"""
in_pstricks = False
lignes_pstricks = []
pstricks = []
for line in self.lines:
if in_pstricks:
lignes_pstricks.append(line)
if r"\end{pspicture" in line:
in_pstricks = False
pstricks.append("\n".join(lignes_pstricks))
lignes_pstricks = []
else:
if r"\psset" in line or r"\begin{pspicture" in line:
in_pstricks = True
lignes_pstricks.append(line)
self.pstricks = pstricks
def findConvert(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements Convert"""
in_convert = False
lignes_convert = []
convert = []
for line in self.lines:
if in_convert:
lignes_convert.append(line)
if r"\end{convert}" in line:
in_convert = False
convert.append("\n".join(lignes_convert))
lignes_convert = []
else:
if r"\begin{convert}" in line:
in_convert = True
lignes_convert.append(line)
self.convert = convert
def findTikz(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements Tikz"""
in_tikz = False
lignes_tikz = []
tikz = []
for line in self.lines:
if in_tikz:
lignes_tikz.append(line)
if r"\end{tikz" in line:
in_tikz = False
tikz.append("\n".join(lignes_tikz))
lignes_tikz = []
else:
if r"\begin{tikz" in line:
in_tikz = True
lignes_tikz.append(line)
self.tikz = tikz
def findTab(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements tab..."""
in_tab = False
lignes_tab = []
tab = []
for line in self.lines:
if in_tab:
lignes_tab.append(line)
if r"\end{tab" in line:
in_tab = False
tab.append("\n".join(lignes_tab))
lignes_tab = []
else:
if r"\begin{tab" in line:
in_tab = True
lignes_tab.append(line)
self.tab = tab
def replacePstricks(self):
if len(self.pstricks) == 0:
return
preamble = config.TEX_HEADER
for figure in self.pstricks:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system(f"dvisvgm temp.dvi")
try:
os.rename("temp-1.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
self.contenu = self.contenu.replace(
figure,
'+".svg)")
def replaceConvert(self):
self.collapseLines()
if len(self.convert) == 0:
return
preamble = config.TEX_HEADER
for figure in self.convert:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system("dvisvgm temp")
try:
os.rename("temp.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
print(figure)
self.contenu = self.contenu.replace(
figure,
'+".svg)")
self.lines = self.contenu.splitlines()
def replaceTikz(self):
if len(self.tikz) == 0:
return
preamble = config.TEX_HEADER
for figure in self.tikz:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system("dvisvgm temp")
try:
os.rename("temp.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
self.contenu = self.contenu.replace(
figure,
'+".svg)")
def processGraphics(self):
"""Remplace les includegraphics"""
if "includegraphics" in self.contenu:
graphic = self.contenu.split(r"\includegraphics")
self.contenu = graphic[0]
for i in range(len(graphic)-1):
apres = graphic[i+1]
#apres = apres[apres.find("{")+1:]
commande = "\\includegraphics"+apres[:apres.find("}")+1]
self.nbfigure = self.nbfigure + 1
total = config.TEX_HEADER + commande + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("xelatex temp.tex")
os.system("magick convert temp.pdf temp.png")
try:
os.rename("temp.png", "figure"+str(self.nbfigure)+".png")
except:
print("Le fichier figure" +
str(self.nbfigure)+".png existe déjà")
apres = apres[apres.find("}")+1:]
self.contenu = self.contenu + \
' +".png) "+apres
def processTab(self, intab):
"""Convertit le contenu d'un tabular ou tabularx en Markdown"""
tableau = ""
delemiteur = ""
intab = intab.replace("\\hline", '')
lines = intab.split("\n")
newlines = []
for line in lines:
if line == '':
pass
else:
nbRow = line.count('&')
line = line.replace("\\\\", '').replace("&", " | ")
line = "| " + line + " |"
newlines.append(line)
delemiteur = ""
for i in range(nbRow + 1):
delemiteur = delemiteur + "|---"
delemiteur = delemiteur + "|"
for i in range(len(newlines)):
if i == 1:
tableau = tableau + delemiteur + "\n"+newlines[1] + "\n"
else:
tableau = tableau + newlines[i] + "\n"
return tableau
def soupTab(self):
"""Utilise TexSoup pour tabular et tabularx"""
soup = TexSoup(self.contenu)
for tabu in soup.find_all('tabular'):
print(tabu)
arg = []
for i in tabu.contents:
arg.append(str(i))
intab = "".join(arg)
tableau = self.processTab(intab)
self.contenu = self.contenu.replace(repr(tabu), tableau)
def replaceTab(self):
if len(self.tab) == 0:
return
preamble = config.TEX_HEADER
for figure in self.tab:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system("dvisvgm temp")
try:
os.rename("temp.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
self.contenu = self.contenu.replace(
figure,
'+".svg)")
def checkEnv(self):
for arg in config.listeEnv:
begin = "\\begin{"+arg[0]+"}"
end = "\\end{"+arg[0]+"}"
self.contenu = self.contenu.replace(begin, arg[1])
self.contenu = self.contenu.replace(end, arg[2])
def convertExos(self):
self.contenu = self.contenu.replace('\\end{exercice}', ':::\n\n')
self.lines = self.contenu.split('\n')
newlines = []
for line in self.lines:
if "\\begin{exercice}" in line:
self.exo = self.exo + 1
line = line.replace(
'\\begin{exercice}', '::: tip Exercice '+str(self.exo)+' ')
newlines.append(line)
self.lines = newlines
self.collapseLines()
def process(self):
"""Effectue les taches de conversion"""
# Opérations sur les lignes
self.cleanSpace()
self.cleanRem()
self.findConvert()
if self.manipFiles:
self.replaceConvert()
self.findPstricks()
self.findTikz()
self.findTab()
# Convertion tabular
self.collapseLines()
self.soupTab()
# Convertion PsTricks TikZ Images
if self.manipFiles:
self.replacePstricks()
self.replaceTikz()
self.processGraphics()
# Enumerate et Itemize
self.lines = self.contenu.splitlines()
self.convertEnumerate()
self.convertItemize()
# Opérations sur le contenu
self.collapseLines()
self.convertExos()
self.checkEnv()
self.cleanCommand()
self.replaceCommand()
self.cleanLayout()
self.replaceCommandSimple()
self.replaceText()
self.contenu = self.contenu.replace("{}", "")
self.contenu = self.contenu.replace("[ ]", "")
#self.contenu = self.contenu.replace("\\\\", "\n\n")
self.cleanLines()
| #!/usr/local/bin/python
# -*- coding:utf-8 -*-
# Auteur: <NAME>
# Convertion automatique de LaTeX en Markdown
import codecs
import os
import re
from latexconvertmd import LaTeXCommands, config
from slugify import slugify
from TexSoup import TexSoup
class Source:
def __init__(self, original="", exportFolder=config.outputFolder, file=False):
self.original = original # On garde l'original pour développement
self.contenu = original
self.manipFiles = True # Manipulation de fichiers
self.lines = self.contenu.splitlines()
self.exportFolder = exportFolder
self.nbfigure = 0
self.exo = 0
if file != False:
self.outputFolder = file
def collapseLines(self):
"""Recolle les lignes dans self.contenu"""
self.contenu = "\n".join(self.lines)
def cleanSpace(self):
"""Agit sur les lignes.
Enlève les espaces en début et fin de chaque ligne"""
new_lines = []
for line in self.lines:
line = line.strip()
new_lines.append(line)
self.lines = new_lines
def cleanRem(self):
"""Agit sur les lignes.
Enlèves toutes les remarques %"""
self.collapseLines() # On commence par protégéer les \%
self.contenu = self.contenu.replace("\\%", "!!rem!!")
self.lines = self.contenu.splitlines()
new_lines = []
for line in self.lines:
remarque = line.find("%")
if remarque != -1:
line = line[:remarque]
new_lines.append(line)
self.lines = new_lines
self.collapseLines()
self.contenu = self.contenu.replace("!!rem!!", "\\%")
self.lines = self.contenu.splitlines()
def cleanLines(self):
"""Agit sur le contenu.
Supprime les lignes vides"""
while "\n\n\n" in self.contenu:
self.contenu = self.contenu.replace("\n\n\n", "\n\n")
def cleanCommand(self):
"""Agit sur le contenu.
Supprime toutes les commandes de listeCommandesClean et delCommand du fichier config.py
Et au passage gère les sections et subsections"""
soup = TexSoup(self.contenu)
for section in soup.find_all('section'):
section.replace('## '+section.string)
for subsection in soup.find_all('subsection'):
subsection.replace('### '+subsection.string)
for command in config.delCommands:
for include in soup.find_all(command):
include.delete()
self.contenu = repr(soup)
for command in config.listeCommandesClean:
self.contenu = re.sub(command.regex, "", self.contenu)
self.lines = self.contenu.splitlines()
def cleanLayout(self):
"""Agit sur le contenu.
Supprime toutes les commandes de listeLayout du fichier config.py"""
for command in config.listeCommandesLayout:
self.contenu = command.cleanCommand(self.contenu)
def replaceCommandSimple(self):
"""Agit sur le contenu.
Remplace les commandes sans arguments"""
for command, replace in config.listeReplaceSimple:
self.contenu = re.sub(command.regex, replace, self.contenu)
def replaceCommand(self):
"""Agit sur le contenu.
Remplace toutes les commande de listeReplace de config.py"""
for command, arg in config.listeReplace:
self.contenu = command.replaceCommand(self.contenu, arg)
def replaceText(self):
"""Agit sur le contenu.
Remplacement simple sans regex"""
for texaremplacer, textederemplacement in config.listeReplaceText:
self.contenu = self.contenu.replace(
texaremplacer, textederemplacement)
def convertEnumerate(self):
"""Agit sur les lignes.
Converti les environnements enumerate en listes html"""
level_enumerate = 0
enumi = 0
enumii = 0
new_lines = []
arabic = "abcdefghijklmnopqrstuvwxz"
for line in self.lines:
if r"\begin{enumerate}" in line or r"\begin{colenumerate}" in line:
level_enumerate = level_enumerate + 1
line = ""
elif r"\end{enumerate}" in line or r"\end{colenumerate}" in line:
if level_enumerate == 2:
enumii = 0
else:
enumi = 0
level_enumerate = level_enumerate - 1
line = ""
elif r"\item" in line and level_enumerate != 0:
if level_enumerate == 1:
enumi = enumi + 1
line = line.replace(r"\item", str(enumi)+". ")
line = "\n\n" + line
else:
line = line.replace(r"\item", arabic[enumii]+") ")
enumii = enumii + 1
line = "\n\n" + line
new_lines.append(line)
self.lines = new_lines
def convertItemize(self):
"""Agit sur les lignes.
Converti les environnements itemize en listes html"""
new_lines = []
for line in self.lines:
if r"\begin{itemize}" in line:
line = "\n\n"
elif r"\end{itemize}" in line:
line = "\n\n"
elif r"\item" in line:
line = line.replace(r"\item", "\n\n+ ")
new_lines.append(line)
self.lines = new_lines
def findPstricks(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements Pstricks"""
in_pstricks = False
lignes_pstricks = []
pstricks = []
for line in self.lines:
if in_pstricks:
lignes_pstricks.append(line)
if r"\end{pspicture" in line:
in_pstricks = False
pstricks.append("\n".join(lignes_pstricks))
lignes_pstricks = []
else:
if r"\psset" in line or r"\begin{pspicture" in line:
in_pstricks = True
lignes_pstricks.append(line)
self.pstricks = pstricks
def findConvert(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements Convert"""
in_convert = False
lignes_convert = []
convert = []
for line in self.lines:
if in_convert:
lignes_convert.append(line)
if r"\end{convert}" in line:
in_convert = False
convert.append("\n".join(lignes_convert))
lignes_convert = []
else:
if r"\begin{convert}" in line:
in_convert = True
lignes_convert.append(line)
self.convert = convert
def findTikz(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements Tikz"""
in_tikz = False
lignes_tikz = []
tikz = []
for line in self.lines:
if in_tikz:
lignes_tikz.append(line)
if r"\end{tikz" in line:
in_tikz = False
tikz.append("\n".join(lignes_tikz))
lignes_tikz = []
else:
if r"\begin{tikz" in line:
in_tikz = True
lignes_tikz.append(line)
self.tikz = tikz
def findTab(self):
"""Agit sur les lignes.
Essaie de trouver les envir
onnements tab..."""
in_tab = False
lignes_tab = []
tab = []
for line in self.lines:
if in_tab:
lignes_tab.append(line)
if r"\end{tab" in line:
in_tab = False
tab.append("\n".join(lignes_tab))
lignes_tab = []
else:
if r"\begin{tab" in line:
in_tab = True
lignes_tab.append(line)
self.tab = tab
def replacePstricks(self):
if len(self.pstricks) == 0:
return
preamble = config.TEX_HEADER
for figure in self.pstricks:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system(f"dvisvgm temp.dvi")
try:
os.rename("temp-1.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
self.contenu = self.contenu.replace(
figure,
'+".svg)")
def replaceConvert(self):
self.collapseLines()
if len(self.convert) == 0:
return
preamble = config.TEX_HEADER
for figure in self.convert:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system("dvisvgm temp")
try:
os.rename("temp.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
print(figure)
self.contenu = self.contenu.replace(
figure,
'+".svg)")
self.lines = self.contenu.splitlines()
def replaceTikz(self):
if len(self.tikz) == 0:
return
preamble = config.TEX_HEADER
for figure in self.tikz:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system("dvisvgm temp")
try:
os.rename("temp.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
self.contenu = self.contenu.replace(
figure,
'+".svg)")
def processGraphics(self):
"""Remplace les includegraphics"""
if "includegraphics" in self.contenu:
graphic = self.contenu.split(r"\includegraphics")
self.contenu = graphic[0]
for i in range(len(graphic)-1):
apres = graphic[i+1]
#apres = apres[apres.find("{")+1:]
commande = "\\includegraphics"+apres[:apres.find("}")+1]
self.nbfigure = self.nbfigure + 1
total = config.TEX_HEADER + commande + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("xelatex temp.tex")
os.system("magick convert temp.pdf temp.png")
try:
os.rename("temp.png", "figure"+str(self.nbfigure)+".png")
except:
print("Le fichier figure" +
str(self.nbfigure)+".png existe déjà")
apres = apres[apres.find("}")+1:]
self.contenu = self.contenu + \
' +".png) "+apres
def processTab(self, intab):
"""Convertit le contenu d'un tabular ou tabularx en Markdown"""
tableau = ""
delemiteur = ""
intab = intab.replace("\\hline", '')
lines = intab.split("\n")
newlines = []
for line in lines:
if line == '':
pass
else:
nbRow = line.count('&')
line = line.replace("\\\\", '').replace("&", " | ")
line = "| " + line + " |"
newlines.append(line)
delemiteur = ""
for i in range(nbRow + 1):
delemiteur = delemiteur + "|---"
delemiteur = delemiteur + "|"
for i in range(len(newlines)):
if i == 1:
tableau = tableau + delemiteur + "\n"+newlines[1] + "\n"
else:
tableau = tableau + newlines[i] + "\n"
return tableau
def soupTab(self):
"""Utilise TexSoup pour tabular et tabularx"""
soup = TexSoup(self.contenu)
for tabu in soup.find_all('tabular'):
print(tabu)
arg = []
for i in tabu.contents:
arg.append(str(i))
intab = "".join(arg)
tableau = self.processTab(intab)
self.contenu = self.contenu.replace(repr(tabu), tableau)
def replaceTab(self):
if len(self.tab) == 0:
return
preamble = config.TEX_HEADER
for figure in self.tab:
self.nbfigure = self.nbfigure + 1
total = preamble + figure + r"\end{document}"
f = codecs.open("temp.tex", "w", "utf-8")
f.write(total)
f.close()
os.system("latex temp.tex")
os.system("dvisvgm temp")
try:
os.rename("temp.svg", "figure"+str(self.nbfigure)+".svg")
except:
print("Le fichier figure"+str(self.nbfigure)+".svg existe déjà")
self.contenu = self.contenu.replace(
figure,
'+".svg)")
def checkEnv(self):
for arg in config.listeEnv:
begin = "\\begin{"+arg[0]+"}"
end = "\\end{"+arg[0]+"}"
self.contenu = self.contenu.replace(begin, arg[1])
self.contenu = self.contenu.replace(end, arg[2])
def convertExos(self):
self.contenu = self.contenu.replace('\\end{exercice}', ':::\n\n')
self.lines = self.contenu.split('\n')
newlines = []
for line in self.lines:
if "\\begin{exercice}" in line:
self.exo = self.exo + 1
line = line.replace(
'\\begin{exercice}', '::: tip Exercice '+str(self.exo)+' ')
newlines.append(line)
self.lines = newlines
self.collapseLines()
def process(self):
"""Effectue les taches de conversion"""
# Opérations sur les lignes
self.cleanSpace()
self.cleanRem()
self.findConvert()
if self.manipFiles:
self.replaceConvert()
self.findPstricks()
self.findTikz()
self.findTab()
# Convertion tabular
self.collapseLines()
self.soupTab()
# Convertion PsTricks TikZ Images
if self.manipFiles:
self.replacePstricks()
self.replaceTikz()
self.processGraphics()
# Enumerate et Itemize
self.lines = self.contenu.splitlines()
self.convertEnumerate()
self.convertItemize()
# Opérations sur le contenu
self.collapseLines()
self.convertExos()
self.checkEnv()
self.cleanCommand()
self.replaceCommand()
self.cleanLayout()
self.replaceCommandSimple()
self.replaceText()
self.contenu = self.contenu.replace("{}", "")
self.contenu = self.contenu.replace("[ ]", "")
#self.contenu = self.contenu.replace("\\\\", "\n\n")
self.cleanLines()
| fr | 0.91278 | #!/usr/local/bin/python # -*- coding:utf-8 -*- # Auteur: <NAME> # Convertion automatique de LaTeX en Markdown # On garde l'original pour développement # Manipulation de fichiers Recolle les lignes dans self.contenu Agit sur les lignes. Enlève les espaces en début et fin de chaque ligne Agit sur les lignes. Enlèves toutes les remarques % # On commence par protégéer les \% Agit sur le contenu. Supprime les lignes vides Agit sur le contenu. Supprime toutes les commandes de listeCommandesClean et delCommand du fichier config.py Et au passage gère les sections et subsections # '+section.string) ## '+subsection.string) Agit sur le contenu. Supprime toutes les commandes de listeLayout du fichier config.py Agit sur le contenu. Remplace les commandes sans arguments Agit sur le contenu. Remplace toutes les commande de listeReplace de config.py Agit sur le contenu. Remplacement simple sans regex Agit sur les lignes. Converti les environnements enumerate en listes html Agit sur les lignes. Converti les environnements itemize en listes html Agit sur les lignes. Essaie de trouver les envir onnements Pstricks Agit sur les lignes. Essaie de trouver les envir onnements Convert Agit sur les lignes. Essaie de trouver les envir onnements Tikz Agit sur les lignes. Essaie de trouver les envir onnements tab... Remplace les includegraphics #apres = apres[apres.find("{")+1:] Convertit le contenu d'un tabular ou tabularx en Markdown Utilise TexSoup pour tabular et tabularx Effectue les taches de conversion # Opérations sur les lignes # Convertion tabular # Convertion PsTricks TikZ Images # Enumerate et Itemize # Opérations sur le contenu #self.contenu = self.contenu.replace("\\\\", "\n\n") | 2.709206 | 3 |
addons/mixer/asset_bank.py | trisadmeslek/V-Sekai-Blender-tools | 0 | 6614125 | <gh_stars>0
# GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This module defines how Asset Bank messages are handled.
Asset Bank is another addon we develop and that can be controlled through Mixer.
We plan to extract this code in a plug-in system in the future to avoid polluting the core of Mixer.
"""
from enum import IntEnum
import bpy
from mixer.share_data import share_data
import mixer.broadcaster.common as common
class AssetBankAction(IntEnum):
LIST_REQUEST = 0
LIST_RESPONSE = 1
IMPORT_REQUEST = 2
IMPORT_RESPONSE = 3
def send_asset_bank_entries():
if bpy.context.window_manager.uas_asset_bank is None:
return
bpy.ops.uas.asset_bank_refresh()
assets = bpy.context.window_manager.uas_asset_bank.assets
names = []
tags = []
thumbnails = []
for asset in assets:
names.append(asset.nice_name)
tags.append(asset.tags)
thumbnails.append(asset.thumbnail_path)
buffer = (
common.encode_int(AssetBankAction.LIST_RESPONSE)
+ common.encode_string_array(names)
+ common.encode_string_array(tags)
+ common.encode_string_array(thumbnails)
)
share_data.client.add_command(common.Command(common.MessageType.ASSET_BANK, buffer, 0))
def receive_message(data):
index = 0
action, index = common.decode_int(data, index)
if action == AssetBankAction.LIST_REQUEST:
send_asset_bank_entries()
elif action == AssetBankAction.IMPORT_REQUEST:
import_asset(data, index)
def import_asset(data, index):
name, index = common.decode_string(data, index)
asset_index = -1
old_objects = set(bpy.context.scene.objects)
for asset in bpy.context.window_manager.uas_asset_bank.assets:
asset_index += 1
if asset.nice_name == name:
# Import
bpy.ops.uas.asset_bank_import(index=asset_index)
# Send imported object names
new_objects = set(bpy.context.scene.objects)
diff = new_objects - old_objects
for ob in diff:
send_imported_object_name(ob.name_full, name)
return
def send_imported_object_name(blender_name: str, nice_name: str):
buffer = (
common.encode_int(AssetBankAction.IMPORT_RESPONSE)
+ common.encode_string(blender_name)
+ common.encode_string(nice_name)
)
share_data.client.add_command(common.Command(common.MessageType.ASSET_BANK, buffer, 0))
| # GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This module defines how Asset Bank messages are handled.
Asset Bank is another addon we develop and that can be controlled through Mixer.
We plan to extract this code in a plug-in system in the future to avoid polluting the core of Mixer.
"""
from enum import IntEnum
import bpy
from mixer.share_data import share_data
import mixer.broadcaster.common as common
class AssetBankAction(IntEnum):
LIST_REQUEST = 0
LIST_RESPONSE = 1
IMPORT_REQUEST = 2
IMPORT_RESPONSE = 3
def send_asset_bank_entries():
if bpy.context.window_manager.uas_asset_bank is None:
return
bpy.ops.uas.asset_bank_refresh()
assets = bpy.context.window_manager.uas_asset_bank.assets
names = []
tags = []
thumbnails = []
for asset in assets:
names.append(asset.nice_name)
tags.append(asset.tags)
thumbnails.append(asset.thumbnail_path)
buffer = (
common.encode_int(AssetBankAction.LIST_RESPONSE)
+ common.encode_string_array(names)
+ common.encode_string_array(tags)
+ common.encode_string_array(thumbnails)
)
share_data.client.add_command(common.Command(common.MessageType.ASSET_BANK, buffer, 0))
def receive_message(data):
index = 0
action, index = common.decode_int(data, index)
if action == AssetBankAction.LIST_REQUEST:
send_asset_bank_entries()
elif action == AssetBankAction.IMPORT_REQUEST:
import_asset(data, index)
def import_asset(data, index):
name, index = common.decode_string(data, index)
asset_index = -1
old_objects = set(bpy.context.scene.objects)
for asset in bpy.context.window_manager.uas_asset_bank.assets:
asset_index += 1
if asset.nice_name == name:
# Import
bpy.ops.uas.asset_bank_import(index=asset_index)
# Send imported object names
new_objects = set(bpy.context.scene.objects)
diff = new_objects - old_objects
for ob in diff:
send_imported_object_name(ob.name_full, name)
return
def send_imported_object_name(blender_name: str, nice_name: str):
buffer = (
common.encode_int(AssetBankAction.IMPORT_RESPONSE)
+ common.encode_string(blender_name)
+ common.encode_string(nice_name)
)
share_data.client.add_command(common.Command(common.MessageType.ASSET_BANK, buffer, 0)) | en | 0.870809 | # GPLv3 License # # Copyright (C) 2020 Ubisoft # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. This module defines how Asset Bank messages are handled. Asset Bank is another addon we develop and that can be controlled through Mixer. We plan to extract this code in a plug-in system in the future to avoid polluting the core of Mixer. # Import # Send imported object names | 1.923756 | 2 |
src/pcbLibraryManager/parts/icGenerator.py | NiceCircuits/pcbLibraryManager | 0 | 6614126 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 16 14:27:28 2016
@author: piotr at nicecircuits.com
"""
from libraryManager.symbol import symbol
from libraryManager.symbolPrimitive import *
from libraryManager.defaults import defaults
from libraryManager.part import part
from symbols.symbolsIC import *
from footprints.footprintSmdQuad import *
from libraryManager.footprintPrimitive import *
import pyexcel_ods3
import numpy as np
test_path=r"D:\pcbLibraryManager\src\pcbLibraryManager\libraries\STM32_LQFP48.ods"
class icGenerator():
"""
universal generator for IC symbols from xls files
"""
def generate_advanced(pinout_file_name, symbol_file_name="", footprint_file_name=""):
"""
generate one or many parts from pinout file. Read more data from file.
pinout_file_name: name of file with pinouts
symbol_file_name: name of file with symbols
footprint_file_name: name of file with footprints
"""
ret=[]
if symbol_file_name=="":
symbol_file_name = pinout_file_name
if footprint_file_name=="":
footprint_file_name = pinout_file_name
parts=icGenerator.load_parts_advanced(pinout_file_name)
for part_descr in parts:
if part_descr:
if part_descr[0]:
new_part=part(part_descr[0], defaults.icRefDes)
symbols = icGenerator.load_symbol_advanced(symbol_file_name, part_descr[1])
new_part.symbols.extend(symbols)
footprints = icGenerator.load_footprints_advanced(footprint_file_name) #, part_descr[2]
new_part.footprints.extend(footprints)
ret.append(new_part)
return ret
# todo: alternate part
#new_part=part(part_descr[0]+"_alt", defaults.icRefDes)
def load_footprints_advanced(fileName):
ret=[]
params = icGenerator.load_ods_section(fileName, "Footprint", \
stopString="Mechanical", vector=True, dictionary=True)
mechanical = icGenerator.load_ods_section(fileName, "Footprint", \
startString="Mechanical", stopString="Footprint", vector=False, dictionary=True)
footprint = icGenerator.load_ods_section(fileName, "Footprint", \
startString="Footprint", vector=False, dictionary=True)
for variant in footprint.keys():
if params["Type"]=="QFP":
ret.append(footprintQfpParametrized(params, mechanical, footprint, variant))
if params["Type"]=="QFN":
ret.append(footprintQfnParametrized(params, mechanical, footprint, variant))
else:
raise ValueError("Footprint type %s found in %s unsupported" % (params["Type"], fileName))
return ret
def test_load_footprints_advanced():
print(icGenerator.load_footprints_advanced(test_path))
def load_parts_advanced(fileName):
"""
Load parts description from pinout file.
"""
ret = icGenerator.load_ods_section(fileName, "Part", vector=False)
return ret[1:]
def load_ods_sheet(fileName, sheetName):
try:
data = pyexcel_ods3.get_data(fileName)[sheetName]
except KeyError:
raise ValueError("No \"%s\" tab found in %s" %(sheetName, fileName))
except:
raise ValueError("Error opening file " + fileName)
return data
    def load_ods_section(fileName, sheetName, startString="", stopString="", \
            vector=False, dictionary=False):
        """
        Read a labelled section of an ODS sheet into a list or dictionary.

        fileName: ODS file to read
        sheetName: sheet (tab) to read
        startString: if given, skip rows until one whose first cell equals it
        stopString: if given, stop at a row whose first cell equals it
        vector: treat each row as a (key, value) pair (first two cells used)
        dictionary: return dict(s) instead of list(s)

        data:
        a,1
        b,2
        read as vector and dictionary:
        {"a":1' "b":2}
        read vector and not dictionary:
        [["a",1], ["b",2]]
        data:
        ,v1,v2
        a,1,2
        b,3,4,5,6
        read as not vector and not dictionary:
        [[,"v1","v2"],
        ["a",1,2],
        ["b",3,4]]
        read as not vector and dictionary:
        {"v1":{"a":1,"b":3},
        "v2":{"a":2,"b":4}}
        """
        # Choose the output container; `dictionary` takes precedence over
        # `vector`.
        if dictionary:
            output={}
        elif vector:
            output=[]
        else:
            # non-vector list output carries a header row at index 0
            output=[[]]
        data=icGenerator.load_ods_sheet(fileName, sheetName)
        header = True
        variants=[]  # column names collected from the header row
        # Without a startString we save rows from the top of the sheet.
        if startString:
            save_data=False
        else:
            save_data=True
        for line in data:
            if line and save_data:
                # stopString terminates the section (the stop row is excluded)
                if stopString and line[0]==stopString:
                    break
                if vector and line[0] and len(line)>=2:
                    # vector mode: first two cells form a (key, value) pair
                    if dictionary:
                        output[str(line[0]).strip()]=line[1]
                    else:
                        output.append([str(line[0]).strip(), line[1]])
                elif not vector:
                    if header:
                        # first saved row is the header naming the variants
                        header=False
                        # in dictionary mode cell 0 is a row label, not a name
                        start=1 if dictionary else 0
                        for name in line[start:]:
                            name=str(name).strip()
                            if dictionary:
                                output[name]={}
                            else:
                                output[0].append(name)
                            variants.append(name)
                    elif dictionary:
                        # data row: line[0] is the row key; remaining cells are
                        # distributed over the per-variant dictionaries
                        for i in range(len(variants)):
                            if i<(len(line)-1):
                                output[variants[i]][line[0]]=line[i+1]
                            else:
                                # short row: pad the missing cells with None
                                output[variants[i]][line[0]]=None
                    else:
                        # data row in list mode: truncate/pad to header width
                        temp=[]
                        for i in range(len(variants)):
                            if i<len(line):
                                temp.append(line[i])
                            else:
                                temp.append(None)
                        output.append(temp)
                else:
                    # vector row with an empty key or too few cells: ignored
                    pass
            else:
                # still searching for the startString marker row
                if line:
                    if str(line[0]).strip() == startString:
                        save_data = True
        return output
def test_load_ods_section():
print(icGenerator.load_ods_section(test_path, "Footprint", stopString="Mechanical",\
vector=True, dictionary=True))
print(icGenerator.load_ods_section(test_path, "Footprint", stopString="Mechanical",\
vector=True, dictionary=False))
print(icGenerator.load_ods_section(test_path, "Footprint", startString="Footprint",\
vector=False, dictionary=True))
print(icGenerator.load_ods_section(test_path, "Footprint", startString="Mechanical",\
stopString="Footprint",vector=False, dictionary=False))
def test_load_parts_advanced():
print(icGenerator.load_parts_advanced(test_path))
    def load_symbol_advanced(fileName, symbolName):
        """
        Load symbol(s) named *symbolName* from the "Symbol" tab of an ODS file.

        Rows before the "Pinout" marker row are key/value parameters (e.g.
        "Width"); rows after it describe pins.  Returns a list of symbolIC
        objects, one per unit found in the pinout.
        """
        try:
            data = pyexcel_ods3.get_data(fileName)["Symbol"]
        except KeyError:
            raise ValueError("No \"Symbol\" tab found in " + fileName)
        except:
            raise ValueError("Error opening file " + fileName)
        # parser state: 0 = reading parameters, 1 = next row is the pinout
        # header, 2 = reading pin rows
        pinout_start=0
        params={}
        pinout=[]   # per unit: [pins-left, pins-right]
        symbols=[]
        col=0       # header column holding this symbol's data (0 = not found)
        for line in data:
            if line:
                if line[0]:
                    if pinout_start==1:
                        # header row: symbol names sit in every second column
                        # starting at index 4; locate the requested symbol
                        for i in range(4,len(line),2):
                            if str(line[i]).strip()==str(symbolName).strip():
                                col=i
                                break
                        pinout_start=2
                    elif pinout_start==2:
                        if col:
                            pin = line[0]
                            unit = line[1]
                            side = 0 if line[2]=="L" else 1
                            position = line[3]
                            # NOTE(review): `i` still holds the value of `col`
                            # left over from the header scan above; `line[col]`
                            # would make that explicit — confirm before
                            # changing.
                            pin_name = line[i]
                            direction = line[i+1]
                            # grow the pinout structure on demand
                            while len(pinout)<=unit:
                                pinout.append([[],[]])
                            while len(pinout[unit][side])<=position:
                                pinout[unit][side].append(None)
                            pinout[unit][side][position]=[pin_name, pin, direction]
                        else:
                            raise ValueError("Symbol %s not found in file %s" % (symbolName, fileName))
                    else:
                        if str(line[0]).lower()=="pinout":
                            # marker row: the next row is the pinout header
                            pinout_start = 1
                        else:
                            # parameter row: name in column 0, value in col 1
                            if len(line)>=2:
                                params[str(line[0]).strip()]=line[1]
                else:
                    # row with an empty first cell: ignored
                    pass
            else:
                # completely empty row: ignored
                pass
        i=0
        for sym in pinout:
            # multi-unit symbols get a "_<n>" name postfix and per-unit widths
            if len(pinout)>1:
                postfix = "_%d"%i
                width = params["Width%d"%i]
            else:
                postfix = ""
                width = params["Width"]
            symbols.append(symbolIC(symbolName + postfix,\
                pinsLeft=sym[0], pinsRight=sym[1], width=width, refDes=defaults.icRefDes,showPinNames=True, showPinNumbers=True))
            i=i+1
        return symbols
def test_load_symbol_advanced():
print(icGenerator.load_symbol_advanced(test_path))
"""
universal generator for IC symbols from xls files
"""
    def generate(fileName, pinNames=[], footprints=[], namePosfix="", symbolType="dual",size=0):
        """
        generate one or many parts from pinout file
        fileName: name of file with pinouts
        pinNames: pin configuration: [[pins left], [pins right],...[pins left], [pins right]]
            like: [["1","2","3"],["6","5","4"]] or [["1","2"],["6","5"],["4","7"],["8","9"]]
        footprints: list of footprint objects
        namePostfix: postfix to IC name read from pinout
        symbolType: "dual" (left/right pin columns) or "quad"
        size: symbol width ("dual") or size ("quad")

        NOTE(review): the mutable default `pinNames=[]` is shared across
        calls; it is only read or rebound below, so this is currently
        harmless — confirm before mutating it in place.
        """
        pinout=icGenerator.loadPinout(fileName)
        ret=[]
        if not pinNames:
            #auto generate
            if symbolType=="quad":
                nPins=len(pinout[0]["pins"])
                nSide=int((nPins)/4)
                #if additional pin above N*4 (thermal pad)
                plus=((nPins%4)>0)*1
                pinNames=np.array([[None]*(nSide+plus)]*4)
                # distribute the pins evenly over the four sides
                for side in range(4):
                    for i in range(nSide):
                        pinNames[side,i]=pinout[0]["pins"][i+side*nSide]
                if plus:
                    # place the extra pin (e.g. thermal pad) on the last side
                    pinNames[3,nSide]=pinout[0]["pins"][nPins-1]
            else:
                raise ValueError("Auto pinout for %s symbol type not implemented yet!" % symbolType)
        #for each pinout variant
        for v in pinout:
            # generate symbol(s)
            symbols=[]
            pins=[]
            # resolve every pin number to [name, number, type];
            # a None entry keeps a visual gap in the pin column
            for pinNameCol in pinNames:
                pinCol = []
                for p in pinNameCol:
                    if p:
                        p=str(p)
                        pinCol.append([v["pinNames"][p], p, v["pinTypes"][p]])
                    else:
                        pinCol.append(None)
                pins.append(pinCol)
            if symbolType=="dual":
                #for 1..2 pin columns - one symbol, for 2..3 - 2 etc.
                nSymbols = int((len(pinNames)+1)/2)
                for i in range(nSymbols):
                    if nSymbols>1:
                        symPostfix = "_%d" % i
                    else:
                        symPostfix = ""
                    symbols.append(symbolIC(v["name"]+symPostfix+namePosfix, pinsLeft=pins[i*2], pinsRight=pins[i*2+1],\
                        width=size, refDes=defaults.icRefDes,showPinNames=True, showPinNumbers=True))
            elif symbolType=="quad":
                symbols.append(symbolICquad(v["name"]+namePosfix,\
                    pins=pins,size=size))
            else:
                raise ValueError("invalid symbolType: %s!" % symbolType)
            # one part object per part name listed for this variant
            for p in v["partNames"]:
                _part = part(p+namePosfix, defaults.icRefDes)
                _part.symbols.extend(symbols)
                _part.footprints.extend(footprints)
                ret.append(_part)
        return ret
    def loadPinout(fileName):
        """
        Load pinout variants from the "pinout" sheet of an ODS file.

        Sheet layout as consumed below: row 0 holds variant names in every
        second column starting at column 2; row 1 holds newline-separated
        part names per variant; rows 2+ hold the pin number (column 0) and,
        per variant, a pin-type and a pin-name column.

        Returns a list with one dict per variant, keyed by "name",
        "partNames", "pins", "pinTypes" and "pinNames".
        """
        try:
            sheet = np.array(pyexcel_ods3.get_data(fileName)["pinout"])
            test=sheet[:,0] #check proper conversion to numpy.array
        except Exception as ex:
            print("Error! Maybe sheet contains empty cells (especially at ends of rows)?")
            raise ex
        rowV = sheet[0]
        # two columns (type, name) per version after the leading pin column
        nVersions = int((len(rowV)-1)/2)
        ret=[0]*nVersions #initialize return structure
        for nV in range(nVersions):
            ret[nV]={}
            ret[nV]["name"]=rowV[nV*2+2]
            # part names are newline-separated inside a single cell
            partNames=sheet[1,nV*2+2]
            partNames=partNames.split("\n")
            ret[nV]["partNames"]=partNames
            ret[nV]["pins"]=sheet[2:,0]
            pinTypes=sheet[2:,nV*2+1]
            pinNames=sheet[2:,nV*2+2]
            ret[nV]["pinTypes"]={}
            ret[nV]["pinNames"]={}
            # map pin number -> type / name for this variant
            for i in range(len(ret[nV]["pins"])):
                ret[nV]["pinTypes"][ret[nV]["pins"][i]]=pinType.fromStr[pinTypes[i]]
                ret[nV]["pinNames"][ret[nV]["pins"][i]]=pinNames[i]
        return ret
def _testLoadPinout():
"""
test for loadPinout function
"""
print(icGenerator.loadPinout("pinoutTest.ods"))
def _testGenerate():
"""
test for generate function
"""
fp=[footprintQfp(32, 0.8, density=density) for density in ["N", "L", "M"]]
pins=[["1","2",None,"3","4"],["5","6","7","8"]]
print(icGenerator.generate("pinoutTest.ods",pins,fp,""))
def _testGenerate2():
"""
test for generate function
"""
print(icGenerator.generate("pinoutTest.ods",symbolType="quad"))
# Running this module directly exercises the footprint loader on test_path.
if __name__ == "__main__":
    icGenerator.test_load_footprints_advanced()
| # -*- coding: utf-8 -*-
"""
Created on Sat Jan 16 14:27:28 2016
@author: piotr at nicecircuits.com
"""
from libraryManager.symbol import symbol
from libraryManager.symbolPrimitive import *
from libraryManager.defaults import defaults
from libraryManager.part import part
from symbols.symbolsIC import *
from footprints.footprintSmdQuad import *
from libraryManager.footprintPrimitive import *
import pyexcel_ods3
import numpy as np
test_path=r"D:\pcbLibraryManager\src\pcbLibraryManager\libraries\STM32_LQFP48.ods"
class icGenerator():
"""
universal generator for IC symbols from xls files
"""
    def generate_advanced(pinout_file_name, symbol_file_name="", footprint_file_name=""):
        """
        generate one or many parts from pinout file. Read more data from file.
        pinout_file_name: name of file with pinouts ("Part" tab)
        symbol_file_name: name of file with symbols (defaults to the pinout file)
        footprint_file_name: name of file with footprints (defaults to the pinout file)

        Returns a list of part objects.
        """
        ret=[]
        # Fall back to the pinout file when no dedicated symbol/footprint
        # files are given.
        if symbol_file_name=="":
            symbol_file_name = pinout_file_name
        if footprint_file_name=="":
            footprint_file_name = pinout_file_name
        parts=icGenerator.load_parts_advanced(pinout_file_name)
        for part_descr in parts:
            # skip empty rows and rows with no part name in column 0
            if part_descr:
                if part_descr[0]:
                    new_part=part(part_descr[0], defaults.icRefDes)
                    # column 1 holds the symbol name for this part
                    symbols = icGenerator.load_symbol_advanced(symbol_file_name, part_descr[1])
                    new_part.symbols.extend(symbols)
                    # NOTE(review): every footprint in the file is attached to
                    # every part; the commented-out part_descr[2] argument
                    # suggests per-part footprint selection was planned.
                    footprints = icGenerator.load_footprints_advanced(footprint_file_name) #, part_descr[2]
                    new_part.footprints.extend(footprints)
                    ret.append(new_part)
        return ret
        # todo: alternate part (unreachable notes kept from the original)
        #new_part=part(part_descr[0]+"_alt", defaults.icRefDes)
def load_footprints_advanced(fileName):
ret=[]
params = icGenerator.load_ods_section(fileName, "Footprint", \
stopString="Mechanical", vector=True, dictionary=True)
mechanical = icGenerator.load_ods_section(fileName, "Footprint", \
startString="Mechanical", stopString="Footprint", vector=False, dictionary=True)
footprint = icGenerator.load_ods_section(fileName, "Footprint", \
startString="Footprint", vector=False, dictionary=True)
for variant in footprint.keys():
if params["Type"]=="QFP":
ret.append(footprintQfpParametrized(params, mechanical, footprint, variant))
if params["Type"]=="QFN":
ret.append(footprintQfnParametrized(params, mechanical, footprint, variant))
else:
raise ValueError("Footprint type %s found in %s unsupported" % (params["Type"], fileName))
return ret
def test_load_footprints_advanced():
print(icGenerator.load_footprints_advanced(test_path))
def load_parts_advanced(fileName):
"""
Load parts description from pinout file.
"""
ret = icGenerator.load_ods_section(fileName, "Part", vector=False)
return ret[1:]
def load_ods_sheet(fileName, sheetName):
try:
data = pyexcel_ods3.get_data(fileName)[sheetName]
except KeyError:
raise ValueError("No \"%s\" tab found in %s" %(sheetName, fileName))
except:
raise ValueError("Error opening file " + fileName)
return data
def load_ods_section(fileName, sheetName, startString="", stopString="", \
vector=False, dictionary=False):
"""
data:
a,1
b,2
read as vector and dictionary:
{"a":1' "b":2}
read vector and not dictionary:
[["a",1], ["b",2]]
data:
,v1,v2
a,1,2
b,3,4,5,6
read as not vector and not dictionary:
[[,"v1","v2"],
["a",1,2],
["b",3,4]]
read as not vector and dictionary:
{"v1":{"a":1,"b":3},
"v2":{"a":2,"b":4}}
"""
if dictionary:
output={}
elif vector:
output=[]
else:
output=[[]]
data=icGenerator.load_ods_sheet(fileName, sheetName)
header = True
variants=[]
if startString:
save_data=False
else:
save_data=True
for line in data:
if line and save_data:
if stopString and line[0]==stopString:
break
if vector and line[0] and len(line)>=2:
if dictionary:
output[str(line[0]).strip()]=line[1]
else:
output.append([str(line[0]).strip(), line[1]])
elif not vector:
if header:
header=False
start=1 if dictionary else 0
for name in line[start:]:
name=str(name).strip()
if dictionary:
output[name]={}
else:
output[0].append(name)
variants.append(name)
elif dictionary:
for i in range(len(variants)):
if i<(len(line)-1):
output[variants[i]][line[0]]=line[i+1]
else:
output[variants[i]][line[0]]=None
else:
temp=[]
for i in range(len(variants)):
if i<len(line):
temp.append(line[i])
else:
temp.append(None)
output.append(temp)
else:
pass
else:
if line:
if str(line[0]).strip() == startString:
save_data = True
return output
def test_load_ods_section():
print(icGenerator.load_ods_section(test_path, "Footprint", stopString="Mechanical",\
vector=True, dictionary=True))
print(icGenerator.load_ods_section(test_path, "Footprint", stopString="Mechanical",\
vector=True, dictionary=False))
print(icGenerator.load_ods_section(test_path, "Footprint", startString="Footprint",\
vector=False, dictionary=True))
print(icGenerator.load_ods_section(test_path, "Footprint", startString="Mechanical",\
stopString="Footprint",vector=False, dictionary=False))
def test_load_parts_advanced():
print(icGenerator.load_parts_advanced(test_path))
def load_symbol_advanced(fileName, symbolName):
try:
data = pyexcel_ods3.get_data(fileName)["Symbol"]
except KeyError:
raise ValueError("No \"Symbol\" tab found in " + fileName)
except:
raise ValueError("Error opening file " + fileName)
pinout_start=0
params={}
pinout=[]
symbols=[]
col=0
for line in data:
if line:
if line[0]:
if pinout_start==1:
for i in range(4,len(line),2):
if str(line[i]).strip()==str(symbolName).strip():
col=i
break
pinout_start=2
elif pinout_start==2:
if col:
pin = line[0]
unit = line[1]
side = 0 if line[2]=="L" else 1
position = line[3]
pin_name = line[i]
direction = line[i+1]
while len(pinout)<=unit:
pinout.append([[],[]])
while len(pinout[unit][side])<=position:
pinout[unit][side].append(None)
pinout[unit][side][position]=[pin_name, pin, direction]
else:
raise ValueError("Symbol %s not found in file %s" % (symbolName, fileName))
else:
if str(line[0]).lower()=="pinout":
pinout_start = 1
else:
if len(line)>=2:
params[str(line[0]).strip()]=line[1]
else:
pass
else:
pass
i=0
for sym in pinout:
if len(pinout)>1:
postfix = "_%d"%i
width = params["Width%d"%i]
else:
postfix = ""
width = params["Width"]
symbols.append(symbolIC(symbolName + postfix,\
pinsLeft=sym[0], pinsRight=sym[1], width=width, refDes=defaults.icRefDes,showPinNames=True, showPinNumbers=True))
i=i+1
return symbols
def test_load_symbol_advanced():
print(icGenerator.load_symbol_advanced(test_path))
"""
universal generator for IC symbols from xls files
"""
def generate(fileName, pinNames=[], footprints=[], namePosfix="", symbolType="dual",size=0):
"""
generate one or many parts from pinout file
fileName: name of file with pinouts
pinNames: pin configuration: [[pins left], [pins right],...[pins left], [pins right]]
like: [["1","2","3"],["6","5","4"]] or [["1","2"],["6","5"],["4","7"],["8","9"]]
footprints: list of footprint objects
namePostfix: postfix to IC name read from pinout
"""
pinout=icGenerator.loadPinout(fileName)
ret=[]
if not pinNames:
#auto generate
if symbolType=="quad":
nPins=len(pinout[0]["pins"])
nSide=int((nPins)/4)
#if additional pin above N*4 (thermal pad)
plus=((nPins%4)>0)*1
pinNames=np.array([[None]*(nSide+plus)]*4)
for side in range(4):
for i in range(nSide):
pinNames[side,i]=pinout[0]["pins"][i+side*nSide]
if plus:
pinNames[3,nSide]=pinout[0]["pins"][nPins-1]
else:
raise ValueError("Auto pinout for %s symbol type not implemented yet!" % symbolType)
#for each pinout variant
for v in pinout:
# generate symbol(s)
symbols=[]
pins=[]
for pinNameCol in pinNames:
pinCol = []
for p in pinNameCol:
if p:
p=str(p)
pinCol.append([v["pinNames"][p], p, v["pinTypes"][p]])
else:
pinCol.append(None)
pins.append(pinCol)
if symbolType=="dual":
#for 1..2 pin columns - one symbol, for 2..3 - 2 etc.
nSymbols = int((len(pinNames)+1)/2)
for i in range(nSymbols):
if nSymbols>1:
symPostfix = "_%d" % i
else:
symPostfix = ""
symbols.append(symbolIC(v["name"]+symPostfix+namePosfix, pinsLeft=pins[i*2], pinsRight=pins[i*2+1],\
width=size, refDes=defaults.icRefDes,showPinNames=True, showPinNumbers=True))
elif symbolType=="quad":
symbols.append(symbolICquad(v["name"]+namePosfix,\
pins=pins,size=size))
else:
raise ValueError("invalid symbolType: %s!" % symbolType)
for p in v["partNames"]:
_part = part(p+namePosfix, defaults.icRefDes)
_part.symbols.extend(symbols)
_part.footprints.extend(footprints)
ret.append(_part)
return ret
def loadPinout(fileName):
"""
Load pinout from ods file.
"""
try:
sheet = np.array(pyexcel_ods3.get_data(fileName)["pinout"])
test=sheet[:,0] #check proper conversion to numpy.array
except Exception as ex:
print("Error! Maybe sheet contains empty cells (especially at ends of rows)?")
raise ex
rowV = sheet[0]
nVersions = int((len(rowV)-1)/2)
ret=[0]*nVersions #initialize return structure
for nV in range(nVersions):
ret[nV]={}
ret[nV]["name"]=rowV[nV*2+2]
partNames=sheet[1,nV*2+2]
partNames=partNames.split("\n")
ret[nV]["partNames"]=partNames
ret[nV]["pins"]=sheet[2:,0]
pinTypes=sheet[2:,nV*2+1]
pinNames=sheet[2:,nV*2+2]
ret[nV]["pinTypes"]={}
ret[nV]["pinNames"]={}
for i in range(len(ret[nV]["pins"])):
ret[nV]["pinTypes"][ret[nV]["pins"][i]]=pinType.fromStr[pinTypes[i]]
ret[nV]["pinNames"][ret[nV]["pins"][i]]=pinNames[i]
return ret
def _testLoadPinout():
"""
test for loadPinout function
"""
print(icGenerator.loadPinout("pinoutTest.ods"))
def _testGenerate():
"""
test for generate function
"""
fp=[footprintQfp(32, 0.8, density=density) for density in ["N", "L", "M"]]
pins=[["1","2",None,"3","4"],["5","6","7","8"]]
print(icGenerator.generate("pinoutTest.ods",pins,fp,""))
def _testGenerate2():
"""
test for generate function
"""
print(icGenerator.generate("pinoutTest.ods",symbolType="quad"))
if __name__ == "__main__":
icGenerator.test_load_footprints_advanced()
| en | 0.73403 | # -*- coding: utf-8 -*- Created on Sat Jan 16 14:27:28 2016 @author: piotr at nicecircuits.com universal generator for IC symbols from xls files generate one or many parts from pinout file. Read more data from file. pinout_file_name: name of file with pinouts symbol_file_name: name of file with symbols footprint_file_name: name of file with footprints #, part_descr[2] # todo: alternate part #new_part=part(part_descr[0]+"_alt", defaults.icRefDes) Load parts description from pinout file. data: a,1 b,2 read as vector and dictionary: {"a":1' "b":2} read vector and not dictionary: [["a",1], ["b",2]] data: ,v1,v2 a,1,2 b,3,4,5,6 read as not vector and not dictionary: [[,"v1","v2"], ["a",1,2], ["b",3,4]] read as not vector and dictionary: {"v1":{"a":1,"b":3}, "v2":{"a":2,"b":4}} universal generator for IC symbols from xls files generate one or many parts from pinout file fileName: name of file with pinouts pinNames: pin configuration: [[pins left], [pins right],...[pins left], [pins right]] like: [["1","2","3"],["6","5","4"]] or [["1","2"],["6","5"],["4","7"],["8","9"]] footprints: list of footprint objects namePostfix: postfix to IC name read from pinout #auto generate #if additional pin above N*4 (thermal pad) #for each pinout variant # generate symbol(s) #for 1..2 pin columns - one symbol, for 2..3 - 2 etc. Load pinout from ods file. #check proper conversion to numpy.array #initialize return structure test for loadPinout function test for generate function test for generate function | 2.208728 | 2 |
mpf/devices/__init__.py | Scottacus64/mpf | 163 | 6614127 | <filename>mpf/devices/__init__.py
"""Module which contains all devices in MPF."""
| <filename>mpf/devices/__init__.py
"""Module which contains all devices in MPF."""
| en | 0.952721 | Module which contains all devices in MPF. | 1.312481 | 1 |
experimental/python/gui/hacks/docgen/__init__.py | skylarmt/Embroidermodder | 1 | 6614128 | __author__ = "<NAME> (<NAME>)"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2015 Embroidermodder"
__license__ = "zlib/libpng"
__version__ = "1.0"
| __author__ = "<NAME> (<NAME>)"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2015 Embroidermodder"
__license__ = "zlib/libpng"
__version__ = "1.0"
| none | 1 | 1.019349 | 1 | |
polecat/deploy/event.py | furious-luke/polecat | 4 | 6614129 | <reponame>furious-luke/polecat
from polecat.db.session import Session
class Event:
    """Wraps a raw deploy event payload together with a database session."""
    def __init__(self, event):
        # Keep the raw payload and open a fresh DB session for this event.
        self.event = event
        self.session = Session()

    def is_http(self):
        """Base events do not originate from HTTP."""
        return False

    def is_admin(self):
        """Return True when the payload marks this as an admin event."""
        event_kind = self.event.get('event')
        return event_kind == 'admin'
class HttpEvent(Event):
    """An Event that originated from an HTTP request."""
    def __init__(self, event, request=None):
        super().__init__(event)
        # With no explicit (truthy) request object, the event itself serves
        # as the request — same truthiness-based fallback as before.
        self.request = request or event

    def is_http(self):
        """HTTP events always identify as HTTP."""
        return True

    def get_authorization_header(self):
        """Return the request's Authorization header value, or None."""
        header_value = self.request.headers.get('authorization', None)
        return header_value
| from polecat.db.session import Session
class Event:
def __init__(self, event):
self.event = event
self.session = Session()
def is_http(self):
return False
def is_admin(self):
return self.event.get('event') == 'admin'
class HttpEvent(Event):
def __init__(self, event, request=None):
super().__init__(event)
self.request = request or event
def is_http(self):
return True
def get_authorization_header(self):
return self.request.headers.get('authorization', None) | none | 1 | 2.590127 | 3 | |
exps/default/yolox_m_multi_match.py | CatchBeliF-TAT/YOLOX_new | 1 | 6614130 | <reponame>CatchBeliF-TAT/YOLOX_new
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import os
import torch.nn as nn
from yolox.exp import Exp as MyExp
class Exp(MyExp):
    def __init__(self):
        """Configure the YOLOX-m experiment variant of the base Exp."""
        super(Exp, self).__init__()
        self.depth = 0.67  # depth scale passed to YOLOPAFPN in get_model
        self.width = 0.75  # width scale passed to YOLOPAFPN/YOLOXHead
        # Experiment name: this file's basename without its extension.
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
def get_model(self):
from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
def init_yolo(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03
if getattr(self, "model", None) is None:
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, multi_match=True)
self.model = YOLOX(backbone, head)
self.model.apply(init_yolo)
self.model.head.initialize_biases(1e-2)
return self.model | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import os
import torch.nn as nn
from yolox.exp import Exp as MyExp
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.depth = 0.67
self.width = 0.75
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
def get_model(self):
from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
def init_yolo(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03
if getattr(self, "model", None) is None:
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, multi_match=True)
self.model = YOLOX(backbone, head)
self.model.apply(init_yolo)
self.model.head.initialize_biases(1e-2)
return self.model | en | 0.719161 | #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. | 2.23976 | 2 |
circlefixedpts.py | cosmic1998/RDS-and-ET-coding | 0 | 6614131 | import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
# Number of alpha samples to take in the interval (0, a].
num_pts = 50
# Parameter a; the arcsin(alpha/a) below requires alpha <= a.
a = 0.1
alphas = np.linspace(0.01,a,num_pts)
# Create an empty figure for the scatter plots.
fig = plt.figure()
# Accumulators for the two families of fixed points.
y_list = []
z_list = []
# Compute y_alpha = arcsin(alpha/a)/(2*pi) and its partner
# z_alpha = 1/2 - y_alpha for every sampled alpha.
for i in range(num_pts):
    y_alpha = np.arcsin(alphas[i]/a)/(2*np.pi)
    z_alpha = 0.5-y_alpha
    y_list.append(y_alpha)
    z_list.append(z_alpha)
# Plot both families of fixed points, reduced modulo 1 to lie on the circle.
plt.scatter(alphas,np.mod(y_list,1),c='black',alpha=1,label=r'$y_ \alpha$')
plt.scatter(alphas,np.mod(z_list,1),c='green',alpha=0.6,label=r'$z_ \alpha$')
plt.title(r"Plot of fixed points as $\alpha$ approaches $a$")
plt.xlabel(r"$\alpha$")
plt.ylabel("Fixed point")
plt.legend()
plt.show()
| import numpy as np
import scipy as sp
from matplotlib import pyplot as plt

# Number of alpha samples to take in the interval (0, a].
num_pts = 50
# Parameter a; the arcsin(alpha/a) below requires alpha <= a.
a = 0.1
alphas = np.linspace(0.01, a, num_pts)

# Create an empty figure for the scatter plots.
fig = plt.figure()

# Fixed points: y_alpha = arcsin(alpha/a)/(2*pi) and z_alpha = 1/2 - y_alpha.
# Computed vectorised over the whole alpha array instead of the original
# per-element Python loop with list appends (same values, one expression).
y_vals = np.arcsin(alphas / a) / (2 * np.pi)
z_vals = 0.5 - y_vals

# Plot both families of fixed points, reduced modulo 1 to lie on the circle.
plt.scatter(alphas, np.mod(y_vals, 1), c='black', alpha=1, label=r'$y_ \alpha$')
plt.scatter(alphas, np.mod(z_vals, 1), c='green', alpha=0.6, label=r'$z_ \alpha$')
plt.title(r"Plot of fixed points as $\alpha$ approaches $a$")
plt.xlabel(r"$\alpha$")
plt.ylabel("Fixed point")
plt.legend()
plt.show()
| en | 0.803444 | #Fix number of points in the range to consider #fix a #Create empty figure #Create empty list #Compute y_alpha and z_alpha #Plot graph of fixed points | 3.383685 | 3 |
tests/run_tests.py | lingyunan0510/VIC | 1 | 6614132 | #!/usr/bin/env python
'''VIC testing command line interface'''
from __future__ import print_function
import os
import sys
import glob
import argparse
import datetime
from collections import OrderedDict
import string
import warnings
import pytest
from tonic.models.vic.vic import VIC, default_vic_valgrind_suppressions_path
from tonic.io import read_config, read_configobj
from tonic.testing import VICTestError
from test_utils import (
setup_test_dirs, print_test_dict,
replace_global_values, drop_tests, pop_run_kwargs,
check_returncode, process_error,
test_classic_driver_all_complete,
test_classic_driver_no_output_file_nans,
find_global_param_value,
check_multistream_classic,
setup_subdirs_and_fill_in_global_param_driver_match_test,
check_drivers_match_fluxes,
plot_science_tests)
from test_image_driver import (test_image_driver_no_output_file_nans,
setup_subdirs_and_fill_in_global_param_mpi_test,
check_mpi_fluxes, check_mpi_states)
from test_restart import (prepare_restart_run_periods,
setup_subdirs_and_fill_in_global_param_restart_test,
check_exact_restart_fluxes,
check_exact_restart_states)
# Directory containing this test script; config-file defaults are relative
# to it.
test_dir = os.path.dirname(os.path.abspath(__file__))
# Set path to the valgrind suppressions file if not already set.
if 'VIC_VALGRIND_SUPPRESSIONS' not in os.environ:
    sup_file = os.path.join(test_dir, default_vic_valgrind_suppressions_path)
    if os.path.isfile(sup_file):
        os.environ["VIC_VALGRIND_SUPPRESSIONS"] = sup_file
# Width (in characters) of the formatted console banners printed below.
OUTPUT_WIDTH = 100
description = '''
VIC Test Suite
-------------------------------------------------------------------------------
This is the VIC Test Suite. There are six main test types:
1. unit: function level tests.
2. system: tests that aim to address model runtime issues. These tests
are generally very quick.
* configuration errors - tests that address model startup and error
checking.
* restart: tests that address model state and restart capacity.
* I/O: tests that address model input and output functionality.
* forcings come out the way the come in
* parameter files are appropriately read in and allocated.
3. science: tests that aim to assess the model's scientific skill.
Many of these tests are compared to observations of some kind.
4. examples: a set of examples that users may download and run.
5. release: longer, full domain simulations performed prior to release
demonstrating model output for a final release.
-------------------------------------------------------------------------------
'''
epilog = '''
-------------------------------------------------------------------------------
For questions about the development or use of VIC or use of this test module,
please email the VIC users list serve at <EMAIL>.
-------------------------------------------------------------------------------
'''
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    # Combine the two stock argparse formatters so --help both shows argument
    # defaults and preserves the raw formatting of description/epilog text.
    pass
class TestResults(object):
    """Container for the outcome of a single test.

    Parameters
    ----------
    name : str
        Name of the test.
    test_complete : bool
        Whether the test ran to completion.
    passed : bool
        Whether the test passed.
    comment : str
        Free-form comment about the outcome.
    error_message : str
        Error message captured from a failed test.
    returncode : int or None
        Process return code of the test, if any.
    """
    def __init__(self, name, test_complete=False, passed=False,
                 comment='', error_message='', returncode=None):
        self.name = name
        self.test_complete = test_complete
        self.passed = passed
        self.comment = comment
        self.error_message = error_message
        self.returncode = returncode

    def __repr__(self):
        # Multi-line, human-readable summary (used when repr() is taken).
        r = '''
{0} test results:
    Passed: {1}
    Comment:{2}
    Return Code: {3}
'''.format(self.name, self.passed, self.comment, self.returncode)
        return r

    def __str__(self):
        # Compact single-line summary.
        return '{0} test results:Passed: {1}, Comment:{2}'.format(self.name,
                                                                  self.passed,
                                                                  self.comment)
def main():
    '''
    Run VIC tests

    Parses the command line, runs the requested test sets, prints a summary
    and exits with the total number of failed tests as the return code.
    '''
    # dates and times
    starttime = datetime.datetime.now()
    ymd = starttime.strftime('%Y%m%d')
    # Parse arguments
    test_results = OrderedDict()
    parser = argparse.ArgumentParser(description=description, epilog=epilog,
                                     formatter_class=CustomFormatter)
    parser.add_argument('tests', type=str,
                        help='Test sets to run',
                        choices=['all', 'unit', 'system', 'science',
                                 'examples', 'release'],
                        default=['unit', 'system'], nargs='+')
    parser.add_argument('--system', type=str,
                        help='system tests configuration file',
                        default=os.path.join(test_dir,
                                             'system/system_tests.cfg'))
    parser.add_argument('--science', type=str,
                        help='science tests configuration file',
                        default=os.path.join(test_dir, 'science/science.cfg'))
    parser.add_argument('--examples', type=str,
                        help='examples tests configuration file',
                        default=os.path.join(test_dir,
                                             'examples/examples.cfg'))
    parser.add_argument('--release', type=str,
                        help='release tests configuration file',
                        default=os.path.join(test_dir, 'release/release.cfg'))
    parser.add_argument('--classic', type=str,
                        help='classic driver executable to test')
    parser.add_argument('--image', type=str,
                        help='image driver executable to test')
    parser.add_argument('--output_dir', type=str,
                        help='directory to write test output to',
                        default='$WORKDIR/VIC_tests_{0}'.format(ymd))
    parser.add_argument('--data_dir', type=str,
                        help='directory to find test data',
                        default='./samples/VIC_sample_data')
    parser.add_argument('--science_test_data_dir', type=str,
                        help='directory to find science test data',
                        default='./samples/VIC_sample_data')
    parser.add_argument('--nproc', type=int,
                        help='number of processors to use for science tests',
                        default=1)
    args = parser.parse_args()
    # Define test directories
    data_dir = args.data_dir
    # output_dir may contain environment variables (e.g. $WORKDIR)
    out_dir = os.path.expandvars(args.output_dir)
    os.makedirs(out_dir, exist_ok=True)
    # check to make sure science test data directory exists
    science_test_data_dir = args.science_test_data_dir
    if 'science' in args.tests and not os.path.exists(science_test_data_dir):
        raise VICTestError('directory for science test data does not exist or '
                           'has not been defined')
    # Validate input directories (unit-only runs need no test data)
    if not (len(args.tests) == 1 and args.tests[0] == 'unit'):
        for d in [data_dir, test_dir]:
            if not os.path.exists(d):
                raise VICTestError('Directory: {0} does not exist'.format(d))
    # Print welcome information
    print(description)
    print('\nStarting tests now...Start Time: {0}\n'.format(starttime))
    print('Running Test Set: {0}'.format(', '.join(args.tests)))
    # Setup VIC executable
    # --- if not only unit test --- #
    # NOTE(review): dict_drivers is only bound inside this branch; the
    # system/science/examples branches below rely on it having executed —
    # a unit-only run never reaches them, but verify no other path can.
    if not (len(args.tests) == 1 and args.tests[0] == 'unit'):
        dict_drivers = {}
        if args.classic:
            dict_drivers['classic'] = VIC(args.classic)
            print('VIC classic version information:\n\n{0}'.format(
                dict_drivers['classic'].version.decode()))
        if args.image:
            dict_drivers['image'] = VIC(args.image)
            print('VIC image version information:\n\n{0}'.format(
                dict_drivers['image'].version.decode()))
    # run test sets
    # unit
    if any(i in ['all', 'unit'] for i in args.tests):
        test_results['unit'] = run_unit_tests(test_dir)
    # system
    if any(i in ['all', 'system'] for i in args.tests):
        test_results['system'] = run_system(args.system, dict_drivers,
                                            data_dir,
                                            os.path.join(out_dir,
                                                         'system'))
    # science (classic driver only)
    if any(i in ['all', 'science'] for i in args.tests):
        test_results['science'] = run_science(
            args.science, dict_drivers['classic'],
            science_test_data_dir,
            data_dir,
            os.path.join(out_dir, 'science'),
            'classic',
            args.nproc)
    # examples
    if any(i in ['all', 'examples'] for i in args.tests):
        if len(dict_drivers) == 1:  # if only one driver
            driver = list(dict_drivers.keys())[0]
            vic_exe = dict_drivers[driver]
            test_results['examples'] = run_examples(args.examples, vic_exe,
                                                    data_dir,
                                                    os.path.join(
                                                        out_dir, 'examples'),
                                                    driver)
        else:
            raise ValueError('example test only supports single driver')
    # release
    if any(i in ['all', 'release'] for i in args.tests):
        test_results['release'] = run_release(args.release)
    # Print test results and count failures per test set
    summary = OrderedDict()
    failed = 0
    print('\nTest Results:')
    for test_set, results in test_results.items():
        print('-'.ljust(OUTPUT_WIDTH, '-'))
        print(test_set.center(OUTPUT_WIDTH))
        print('-'.ljust(OUTPUT_WIDTH, '-'))
        print_test_dict(results)
        summary[test_set] = 0
        for r in results.values():
            if not r.passed:
                summary[test_set] += 1
        failed += summary[test_set]
    print('\nTest Summary:')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    for test_set, r in summary.items():
        print('Failed tests in {0}: {1}'.format(test_set, r))
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # end date and times
    endtime = datetime.datetime.now()
    elapsed = endtime - starttime
    print('\nFinished testing VIC. Endtime: {0}'.format(endtime))
    print('Time elapsed during testing: {0}\n'.format(elapsed))
    # return exit code: number of failed tests (0 means success)
    sys.exit(failed)
def run_unit_tests(test_dir):
    '''Run unittests in test_dir

    Parameters
    ----------
    test_dir : str
        Path to unittests

    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.

    See Also
    --------
    run_system
    run_examples
    run_science
    run_release
    '''
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Unit Tests')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # pytest.main returns 0 when all collected tests pass
    retcode = pytest.main(['-x', os.path.join(test_dir, 'unit'), '--boxed'])
    # BUGFIX: these closing banners were placed after the return statement
    # and therefore never executed; print them before returning.
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Finished unit tests.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return {'unittests': TestResults('unittests',
                                     test_complete=True,
                                     passed=retcode == 0,
                                     comment='see stdout from pytest',
                                     returncode=retcode)}
def run_system(config_file, dict_drivers, test_data_dir, out_dir):
    '''Run system tests from config file

    Parameters
    ----------
    config_file : str
        Configuration file for system tests.
    dict_drivers : dict
        Keys: driver names {'classic', 'image'}
        Content: corresponding VIC executable object (see tonic documentation)
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location

    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.

    See Also
    --------
    run_unit_tests
    run_examples
    run_science
    run_release
    '''
    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running System Tests')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # Get setup
    config = read_configobj(config_file)
    # Process driver info
    if len(dict_drivers) == 1:  # if single driver
        driver = list(dict_drivers.keys())[0]
        vic_exe = dict_drivers[driver]
    # Drop invalid driver tests
    if len(dict_drivers) == 1:  # if single driver
        config = drop_tests(config, driver)
    else:  # if multiple drivers
        config = drop_tests(config, list(dict_drivers))
    test_results = OrderedDict()
    # Run individual system tests
    for i, (testname, test_dict) in enumerate(config.items()):
        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 testname))
        # Setup directories for test
        dirs = setup_test_dirs(testname, out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])
        # read template global parameter file
        dict_global_param = {}
        # --- if single driver --- #
        if len(dict_drivers) == 1:
            infile = os.path.join(test_dir, 'system',
                                  test_dict['global_parameter_file'])
            with open(infile, 'r') as global_file:
                dict_global_param[driver] = global_file.read()
        # --- if multiple drivers --- #
        else:
            for j, dr in enumerate(test_dict['driver']):
                infile = os.path.join(test_dir, 'system',
                                      test_dict['global_parameter_file'][j])
                with open(infile, 'r') as global_file:
                    dict_global_param[dr] = global_file.read()
        # If restart test, prepare running periods
        if 'exact_restart' in test_dict['check']:
            if len(dict_drivers) > 1:
                # (space added: message previously read "restarttests!")
                raise ValueError('Only support single driver for restart '
                                 'tests!')
            global_param = dict_global_param[driver]
            # (1) Find STATE_FORMAT option for later use
            if driver == 'classic':
                state_format = find_global_param_value(global_param,
                                                       'STATE_FORMAT')
            # (2) Prepare running periods and initial state file info for
            # restart test
            run_periods = prepare_restart_run_periods(
                test_dict['restart'],
                dirs['state'])
        # If mpi test, prepare a list of number of processors to be run
        elif 'mpi' in test_dict['check']:
            if len(dict_drivers) > 1:
                raise ValueError('Only support single driver for MPI '
                                 'tests!')
            if not isinstance(test_dict['mpi']['n_proc'], list):
                raise ValueError('Need at least two values in n_proc to run '
                                 'mpi test!')
            list_n_proc = test_dict['mpi']['n_proc']
        # create template string
        dict_s = {}
        for dr, global_param in dict_global_param.items():
            dict_s[dr] = string.Template(global_param)
        # fill in global parameter options
        # --- if restart test, multiple runs --- #
        if 'exact_restart' in test_dict['check']:
            s = dict_s[driver]
            # Set up subdirectories and fill in global parameter options
            # for restart testing
            list_global_param =\
                setup_subdirs_and_fill_in_global_param_restart_test(
                    s, run_periods, driver, dirs['results'], dirs['state'],
                    test_data_dir)
        # --- if mpi test, multiple runs --- #
        elif 'mpi' in test_dict['check']:
            s = dict_s[driver]
            # Set up subdirectories and output directories in global file for
            # multiprocessor testing
            list_global_param = \
                setup_subdirs_and_fill_in_global_param_mpi_test(
                    s, list_n_proc, dirs['results'], dirs['state'],
                    test_data_dir)
        # --- if driver-match test, one run for each driver --- #
        elif 'driver_match' in test_dict['check']:
            # Set up subdirectories and output directories in global file for
            # driver-match testing
            dict_global_param = \
                setup_subdirs_and_fill_in_global_param_driver_match_test(
                    dict_s, dirs['results'], dirs['state'], test_data_dir)
        # --- else, single run --- #
        else:
            if len(dict_drivers) > 1:
                raise RuntimeError('Only support single driver for test '
                                   '{}!'.format(testname))
            s = dict_s[driver]
            global_param = s.safe_substitute(test_data_dir=test_data_dir,
                                             result_dir=dirs['results'],
                                             state_dir=dirs['state'])
        # replace global options from config file
        # --- extract global options to be substitute --- #
        if 'options' in test_dict:
            replacements = test_dict['options']
        else:
            replacements = OrderedDict()
        # --- replace global options --- #
        # For the purpose of exact restart, if STATE_FORMAT is specified,
        # then record the specified value (instead of the one in the global
        # template file)
        if 'exact_restart' in test_dict['check']:
            if 'STATE_FORMAT' in replacements:
                state_format = replacements['STATE_FORMAT']
        if 'exact_restart' in test_dict['check'] or\
                'mpi' in test_dict['check']:  # if multiple runs
            for j, gp in enumerate(list_global_param):
                # save a copy of replacements for the next global file
                # (replace_global_values consumes entries from replacements)
                replacements_cp = replacements.copy()
                # replace global options for this global file
                list_global_param[j] = replace_global_values(gp, replacements)
                replacements = replacements_cp
        elif 'driver_match' in test_dict['check']:  # if cross-driver runs
            for dr, gp in dict_global_param.items():
                # save a copy of replacements for the next global file
                replacements_cp = replacements.copy()
                # replace global options for this global file
                dict_global_param[dr] = replace_global_values(gp,
                                                              replacements)
                replacements = replacements_cp
        else:  # if single run
            global_param = replace_global_values(global_param, replacements)
        # write global parameter file
        if 'exact_restart' in test_dict['check']:
            list_test_global_file = []
            for j, gp in enumerate(list_global_param):
                test_global_file = os.path.join(
                    dirs['test'],
                    '{}_globalparam_{}_{}.txt'.format(
                        testname,
                        run_periods[j]['start_date'].strftime("%Y%m%d"),
                        run_periods[j]['end_date'].strftime("%Y%m%d")))
                list_test_global_file.append(test_global_file)
                with open(test_global_file, mode='w') as f:
                    for line in gp:
                        f.write(line)
        elif 'mpi' in test_dict['check']:
            list_test_global_file = []
            for j, gp in enumerate(list_global_param):
                test_global_file = os.path.join(
                    dirs['test'],
                    '{}_globalparam_processors_{}.txt'.format(
                        testname, list_n_proc[j]))
                list_test_global_file.append(test_global_file)
                with open(test_global_file, mode='w') as f:
                    for line in gp:
                        f.write(line)
        elif 'driver_match' in test_dict['check']:
            dict_test_global_file = {}
            for dr, gp in dict_global_param.items():
                test_global_file = os.path.join(
                    dirs['test'],
                    '{}_globalparam_{}.txt'.format(
                        testname, dr))
                dict_test_global_file[dr] = test_global_file
                with open(test_global_file, mode='w') as f:
                    for line in gp:
                        f.write(line)
        else:
            test_global_file = os.path.join(
                dirs['test'],
                '{0}_globalparam.txt'.format(testname))
            with open(test_global_file, mode='w') as f:
                for line in global_param:
                    f.write(line)
        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)
        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        # Fix: initialize returncode so the TestResults record below never
        # raises NameError when vic_exe.run() itself throws before assigning.
        returncode = None
        try:
            if 'exact_restart' in test_dict['check']:
                for j, test_global_file in enumerate(list_test_global_file):
                    returncode = vic_exe.run(test_global_file,
                                             logdir=dirs['logs'],
                                             **run_kwargs)
                    # Check return code
                    check_returncode(vic_exe,
                                     test_dict.pop('expected_retval', 0))
            elif 'mpi' in test_dict['check']:
                for j, test_global_file in enumerate(list_test_global_file):
                    # Overwrite mpi_proc in option kwargs
                    n_proc = list_n_proc[j]
                    if n_proc == 1:
                        run_kwargs['mpi_proc'] = None
                    else:
                        run_kwargs['mpi_proc'] = list_n_proc[j]
                    # Run VIC
                    returncode = vic_exe.run(test_global_file,
                                             logdir=dirs['logs'],
                                             **run_kwargs)
                    # Check return code
                    check_returncode(vic_exe,
                                     test_dict.pop('expected_retval', 0))
            elif 'driver_match' in test_dict['check']:
                for dr in dict_test_global_file.keys():
                    # Reset mpi_proc in option kwargs to None for classic
                    # driver run
                    if dr == 'classic':
                        run_kwargs_classic = run_kwargs
                        run_kwargs_classic['mpi_proc'] = None
                        returncode = dict_drivers[dr].run(
                            dict_test_global_file[dr],
                            logdir=dirs['logs'],
                            **run_kwargs_classic)
                    else:
                        returncode = dict_drivers[dr].run(
                            dict_test_global_file[dr],
                            logdir=dirs['logs'],
                            **run_kwargs)
                    # Check return code
                    check_returncode(dict_drivers[dr],
                                     test_dict.pop('expected_retval', 0))
            else:
                returncode = vic_exe.run(test_global_file, logdir=dirs['logs'],
                                         **run_kwargs)
                # Check return code
                check_returncode(vic_exe,
                                 test_dict.pop('expected_retval', 0))
            test_complete = True
            # check output files (different tests depending on driver)
            if 'check' in test_dict:
                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check']:
                    if len(dict_drivers) > 1:
                        raise RuntimeError('Only support single driver for '
                                           'complete check')
                    fnames = glob.glob(os.path.join(dirs['results'], '*'))
                    if driver == 'classic':
                        test_classic_driver_all_complete(fnames)
                    else:
                        raise RuntimeError('complete check only supports '
                                           'classic driver')
                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if len(dict_drivers) > 1:
                        raise RuntimeError('Only support single driver for '
                                           'output_file_nans check')
                    fnames = glob.glob(os.path.join(dirs['results'], '*'))
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(
                            test_data_dir,
                            test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(
                            fnames,
                            domain_file)
                    else:
                        raise ValueError('unknown driver')
                # check for exact restarts
                if 'exact_restart' in test_dict['check']:
                    check_exact_restart_fluxes(dirs['results'], driver,
                                               run_periods)
                    if driver == 'classic':
                        check_exact_restart_states(dirs['state'], driver,
                                                   run_periods,
                                                   state_format)
                    elif driver == 'image':
                        check_exact_restart_states(dirs['state'], driver,
                                                   run_periods)
                    else:
                        raise ValueError('unknown driver')
                # check for multistream output
                if 'multistream' in test_dict['check']:
                    if len(dict_drivers) > 1:
                        raise ValueError('Only support single driver for '
                                         'multistream check')
                    fnames = glob.glob(os.path.join(dirs['results'], '*'))
                    if driver == 'classic':
                        check_multistream_classic(fnames)
                    elif driver == 'image':
                        warnings.warn('Skipping multistream image driver test')
                        # TODO: check_multistream_image(fnames)
                # check for mpi multiprocessor results
                if 'mpi' in test_dict['check']:
                    check_mpi_fluxes(dirs['results'], list_n_proc)
                    check_mpi_states(dirs['state'], list_n_proc)
                # check that results from different drivers match
                if 'driver_match' in test_dict['check']:
                    check_drivers_match_fluxes(list(dict_drivers.keys()),
                                               dirs['results'])
            # if we got this far, the test passed.
            test_passed = True
        # Handle errors
        except Exception as e:
            # NOTE(review): with multiple drivers only the last driver's
            # processed error is retained in the record — confirm intent.
            for dr, exe in dict_drivers.items():
                test_comment, error_message = process_error(e, exe)
        # record the test results
        test_results[testname] = TestResults(testname,
                                             test_complete=test_complete,
                                             passed=test_passed,
                                             comment=test_comment,
                                             error_message=error_message,
                                             returncode=returncode)
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing system tests.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return test_results
def run_science(config_file, vic_exe, science_test_data_dir,
                test_data_dir, out_dir, driver, nproc):
    '''Run science tests from config file

    Parameters
    ----------
    config_file : str
        Configuration file for science tests.
    vic_exe : VIC (object)
        VIC executable object (see tonic documentation).
    science_test_data_dir: str
        Path to science test data sets (archived VIC runs and observations)
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location
    driver : {'classic', 'image'}
        Driver to run tests on.
    nproc : int
        Number of processors to use for science tests

    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.

    See Also
    --------
    run_unit_tests
    run_examples
    run_system
    run_release
    '''
    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Science Tests')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # Get setup
    config = read_configobj(config_file)
    # drop invalid driver tests
    config = drop_tests(config, driver)
    test_results = OrderedDict()
    # Run individual tests
    for i, (test_type, test_dict) in enumerate(config.items()):
        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 test_type))
        # Setup directories for test
        dirs = setup_test_dirs(test_type, out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])
        # read template global parameter file
        infile = os.path.join(test_dir, 'science',
                              test_dict['global_parameter_file'])
        with open(infile, 'r') as global_file:
            global_param = global_file.read()
        # create template string
        s = string.Template(global_param)
        # fill in global parameter options
        global_param = s.safe_substitute(test_data_dir=science_test_data_dir,
                                         test_dir=test_dir,
                                         result_dir=dirs['results'],
                                         state_dir=dirs['state'],
                                         testname=test_type,
                                         test_root=test_dir)
        test_global_file = os.path.join(
            dirs['test'], '{0}_globalparam.txt'.format(test_type))
        # write global parameter file
        with open(test_global_file, 'w') as f:
            f.write(global_param)
        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)
        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        # Fix: keep returncode defined even when vic_exe.run() raises, so
        # the TestResults record below cannot hit a NameError.
        returncode = None
        try:
            # Run the VIC simulation
            returncode = vic_exe.run(test_global_file, logdir=dirs['logs'],
                                     **run_kwargs)
            test_complete = True
            # Check return code
            check_returncode(vic_exe)
            # check output files (different tests depending on driver)
            if test_dict['check']:
                fnames = glob.glob(os.path.join(dirs['results'], '*'))
                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check'] and driver == 'classic':
                    test_classic_driver_all_complete(fnames)
                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(test_data_dir,
                                                   test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(fnames,
                                                              domain_file)
                    else:
                        raise ValueError('unknown driver')
            # plot science test results
            plot_science_tests(test_dict['driver'],
                               test_type,
                               science_test_data_dir,
                               dirs['results'],
                               dirs['plots'],
                               test_dict['plots'],
                               test_dict['compare_data'],
                               nproc=nproc)
            # if we got this far, the test passed.
            test_passed = True
        # Handle errors
        except Exception as e:
            test_comment, error_message = process_error(e, vic_exe)
        # record the test results
        test_results[test_type] = TestResults(test_type,
                                              test_complete=test_complete,
                                              passed=test_passed,
                                              comment=test_comment,
                                              error_message=error_message,
                                              returncode=returncode)
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing science tests.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return test_results
def run_examples(config_file, vic_exe, test_data_dir, out_dir, driver):
    '''Run examples tests from config file

    Parameters
    ----------
    config_file : str
        Configuration file for example tests.
    vic_exe : VIC (object)
        VIC executable object (see tonic documentation).
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location
    driver : {'classic', 'image'}
        Driver to run tests on.

    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.

    See Also
    --------
    run_unit_tests
    run_system
    run_science
    run_release
    '''
    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Examples')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # Get setup
    config = read_config(config_file)
    # drop invalid driver tests
    config = drop_tests(config, driver)
    test_results = OrderedDict()
    # Run individual examples
    for i, (testname, test_dict) in enumerate(config.items()):
        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 testname))
        # Setup directories for test
        dirs = setup_test_dirs(testname, out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])
        # read template global parameter file
        infile = os.path.join(test_dir, 'examples',
                              test_dict['global_parameter_file'])
        with open(infile, 'r') as global_file:
            global_param = global_file.read()
        # create template string
        s = string.Template(global_param)
        # fill in global parameter options
        global_param = s.safe_substitute(test_data_dir=test_data_dir,
                                         result_dir=dirs['results'],
                                         state_dir=dirs['state'],
                                         testname=testname,
                                         test_root=test_dir)
        test_global_file = os.path.join(dirs['test'],
                                        '{0}_globalparam.txt'.format(testname))
        # write global parameter file
        with open(test_global_file, 'w') as f:
            f.write(global_param)
        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)
        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        # Fix: keep returncode defined even when vic_exe.run() raises, so
        # the TestResults record below cannot hit a NameError.
        returncode = None
        try:
            # Run the VIC simulation
            returncode = vic_exe.run(test_global_file, logdir=dirs['logs'],
                                     **run_kwargs)
            test_complete = True
            # Check return code
            check_returncode(vic_exe)
            # check output files (different tests depending on driver)
            if test_dict['check']:
                fnames = glob.glob(os.path.join(dirs['results'], '*'))
                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check'] and driver == 'classic':
                    test_classic_driver_all_complete(fnames)
                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(test_data_dir,
                                                   test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(fnames,
                                                              domain_file)
                    else:
                        raise ValueError('unknown driver')
            # if we got this far, the test passed.
            test_passed = True
        # Handle errors
        except Exception as e:
            test_comment, error_message = process_error(e, vic_exe)
        # record the test results
        test_results[testname] = TestResults(testname,
                                             test_complete=test_complete,
                                             passed=test_passed,
                                             comment=test_comment,
                                             error_message=error_message,
                                             returncode=returncode)
    # Print examples footer
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing examples.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return test_results
def run_release(config_file):
    '''Run the release test set described by ``config_file``.

    NOT IMPLEMENTED — currently a placeholder that reports no results.

    Returns
    -------
    OrderedDict
        Empty mapping of test name to TestResults.
    '''
    results = OrderedDict()
    return results
# Script entry point: run the test-suite CLI only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''VIC testing command line interface'''
from __future__ import print_function
import os
import sys
import glob
import argparse
import datetime
from collections import OrderedDict
import string
import warnings
import pytest
from tonic.models.vic.vic import VIC, default_vic_valgrind_suppressions_path
from tonic.io import read_config, read_configobj
from tonic.testing import VICTestError
from test_utils import (
setup_test_dirs, print_test_dict,
replace_global_values, drop_tests, pop_run_kwargs,
check_returncode, process_error,
test_classic_driver_all_complete,
test_classic_driver_no_output_file_nans,
find_global_param_value,
check_multistream_classic,
setup_subdirs_and_fill_in_global_param_driver_match_test,
check_drivers_match_fluxes,
plot_science_tests)
from test_image_driver import (test_image_driver_no_output_file_nans,
setup_subdirs_and_fill_in_global_param_mpi_test,
check_mpi_fluxes, check_mpi_states)
from test_restart import (prepare_restart_run_periods,
setup_subdirs_and_fill_in_global_param_restart_test,
check_exact_restart_fluxes,
check_exact_restart_states)
# Directory containing this test script; used throughout to locate the
# system/science/examples/release config files and the unit-test folder.
test_dir = os.path.dirname(os.path.abspath(__file__))
# Set path to valgrind supressions file if not already set.
if 'VIC_VALGRIND_SUPPRESSIONS' not in os.environ:
    sup_file = os.path.join(test_dir, default_vic_valgrind_suppressions_path)
    if os.path.isfile(sup_file):
        os.environ["VIC_VALGRIND_SUPPRESSIONS"] = sup_file
# Width in characters of the printed separator/header lines.
OUTPUT_WIDTH = 100
# Help text rendered verbatim by argparse (CustomFormatter preserves the
# pre-formatted layout).
# NOTE(review): the text says "six main test types" but lists five — confirm.
description = '''
VIC Test Suite
-------------------------------------------------------------------------------
This is the VIC Test Suite. There are six main test types:
1. unit: function level tests.
2. system: tests that aim to address model runtime issues. These tests
are generally very quick.
* configuration errors - tests that address model startup and error
checking.
* restart: tests that address model state and restart capacity.
* I/O: tests that address model input and output functionality.
* forcings come out the way the come in
* parameter files are appropriately read in and allocated.
3. science: tests that aim to assess the model's scientific skill.
Many of these tests are compared to observations of some kind.
4. examples: a set of examples that users may download and run.
5. release: longer, full domain simulations performed prior to release
demonstrating model output for a final release.
-------------------------------------------------------------------------------
'''
# Footer text appended to the argparse help output.
epilog = '''
-------------------------------------------------------------------------------
For questions about the development or use of VIC or use of this test module,
please email the VIC users list serve at <EMAIL>.
-------------------------------------------------------------------------------
'''
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    '''Argparse help formatter that shows argument defaults while leaving
    the pre-formatted description/epilog text untouched.'''
    pass
class TestResults(object):
    '''Record of a single test's outcome.'''
    def __init__(self, name, test_complete=False, passed=False,
                 comment='', error_message='', returncode=None):
        # name: test identifier
        self.name = name
        # test_complete: True if the VIC run finished (regardless of checks)
        self.test_complete = test_complete
        # passed: True only if the run and all post-run checks succeeded
        self.passed = passed
        # comment / error_message: diagnostics from process_error on failure
        self.comment = comment
        self.error_message = error_message
        # returncode: exit code of the VIC executable (None if not run)
        self.returncode = returncode
    def __repr__(self):
        # Multi-line summary; leading newline and line layout are part of
        # the template string and intentionally preserved.
        r = '''
{0} test results:
Passed: {1}
Comment:{2}
Return Code: {3}
'''.format(self.name, self.passed, self.comment, self.returncode)
        return r
    def __str__(self):
        # Single-line summary (no return code).
        return '{0} test results:Passed: {1}, Comment:{2}'.format(self.name,
                                                                  self.passed,
                                                                  self.comment)
def main():
    '''
    Run VIC tests
    '''
    # dates and times
    starttime = datetime.datetime.now()
    ymd = starttime.strftime('%Y%m%d')
    # Parse arguments
    test_results = OrderedDict()
    parser = argparse.ArgumentParser(description=description, epilog=epilog,
                                     formatter_class=CustomFormatter)
    # NOTE(review): 'tests' is a positional with nargs='+', so argparse
    # requires it and the default list is effectively unused — confirm.
    parser.add_argument('tests', type=str,
                        help='Test sets to run',
                        choices=['all', 'unit', 'system', 'science',
                                 'examples', 'release'],
                        default=['unit', 'system'], nargs='+')
    parser.add_argument('--system', type=str,
                        help='system tests configuration file',
                        default=os.path.join(test_dir,
                                             'system/system_tests.cfg'))
    parser.add_argument('--science', type=str,
                        help='science tests configuration file',
                        default=os.path.join(test_dir, 'science/science.cfg'))
    parser.add_argument('--examples', type=str,
                        help='examples tests configuration file',
                        default=os.path.join(test_dir,
                                             'examples/examples.cfg'))
    parser.add_argument('--release', type=str,
                        help='release tests configuration file',
                        default=os.path.join(test_dir, 'release/release.cfg'))
    parser.add_argument('--classic', type=str,
                        help='classic driver executable to test')
    parser.add_argument('--image', type=str,
                        help='image driver executable to test')
    parser.add_argument('--output_dir', type=str,
                        help='directory to write test output to',
                        default='$WORKDIR/VIC_tests_{0}'.format(ymd))
    parser.add_argument('--data_dir', type=str,
                        help='directory to find test data',
                        default='./samples/VIC_sample_data')
    parser.add_argument('--science_test_data_dir', type=str,
                        help='directory to find science test data',
                        default='./samples/VIC_sample_data')
    parser.add_argument('--nproc', type=int,
                        help='number of processors to use for science tests',
                        default=1)
    args = parser.parse_args()
    # Define test directories
    data_dir = args.data_dir
    # output_dir default contains $WORKDIR, hence the expandvars call
    out_dir = os.path.expandvars(args.output_dir)
    os.makedirs(out_dir, exist_ok=True)
    # check to make sure science test data directory exists
    science_test_data_dir = args.science_test_data_dir
    if 'science' in args.tests and not os.path.exists(science_test_data_dir):
        raise VICTestError('directory for science test data does not exist or '
                           'has not been defined')
    # Validate input directories
    if not (len(args.tests) == 1 and args.tests[0] == 'unit'):
        for d in [data_dir, test_dir]:
            if not os.path.exists(d):
                raise VICTestError('Directory: {0} does not exist'.format(d))
    # Print welcome information
    print(description)
    print('\nStarting tests now...Start Time: {0}\n'.format(starttime))
    print('Running Test Set: {0}'.format(', '.join(args.tests)))
    # Setup VIC executable
    # --- if not only unit test --- #
    # NOTE(review): dict_drivers is only bound in this branch; the system/
    # science/examples branches below rely on the same condition holding.
    if not (len(args.tests) == 1 and args.tests[0] == 'unit'):
        dict_drivers = {}
        if args.classic:
            dict_drivers['classic'] = VIC(args.classic)
            print('VIC classic version information:\n\n{0}'.format(
                dict_drivers['classic'].version.decode()))
        if args.image:
            dict_drivers['image'] = VIC(args.image)
            print('VIC image version information:\n\n{0}'.format(
                dict_drivers['image'].version.decode()))
    # run test sets
    # unit
    if any(i in ['all', 'unit'] for i in args.tests):
        test_results['unit'] = run_unit_tests(test_dir)
    # system
    if any(i in ['all', 'system'] for i in args.tests):
        test_results['system'] = run_system(args.system, dict_drivers,
                                            data_dir,
                                            os.path.join(out_dir,
                                                         'system'))
    # science
    if any(i in ['all', 'science'] for i in args.tests):
        test_results['science'] = run_science(
            args.science, dict_drivers['classic'],
            science_test_data_dir,
            data_dir,
            os.path.join(out_dir, 'science'),
            'classic',
            args.nproc)
    # examples
    if any(i in ['all', 'examples'] for i in args.tests):
        if len(dict_drivers) == 1:  # if only one driver
            driver = list(dict_drivers.keys())[0]
            vic_exe = dict_drivers[driver]
            test_results['examples'] = run_examples(args.examples, vic_exe,
                                                    data_dir,
                                                    os.path.join(
                                                        out_dir, 'examples'),
                                                    driver)
        else:
            raise ValueError('example test only supports single driver')
    # release
    if any(i in ['all', 'release'] for i in args.tests):
        test_results['release'] = run_release(args.release)
    # Print test results
    summary = OrderedDict()
    failed = 0
    print('\nTest Results:')
    for test_set, results in test_results.items():
        print('-'.ljust(OUTPUT_WIDTH, '-'))
        print(test_set.center(OUTPUT_WIDTH))
        print('-'.ljust(OUTPUT_WIDTH, '-'))
        print_test_dict(results)
        # count failures per test set
        summary[test_set] = 0
        for r in results.values():
            if not r.passed:
                summary[test_set] += 1
        failed += summary[test_set]
    print('\nTest Summary:')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    for test_set, r in summary.items():
        print('Failed tests in {0}: {1}'.format(test_set, r))
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # end date and times
    endtime = datetime.datetime.now()
    elapsed = endtime - starttime
    print('\nFinished testing VIC. Endtime: {0}'.format(endtime))
    print('Time elapsed during testing: {0}\n'.format(elapsed))
    # return exit code
    # process exit status is the total number of failed tests (0 == success)
    sys.exit(failed)
def run_unit_tests(test_dir):
    '''Run unittests in test_dir

    Parameters
    ----------
    test_dir : str
        Path to unittests

    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.

    See Also
    --------
    run_system
    run_examples
    run_science
    run_release
    '''
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Unit Tests')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # pytest.main returns 0 when all collected tests pass
    retcode = pytest.main(['-x', os.path.join(test_dir, 'unit'), '--boxed'])
    # Print the footer before returning (these prints previously sat after
    # the return statement and were unreachable dead code).
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Finished unit tests.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return {'unittests': TestResults('unittests',
                                     test_complete=True,
                                     passed=retcode == 0,
                                     comment='see stdout from pytest',
                                     returncode=retcode)}
def run_system(config_file, dict_drivers, test_data_dir, out_dir):
    '''Run system tests from config file
    Parameters
    ----------
    config_file : str
        Configuration file for system tests.
    dict_drivers : dict
        Keys: driver names {'classic', 'image'}
        Content: corresponding VIC executable object (see tonic documentation)
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location
    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.
    See Also
    --------
    run_unit_tests
    run_examples
    run_science
    run_release
    '''
    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running System Tests')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # Get setup
    config = read_configobj(config_file)
    # Process driver info and drop tests that are invalid for the requested
    # driver(s).  (The single-driver condition was previously evaluated in
    # two back-to-back identical if-statements; they are merged here.)
    if len(dict_drivers) == 1:  # if single driver
        driver = list(dict_drivers.keys())[0]
        vic_exe = dict_drivers[driver]
        config = drop_tests(config, driver)
    else:  # if multiple drivers
        config = drop_tests(config, list(dict_drivers))
    test_results = OrderedDict()
    # Run individual system tests
    for i, (testname, test_dict) in enumerate(config.items()):
        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 testname))
        # Setup directories for test
        dirs = setup_test_dirs(testname, out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])
        # read template global parameter file
        # NOTE(review): test_dir is read from module scope (it is not a
        # parameter of this function); it must be set before this runs.
        dict_global_param = {}
        # --- if single driver --- #
        if len(dict_drivers) == 1:
            infile = os.path.join(test_dir, 'system',
                                  test_dict['global_parameter_file'])
            with open(infile, 'r') as global_file:
                dict_global_param[driver] = global_file.read()
        # --- if multiple drivers --- #
        else:
            for j, dr in enumerate(test_dict['driver']):
                infile = os.path.join(test_dir, 'system',
                                      test_dict['global_parameter_file'][j])
                with open(infile, 'r') as global_file:
                    dict_global_param[dr] = global_file.read()
        # If restart test, prepare running periods
        if 'exact_restart' in test_dict['check']:
            if len(dict_drivers) > 1:
                raise ValueError('Only support single driver for restart'
                                 'tests!')
            global_param = dict_global_param[driver]
            # (1) Find STATE_FORMAT option for later use
            if driver == 'classic':
                state_format = find_global_param_value(global_param,
                                                       'STATE_FORMAT')
            # (2) Prepare running periods and initial state file info for
            # restart test
            run_periods = prepare_restart_run_periods(
                test_dict['restart'],
                dirs['state'])
        # If mpi test, prepare a list of number of processors to be run
        elif 'mpi' in test_dict['check']:
            if len(dict_drivers) > 1:
                raise ValueError('Only support single driver for MPI'
                                 'tests!')
            if not isinstance(test_dict['mpi']['n_proc'], list):
                raise ValueError('Need at least two values in n_proc to run'
                                 'mpi test!')
            list_n_proc = test_dict['mpi']['n_proc']
        # create template string
        dict_s = {}
        for dr, global_param in dict_global_param.items():
            dict_s[dr] = string.Template(global_param)
        # fill in global parameter options
        # --- if restart test, multiple runs --- #
        if 'exact_restart' in test_dict['check']:
            s = dict_s[driver]
            # Set up subdirectories and fill in global parameter options
            # for restart testing
            list_global_param =\
                setup_subdirs_and_fill_in_global_param_restart_test(
                    s, run_periods, driver, dirs['results'], dirs['state'],
                    test_data_dir)
        # --- if mpi test, multiple runs --- #
        elif 'mpi' in test_dict['check']:
            s = dict_s[driver]
            # Set up subdirectories and output directories in global file for
            # multiprocessor testing
            list_global_param = \
                setup_subdirs_and_fill_in_global_param_mpi_test(
                    s, list_n_proc, dirs['results'], dirs['state'],
                    test_data_dir)
        # --- if driver-match test, one run for each driver --- #
        elif 'driver_match' in test_dict['check']:
            # Set up subdirectories and output directories in global file for
            # driver-match testing
            dict_global_param = \
                setup_subdirs_and_fill_in_global_param_driver_match_test(
                    dict_s, dirs['results'], dirs['state'], test_data_dir)
        # --- else, single run --- #
        else:
            if len(dict_drivers) > 1:
                raise RuntimeError('Only support single driver for test'
                                   '{}!'.format(testname))
            s = dict_s[driver]
            global_param = s.safe_substitute(test_data_dir=test_data_dir,
                                             result_dir=dirs['results'],
                                             state_dir=dirs['state'])
        # replace global options from config file
        # --- extract global options to be substituted --- #
        if 'options' in test_dict:
            replacements = test_dict['options']
        else:
            replacements = OrderedDict()
        # --- replace global options --- #
        # For the purpose of exact restart, if STATE_FORMAT is specified,
        # then record the specified value (instead of the one in the global
        # template file)
        if 'exact_restart' in test_dict['check']:
            if 'STATE_FORMAT' in replacements:
                state_format = replacements['STATE_FORMAT']
        if 'exact_restart' in test_dict['check'] or\
                'mpi' in test_dict['check']:  # if multiple runs
            for j, gp in enumerate(list_global_param):
                # save a copy of replacements for the next global file
                # (replace_global_values consumes/mutates replacements)
                replacements_cp = replacements.copy()
                # replace global options for this global file
                list_global_param[j] = replace_global_values(gp, replacements)
                replacements = replacements_cp
        elif 'driver_match' in test_dict['check']:  # if cross-driver runs
            for dr, gp in dict_global_param.items():
                # save a copy of replacements for the next global file
                replacements_cp = replacements.copy()
                # replace global options for this global file
                dict_global_param[dr] = replace_global_values(gp,
                                                              replacements)
                replacements = replacements_cp
        else:  # if single run
            global_param = replace_global_values(global_param, replacements)
        # write global parameter file
        if 'exact_restart' in test_dict['check']:
            list_test_global_file = []
            for j, gp in enumerate(list_global_param):
                test_global_file = os.path.join(
                    dirs['test'],
                    '{}_globalparam_{}_{}.txt'.format(
                        testname,
                        run_periods[j]['start_date'].strftime("%Y%m%d"),
                        run_periods[j]['end_date'].strftime("%Y%m%d")))
                list_test_global_file.append(test_global_file)
                with open(test_global_file, mode='w') as f:
                    for line in gp:
                        f.write(line)
        elif 'mpi' in test_dict['check']:
            list_test_global_file = []
            for j, gp in enumerate(list_global_param):
                test_global_file = os.path.join(
                    dirs['test'],
                    '{}_globalparam_processors_{}.txt'.format(
                        testname, list_n_proc[j]))
                list_test_global_file.append(test_global_file)
                with open(test_global_file, mode='w') as f:
                    for line in gp:
                        f.write(line)
        elif 'driver_match' in test_dict['check']:
            dict_test_global_file = {}
            for dr, gp in dict_global_param.items():
                test_global_file = os.path.join(
                    dirs['test'],
                    '{}_globalparam_{}.txt'.format(
                        testname, dr))
                dict_test_global_file[dr] = test_global_file
                with open(test_global_file, mode='w') as f:
                    for line in gp:
                        f.write(line)
        else:
            test_global_file = os.path.join(
                dirs['test'],
                '{0}_globalparam.txt'.format(testname))
            with open(test_global_file, mode='w') as f:
                for line in global_param:
                    f.write(line)
        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)
        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        # Initialize returncode so building TestResults below cannot raise
        # NameError when vic_exe.run() itself raises before assigning one.
        returncode = None
        try:
            if 'exact_restart' in test_dict['check']:
                for j, test_global_file in enumerate(list_test_global_file):
                    returncode = vic_exe.run(test_global_file,
                                             logdir=dirs['logs'],
                                             **run_kwargs)
                    # Check return code
                    # NOTE(review): pop() removes 'expected_retval', so runs
                    # after the first compare against the default 0 — confirm
                    # this is intended.
                    check_returncode(vic_exe,
                                     test_dict.pop('expected_retval', 0))
            elif 'mpi' in test_dict['check']:
                for j, test_global_file in enumerate(list_test_global_file):
                    # Overwrite mpi_proc in option kwargs
                    n_proc = list_n_proc[j]
                    if n_proc == 1:
                        run_kwargs['mpi_proc'] = None
                    else:
                        run_kwargs['mpi_proc'] = list_n_proc[j]
                    # Run VIC
                    returncode = vic_exe.run(test_global_file,
                                             logdir=dirs['logs'],
                                             **run_kwargs)
                    # Check return code
                    check_returncode(vic_exe,
                                     test_dict.pop('expected_retval', 0))
            elif 'driver_match' in test_dict['check']:
                for dr in dict_test_global_file.keys():
                    # Reset mpi_proc in option kwargs to None for classic
                    # driver run
                    if dr == 'classic':
                        run_kwargs_classic = run_kwargs
                        run_kwargs_classic['mpi_proc'] = None
                        returncode = dict_drivers[dr].run(
                            dict_test_global_file[dr],
                            logdir=dirs['logs'],
                            **run_kwargs_classic)
                    else:
                        returncode = dict_drivers[dr].run(
                            dict_test_global_file[dr],
                            logdir=dirs['logs'],
                            **run_kwargs)
                    # Check return code
                    check_returncode(dict_drivers[dr],
                                     test_dict.pop('expected_retval', 0))
            else:
                returncode = vic_exe.run(test_global_file, logdir=dirs['logs'],
                                         **run_kwargs)
                # Check return code
                check_returncode(vic_exe,
                                 test_dict.pop('expected_retval', 0))
            test_complete = True
            # check output files (different tests depending on driver)
            if 'check' in test_dict:
                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check']:
                    if len(dict_drivers) > 1:
                        raise RuntimeError('Only support single driver for '
                                           'complete check')
                    fnames = glob.glob(os.path.join(dirs['results'], '*'))
                    if driver == 'classic':
                        test_classic_driver_all_complete(fnames)
                    else:
                        raise RuntimeError('complete check only supports '
                                           'classic driver')
                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if len(dict_drivers) > 1:
                        raise RuntimeError('Only support single driver for '
                                           'output_file_nans check')
                    fnames = glob.glob(os.path.join(dirs['results'], '*'))
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(
                            test_data_dir,
                            test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(
                            fnames,
                            domain_file)
                    else:
                        raise ValueError('unknown driver')
                # check for exact restarts
                if 'exact_restart' in test_dict['check']:
                    check_exact_restart_fluxes(dirs['results'], driver,
                                               run_periods)
                    if driver == 'classic':
                        check_exact_restart_states(dirs['state'], driver,
                                                   run_periods,
                                                   state_format)
                    elif driver == 'image':
                        check_exact_restart_states(dirs['state'], driver,
                                                   run_periods)
                    else:
                        raise ValueError('unknown driver')
                # check for multistream output
                if 'multistream' in test_dict['check']:
                    if len(dict_drivers) > 1:
                        raise ValueError('Only support single driver for '
                                         'multistream check')
                    fnames = glob.glob(os.path.join(dirs['results'], '*'))
                    if driver == 'classic':
                        check_multistream_classic(fnames)
                    elif driver == 'image':
                        warnings.warn('Skipping multistream image driver test')
                        # TODO: check_multistream_image(fnames)
                # check for mpi multiprocessor results
                if 'mpi' in test_dict['check']:
                    check_mpi_fluxes(dirs['results'], list_n_proc)
                    check_mpi_states(dirs['state'], list_n_proc)
                # check that results from different drivers match
                if 'driver_match' in test_dict['check']:
                    check_drivers_match_fluxes(list(dict_drivers.keys()),
                                               dirs['results'])
            # if we got this far, the test passed.
            test_passed = True
        # Handle errors
        except Exception as e:
            # NOTE(review): with multiple drivers only the last driver's
            # diagnosis is retained in test_comment/error_message.
            for dr, exe in dict_drivers.items():
                test_comment, error_message = process_error(e, exe)
        # record the test results
        test_results[testname] = TestResults(testname,
                                             test_complete=test_complete,
                                             passed=test_passed,
                                             comment=test_comment,
                                             error_message=error_message,
                                             returncode=returncode)
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing system tests.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return test_results
def run_science(config_file, vic_exe, science_test_data_dir,
                test_data_dir, out_dir, driver, nproc):
    '''Run science tests from config file
    Parameters
    ----------
    config_file : str
        Configuration file for science tests.
    vic_exe : VIC (object)
        VIC executable object (see tonic documentation).
    science_test_data_dir: str
        Path to science test data sets (archived VIC runs and observations)
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location
    driver : {'classic', 'image'}
        Driver to run tests on.
    nproc : int
        Number of processors to use for science tests
    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.
    See Also
    --------
    run_unit_tests
    run_examples
    run_system
    run_release
    '''
    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Science Tests')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # Get setup
    config = read_configobj(config_file)
    # drop invalid driver tests
    config = drop_tests(config, driver)
    test_results = OrderedDict()
    # Run individual tests
    for i, (test_type, test_dict) in enumerate(config.items()):
        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 test_type))
        # Setup directories for test
        dirs = setup_test_dirs(test_type, out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])
        # read template global parameter file
        # NOTE(review): test_dir is a module-level global, not a parameter.
        infile = os.path.join(test_dir, 'science',
                              test_dict['global_parameter_file'])
        with open(infile, 'r') as global_file:
            global_param = global_file.read()
        # create template string
        s = string.Template(global_param)
        # fill in global parameter options
        global_param = s.safe_substitute(test_data_dir=science_test_data_dir,
                                         test_dir=test_dir,
                                         result_dir=dirs['results'],
                                         state_dir=dirs['state'],
                                         testname=test_type,
                                         test_root=test_dir)
        test_global_file = os.path.join(
            dirs['test'], '{0}_globalparam.txt'.format(test_type))
        # write global parameter file
        with open(test_global_file, 'w') as f:
            f.write(global_param)
        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)
        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        # Initialize returncode so building TestResults below cannot raise
        # NameError when vic_exe.run() itself raises before assigning one.
        returncode = None
        try:
            # Run the VIC simulation
            returncode = vic_exe.run(test_global_file, logdir=dirs['logs'],
                                     **run_kwargs)
            test_complete = True
            # Check return code
            check_returncode(vic_exe)
            # check output files (different tests depending on driver)
            if test_dict['check']:
                fnames = glob.glob(os.path.join(dirs['results'], '*'))
                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check'] and driver == 'classic':
                    test_classic_driver_all_complete(fnames)
                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(test_data_dir,
                                                   test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(fnames,
                                                              domain_file)
                    else:
                        raise ValueError('unknown driver')
            # plot science test results
            plot_science_tests(test_dict['driver'],
                               test_type,
                               science_test_data_dir,
                               dirs['results'],
                               dirs['plots'],
                               test_dict['plots'],
                               test_dict['compare_data'],
                               nproc=nproc)
            # if we got this far, the test passed.
            test_passed = True
        # Handle errors
        except Exception as e:
            test_comment, error_message = process_error(e, vic_exe)
        # record the test results
        test_results[test_type] = TestResults(test_type,
                                              test_complete=test_complete,
                                              passed=test_passed,
                                              comment=test_comment,
                                              error_message=error_message,
                                              returncode=returncode)
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing science tests.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return test_results
def run_examples(config_file, vic_exe, test_data_dir, out_dir, driver):
    '''Run examples tests from config file
    Parameters
    ----------
    config_file : str
        Configuration file for example tests.
    vic_exe : VIC (object)
        VIC executable object (see tonic documentation).
    test_data_dir : str
        Path to test data sets.
    out_dir : str
        Path to output location
    driver : {'classic', 'image'}
        Driver to run tests on.
    Returns
    -------
    test_results : dict
        Test results for all tests in config_file.
    See Also
    --------
    run_unit_tests
    run_system
    run_science
    run_release
    '''
    # Print test set welcome
    print('\n-'.ljust(OUTPUT_WIDTH + 1, '-'))
    print('Running Examples')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    # Get setup
    config = read_config(config_file)
    # drop invalid driver tests
    config = drop_tests(config, driver)
    test_results = OrderedDict()
    # Run individual examples
    for i, (testname, test_dict) in enumerate(config.items()):
        # print out status info
        print('Running test {0}/{1}: {2}'.format(i + 1, len(config.items()),
                                                 testname))
        # Setup directories for test
        dirs = setup_test_dirs(testname, out_dir,
                               mkdirs=['results', 'state', 'logs', 'plots'])
        # read template global parameter file
        # NOTE(review): test_dir is a module-level global, not a parameter.
        infile = os.path.join(test_dir, 'examples',
                              test_dict['global_parameter_file'])
        with open(infile, 'r') as global_file:
            global_param = global_file.read()
        # create template string
        s = string.Template(global_param)
        # fill in global parameter options
        global_param = s.safe_substitute(test_data_dir=test_data_dir,
                                         result_dir=dirs['results'],
                                         state_dir=dirs['state'],
                                         testname=testname,
                                         test_root=test_dir)
        test_global_file = os.path.join(dirs['test'],
                                        '{0}_globalparam.txt'.format(testname))
        # write global parameter file
        with open(test_global_file, 'w') as f:
            f.write(global_param)
        # Get optional kwargs for run executable
        run_kwargs = pop_run_kwargs(test_dict)
        # run VIC
        test_complete = False
        test_passed = False
        test_comment = ''
        error_message = ''
        # Initialize returncode so building TestResults below cannot raise
        # NameError when vic_exe.run() itself raises before assigning one.
        returncode = None
        try:
            # Run the VIC simulation
            returncode = vic_exe.run(test_global_file, logdir=dirs['logs'],
                                     **run_kwargs)
            test_complete = True
            # Check return code
            check_returncode(vic_exe)
            # check output files (different tests depending on driver)
            if test_dict['check']:
                fnames = glob.glob(os.path.join(dirs['results'], '*'))
                # Check that the simulation completed for all grid cells
                if 'complete' in test_dict['check'] and driver == 'classic':
                    test_classic_driver_all_complete(fnames)
                # check for nans in all example files
                if 'output_file_nans' in test_dict['check']:
                    if driver == 'classic':
                        test_classic_driver_no_output_file_nans(fnames)
                    elif driver == 'image':
                        domain_file = os.path.join(test_data_dir,
                                                   test_dict['domain_file'])
                        test_image_driver_no_output_file_nans(fnames,
                                                              domain_file)
                    else:
                        raise ValueError('unknown driver')
            # if we got this far, the test passed.
            test_passed = True
        # Handle errors
        except Exception as e:
            test_comment, error_message = process_error(e, vic_exe)
        # record the test results
        test_results[testname] = TestResults(testname,
                                             test_complete=test_complete,
                                             passed=test_passed,
                                             comment=test_comment,
                                             error_message=error_message,
                                             returncode=returncode)
    # Print examples footer
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    print('Finished testing examples.')
    print('-'.ljust(OUTPUT_WIDTH, '-'))
    return test_results
def run_release(config_file):
    '''Placeholder for the release test set.

    NOT IMPLEMENTED — accepts the configuration file path only for
    interface compatibility with the other run_* functions and returns
    an empty result mapping.
    '''
    # No release tests exist yet, so report zero results.
    return OrderedDict()
if __name__ == '__main__':
    # Script entry point: delegates to main(), which parses the CLI
    # arguments, runs the selected test sets, and (per the code above)
    # exits with the number of failed tests.
    main()
| en | 0.611839 | #!/usr/bin/env python VIC testing command line interface # Set path to valgrind supressions file if not already set. VIC Test Suite ------------------------------------------------------------------------------- This is the VIC Test Suite. There are six main test types: 1. unit: function level tests. 2. system: tests that aim to address model runtime issues. These tests are generally very quick. * configuration errors - tests that address model startup and error checking. * restart: tests that address model state and restart capacity. * I/O: tests that address model input and output functionality. * forcings come out the way the come in * parameter files are appropriately read in and allocated. 3. science: tests that aim to assess the model's scientific skill. Many of these tests are compared to observations of some kind. 4. examples: a set of examples that users may download and run. 5. release: longer, full domain simulations performed prior to release demonstrating model output for a final release. ------------------------------------------------------------------------------- ------------------------------------------------------------------------------- For questions about the development or use of VIC or use of this test module, please email the VIC users list serve at <EMAIL>. 
------------------------------------------------------------------------------- {0} test results: Passed: {1} Comment:{2} Return Code: {3} Run VIC tests # dates and times # Parse arguments # Define test directories # check to make sure science test data directory exists # Validate input directories # Print welcome information # Setup VIC executable # --- if not only unit test --- # # run test sets # unit # system # science # examples # if only one driver # release # Print test results # end date and times # return exit code Run unittests in test_dir Parameters ---------- test_dir : str Path to unittests Returns ------- test_results : dict Test results for all tests in config_file. See Also -------- run_system run_examples run_science run_release Run system tests from config file Parameters ---------- config_file : str Configuration file for system tests. dict_drivers : dict Keys: driver names {'classic', 'image'} Content: corresponding VIC executable object (see tonic documentation) test_data_dir : str Path to test data sets. out_dir : str Path to output location Returns ------- test_results : dict Test results for all tests in config_file. 
See Also -------- run_unit_tests run_examples run_science run_release # Print test set welcome # Get setup # Process driver info # if single driver # Drop invalid driver tests # if single driver # if multiple drivers # Run individual system tests # print out status info # Setup directories for test # read template global parameter file # --- if single driver --- # # --- if multiple drivers --- # # If restart test, prepare running periods # () Find STATE_FORMAT option for later use # (2) Prepare running periods and initial state file info for # restart test # If mpi test, prepare a list of number of processors to be run # create template string # fill in global parameter options # --- if restart test, multiple runs --- # # Set up subdirectories and fill in global parameter options # for restart testing # --- if mpi test, multiple runs --- # # Set up subdirectories and output directories in global file for # multiprocessor testing # --- if driver-match test, one run for each driver --- # # Set up subdirectories and output directories in global file for # driver-match testing # --- else, single run --- # # replace global options from config file # --- extract global options to be substitute --- # # --- replace global options --- # # For the purpose of exact restart, if STATE_FORMAT is specified, # then record the specified value (instead of the one in the global # template file) # if multiple runs # save a copy of replacements for the next global file # replace global options for this global file # if cross-driver runs # save a copy of replacements for the next global file # replace global options for this global file # if single run # write global parameter file # Get optional kwargs for run executable # run VIC # Check return code # Overwrite mpi_proc in option kwargs # Run VIC # Check return code # Reset mpi_proc in option kwargs to None for classic # driver run # Check return code # Check return code # check output files (different tests depending on driver) # 
Check that the simulation completed for all grid cells # check for nans in all example files # check for exact restarts # check for multistream output # TODO: check_multistream_image(fnames) # check for mpi multiprocessor results # check that results from different drivers match # if we got this far, the test passed. # Handle errors # record the test results Run science tests from config file Parameters ---------- config_file : str Configuration file for science tests. vic_exe : VIC (object) VIC executable object (see tonic documentation). science_test_data_dir: str Path to science test data sets (archived VIC runs and observations) test_data_dir : str Path to test data sets. out_dir : str Path to output location driver : {'classic', 'image'} Driver to run tests on. nproc : int Number of processors to use for science tests Returns ------- test_results : dict Test results for all tests in config_file. See Also -------- run_unit_tests run_examples run_system run_release # Print test set welcome # Get setup # drop invalid driver tests # Run individual tests # print out status info # Setup directories for test # read template global parameter file # create template string # fill in global parameter options # write global parameter file # Get optional kwargs for run executable # run VIC # Run the VIC simulation # Check return code # check output files (different tests depending on driver) # Check that the simulation completed for all grid cells # check for nans in all example files # plot science test results # if we got this far, the test passed. # Handle errors # record the test results Run examples tests from config file Parameters ---------- config_file : str Configuration file for example tests. vic_exe : VIC (object) VIC executable object (see tonic documentation). test_data_dir : str Path to test data sets. out_dir : str Path to output location driver : {'classic', 'image'} Driver to run tests on. 
Returns ------- test_results : dict Test results for all tests in config_file. See Also -------- run_unit_tests run_system run_science run_release # Print test set welcome # Get setup # drop invalid driver tests # Run individual examples # print out status info # Setup directories for test # read template global parameter file # create template string # fill in global parameter options # write global parameter file # Get optional kwargs for run executable # run VIC # Run the VIC simulation # Check return code # check output files (different tests depending on driver) # Check that the simulation completed for all grid cells # check for nans in all example files # if we got this far, the test passed. # Handle errors # record the test results # Print examples footer Run release from config file NOT IMPLEMENTED | 1.85647 | 2 |
hetio/permute.py | gwaygenomics/hetio | 0 | 6614133 | <gh_stars>0
import collections
import random
import logging
from hetio.hetnet import Graph
def permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=None, log=False):
    """
    Shuffle edges within each metaedge category. Preserves node degree but
    randomizes which nodes each edge connects.

    Parameters
    ----------
    graph : hetio.hetnet.Graph
        Graph to permute; not modified.
    multiplier : int
        Swap attempts per edge (forwarded to permute_pair_list).
    seed : int
        Random seed forwarded to permute_pair_list for reproducibility.
    metaedge_to_excluded : dict or None
        Maps a metaedge to a set of (source_id, target_id) pairs that the
        permutation must not create. Defaults to no exclusions.
    log : bool
        If True, emit progress messages via the logging module.

    Returns
    -------
    (permuted_graph, all_stats) : (Graph, list of OrderedDict)
        The permuted graph and permutation statistics, each tagged with
        its metaedge and abbreviation.
    """
    # Avoid the mutable-default-argument pitfall: build a fresh dict per call.
    if metaedge_to_excluded is None:
        metaedge_to_excluded = dict()
    if log:
        logging.info('Creating permuted graph template')
    permuted_graph = Graph(graph.metagraph)
    for (metanode_identifier, node_identifier), node in graph.node_dict.items():
        permuted_graph.add_node(
            metanode_identifier, node_identifier, name=node.name, data=node.data)
    if log:
        logging.info('Retrieving graph edges')
    metaedge_to_edges = graph.get_metaedge_to_edges(exclude_inverts=True)
    if log:
        logging.info('Adding permuted edges')
    all_stats = list()
    for metaedge, edges in metaedge_to_edges.items():
        if log:
            logging.info(metaedge)
        excluded_pair_set = metaedge_to_excluded.get(metaedge, set())
        pair_list = [(edge.source.get_id(), edge.target.get_id()) for edge in edges]
        # An edge category is treated as directed unless its direction is 'both'
        directed = metaedge.direction != 'both'
        permuted_pair_list, stats = permute_pair_list(
            pair_list, directed=directed, multiplier=multiplier,
            excluded_pair_set=excluded_pair_set, seed=seed, log=log)
        for stat in stats:
            stat['metaedge'] = metaedge
            stat['abbrev'] = metaedge.get_abbrev()
        all_stats.extend(stats)
        for pair in permuted_pair_list:
            permuted_graph.add_edge(pair[0], pair[1], metaedge.kind, metaedge.direction)
    return permuted_graph, all_stats
def permute_pair_list(pair_list, directed=False, multiplier=10, excluded_pair_set=None, seed=0, log=False):
    """
    Permute edges (pairs) via degree-preserving random endpoint swaps.

    Performs ``int(len(pair_list) * multiplier)`` swap attempts; each attempt
    picks two edges at random and exchanges their target endpoints, rejecting
    swaps that would create self-loops, duplicate edges, or excluded edges.

    Parameters
    ----------
    pair_list : list of tuple
        Edges as (source_id, target_id) pairs; must be unique. Modified
        in place and also returned.
    directed : bool
        If False, the reverse of an existing pair also counts as a duplicate.
    multiplier : int or float
        Swap attempts per edge.
    excluded_pair_set : set or None
        Pairs that must not be created by a swap. Defaults to the empty set.
    seed : int
        Random seed; makes the permutation deterministic.
    log : bool
        If True, emit a summary line via the logging module.

    Returns
    -------
    (pair_list, stats) : (list of tuple, list of OrderedDict)
        The permuted edges and per-interval permutation statistics.
    """
    # Avoid the mutable-default-argument pitfall: build a fresh set per call.
    if excluded_pair_set is None:
        excluded_pair_set = set()
    random.seed(seed)
    pair_set = set(pair_list)
    assert len(pair_set) == len(pair_list)
    edge_number = len(pair_list)
    n_perm = int(edge_number * multiplier)
    count_same_edge = 0
    count_self_loop = 0
    count_duplicate = 0
    count_undir_dup = 0
    count_excluded = 0
    if log:
        logging.info('{} edges, {} permutations (seed = {}, directed = {}, {} excluded_edges)'.format(
            edge_number, n_perm, seed, directed, len(excluded_pair_set)))
    orig_pair_set = pair_set.copy()
    # Record statistics roughly every 10% of attempts, plus at the end.
    step = max(1, n_perm // 10)
    print_at = list(range(step, n_perm, step)) + [n_perm - 1]
    stats = list()
    for i in range(n_perm):
        # Choose two random edges
        i_0 = random.randrange(edge_number)
        i_1 = random.randrange(edge_number)
        # Same edge selected twice
        if i_0 == i_1:
            count_same_edge += 1
            continue
        pair_0 = pair_list.pop(i_0)
        pair_1 = pair_list.pop(i_1 - 1 if i_0 < i_1 else i_1)
        # Swap the target endpoints of the two edges
        new_pair_0 = pair_0[0], pair_1[1]
        new_pair_1 = pair_1[0], pair_0[1]
        valid = False
        for pair in new_pair_0, new_pair_1:
            if pair[0] == pair[1]:
                count_self_loop += 1
                break  # edge is a self-loop
            if pair in pair_set:
                count_duplicate += 1
                break  # edge is a duplicate
            if not directed and (pair[1], pair[0]) in pair_set:
                count_undir_dup += 1
                break  # edge is a duplicate
            if pair in excluded_pair_set:
                count_excluded += 1
                break  # edge is excluded
        else:
            # edge passed all validity conditions (for-else: no break)
            valid = True
        # If new edges are invalid, restore the original pair of edges
        if not valid:
            for pair in pair_0, pair_1:
                pair_list.append(pair)
        # If new edges are valid, commit the swap
        else:
            for pair in pair_0, pair_1:
                pair_set.remove(pair)
            for pair in new_pair_0, new_pair_1:
                pair_set.add(pair)
                pair_list.append(pair)
        if i in print_at:
            stat = collections.OrderedDict()
            stat['cumulative_attempts'] = i
            index = print_at.index(i)
            stat['attempts'] = print_at[index] + 1 if index == 0 else print_at[index] - print_at[index - 1]
            stat['complete'] = (i + 1) / n_perm
            stat['unchanged'] = len(orig_pair_set & pair_set) / len(pair_set)
            stat['same_edge'] = count_same_edge / stat['attempts']
            stat['self_loop'] = count_self_loop / stat['attempts']
            stat['duplicate'] = count_duplicate / stat['attempts']
            stat['undirected_duplicate'] = count_undir_dup / stat['attempts']
            stat['excluded'] = count_excluded / stat['attempts']
            stats.append(stat)
            # Reset interval counters after each statistics snapshot
            count_same_edge = 0
            count_self_loop = 0
            count_duplicate = 0
            count_undir_dup = 0
            count_excluded = 0
    assert len(pair_set) == edge_number
    return pair_list, stats
| import collections
import random
import logging
from hetio.hetnet import Graph
def permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=None, log=False):
    """
    Shuffle edges within each metaedge category. Preserves node degree but
    randomizes which nodes each edge connects.

    Parameters
    ----------
    graph : hetio.hetnet.Graph
        Graph to permute; not modified.
    multiplier : int
        Swap attempts per edge (forwarded to permute_pair_list).
    seed : int
        Random seed forwarded to permute_pair_list for reproducibility.
    metaedge_to_excluded : dict or None
        Maps a metaedge to a set of (source_id, target_id) pairs that the
        permutation must not create. Defaults to no exclusions.
    log : bool
        If True, emit progress messages via the logging module.

    Returns
    -------
    (permuted_graph, all_stats) : (Graph, list of OrderedDict)
        The permuted graph and permutation statistics, each tagged with
        its metaedge and abbreviation.
    """
    # Avoid the mutable-default-argument pitfall: build a fresh dict per call.
    if metaedge_to_excluded is None:
        metaedge_to_excluded = dict()
    if log:
        logging.info('Creating permuted graph template')
    permuted_graph = Graph(graph.metagraph)
    for (metanode_identifier, node_identifier), node in graph.node_dict.items():
        permuted_graph.add_node(
            metanode_identifier, node_identifier, name=node.name, data=node.data)
    if log:
        logging.info('Retrieving graph edges')
    metaedge_to_edges = graph.get_metaedge_to_edges(exclude_inverts=True)
    if log:
        logging.info('Adding permuted edges')
    all_stats = list()
    for metaedge, edges in metaedge_to_edges.items():
        if log:
            logging.info(metaedge)
        excluded_pair_set = metaedge_to_excluded.get(metaedge, set())
        pair_list = [(edge.source.get_id(), edge.target.get_id()) for edge in edges]
        # An edge category is treated as directed unless its direction is 'both'
        directed = metaedge.direction != 'both'
        permuted_pair_list, stats = permute_pair_list(
            pair_list, directed=directed, multiplier=multiplier,
            excluded_pair_set=excluded_pair_set, seed=seed, log=log)
        for stat in stats:
            stat['metaedge'] = metaedge
            stat['abbrev'] = metaedge.get_abbrev()
        all_stats.extend(stats)
        for pair in permuted_pair_list:
            permuted_graph.add_edge(pair[0], pair[1], metaedge.kind, metaedge.direction)
    return permuted_graph, all_stats
def permute_pair_list(pair_list, directed=False, multiplier=10, excluded_pair_set=set(), seed=0, log=False):
"""
If n_perm is not specific, perform 10 times the number of edges of permutations
May not work for directed edges
"""
random.seed(seed)
pair_set = set(pair_list)
assert len(pair_set) == len(pair_list)
edge_number = len(pair_list)
n_perm = int(edge_number * multiplier)
count_same_edge = 0
count_self_loop = 0
count_duplicate = 0
count_undir_dup = 0
count_excluded = 0
if log:
logging.info('{} edges, {} permutations (seed = {}, directed = {}, {} excluded_edges)'.format(
edge_number, n_perm, seed, directed, len(excluded_pair_set)))
orig_pair_set = pair_set.copy()
step = max(1, n_perm // 10)
print_at = list(range(step, n_perm, step)) + [n_perm - 1]
stats = list()
for i in range(n_perm):
# Same two random edges
i_0 = random.randrange(edge_number)
i_1 = random.randrange(edge_number)
# Same edge selected twice
if i_0 == i_1:
count_same_edge += 1
continue
pair_0 = pair_list.pop(i_0)
pair_1 = pair_list.pop(i_1 - 1 if i_0 < i_1 else i_1)
new_pair_0 = pair_0[0], pair_1[1]
new_pair_1 = pair_1[0], pair_0[1]
valid = False
for pair in new_pair_0, new_pair_1:
if pair[0] == pair[1]:
count_self_loop += 1
break # edge is a self-loop
if pair in pair_set:
count_duplicate += 1
break # edge is a duplicate
if not directed and (pair[1], pair[0]) in pair_set:
count_undir_dup += 1
break # edge is a duplicate
if pair in excluded_pair_set:
count_excluded += 1
break # edge is excluded
else:
# edge passed all validity conditions
valid = True
# If new edges are invalid
if not valid:
for pair in pair_0, pair_1:
pair_list.append(pair)
# If new edges are valid
else:
for pair in pair_0, pair_1:
pair_set.remove(pair)
for pair in new_pair_0, new_pair_1:
pair_set.add(pair)
pair_list.append(pair)
if i in print_at:
stat = collections.OrderedDict()
stat['cumulative_attempts'] = i
index = print_at.index(i)
stat['attempts'] = print_at[index] + 1 if index == 0 else print_at[index] - print_at[index - 1]
stat['complete'] = (i + 1) / n_perm
stat['unchanged'] = len(orig_pair_set & pair_set) / len(pair_set)
stat['same_edge'] = count_same_edge / stat['attempts']
stat['self_loop'] = count_self_loop / stat['attempts']
stat['duplicate'] = count_duplicate / stat['attempts']
stat['undirected_duplicate'] = count_undir_dup / stat['attempts']
stat['excluded'] = count_excluded / stat['attempts']
stats.append(stat)
count_same_edge = 0
count_self_loop = 0
count_duplicate = 0
count_undir_dup = 0
count_excluded = 0
assert len(pair_set) == edge_number
return pair_list, stats | en | 0.744463 | Shuffle edges within metaedge category. Preserves node degree but randomizes edges. If n_perm is not specific, perform 10 times the number of edges of permutations May not work for directed edges # Same two random edges # Same edge selected twice # edge is a self-loop # edge is a duplicate # edge is a duplicate # edge is excluded # edge passed all validity conditions # If new edges are invalid # If new edges are valid | 2.358078 | 2 |
hpcswtest_report.py | idaholab/hpcswtest | 6 | 6614134 | <reponame>idaholab/hpcswtest<filename>hpcswtest_report.py
#!/usr/bin/env python
#
#Copyright 2017 Battelle Energy Alliance, LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
"""Read HPC Software test suite output results files, determine
if results are correct or not and print QA report.
Initial Version Created on Dec 24, 2015 (by C.Garvey). """
import sys
import os
import re
import socket
import shutil
import glob
# Number of MPI ranks the "mpi" hello-world test job is expected to report
# (see check_file_patterns_found_cnt).
NUM_MPI_PROCS = 2
# Categories whose results files carry a compiler column (compile + run tests).
compile_sw_l = ["compiler", "mpi", "blas", "boost"]
# Categories whose results files carry a run-directory column.
dir_sw_l = ["vasp", "lammps", "helios", "cth", "mc21", "nwchem"]
# Validation rules applied to a finished job, keyed by software category:
#   "file_patterns"    - maps a logical file label (resolved to a real file
#                        name by get_file_name()) to regex patterns that must
#                        ALL be found in that file for the run to pass.
#   "files_exist"      - logical files that must exist for the run to pass.
#   "check_file_sizes" - files expected to be empty (e.g. pbs_stderr);
#                        content in them downgrades the result to "check".
check_file_patterns = {"compiler": {
                           "file_patterns": {
                               "pbs_stdout": ["code is working"]
                           },
                           "files_exist": [],
                           "check_file_sizes": ["pbs_stderr"]
                       },
                       "mpi": {
                           "file_patterns": {
                               "pbs_stdout": ["Hello"]
                           },
                           "files_exist": [],
                           "check_file_sizes": ["pbs_stderr"]
                       },
                       "blas": {
                           "file_patterns": {
                               "pbs_stdout": ["SUM C =\s+45"]
                           },
                           "files_exist": [],
                           "check_file_sizes": ["pbs_stderr"]
                       },
                       "boost": {
                           "file_patterns": {
                               "pbs_stdout": ["Working"]
                           },
                           "files_exist": [],
                           "check_file_sizes": ["pbs_stderr"]
                       },
                       "abaqus": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "^Begin Abaqus/Standard Analysis$",
                                   "^End Abaqus/Standard Analysis$",
                                   "COMPLETED"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "starccm": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "^Configuring finished$",
                                   "^Saving:",
                                   "^Server process exited with code\s*:*\s+0$"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "vasp": {
                           "file_patterns": {
                               "OUTCAR": [
                                   "aborting loop because EDIFF is reached",
                                   "General timing and accounting informations for this job"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "matlab": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "^e =$",
                                   "991$"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "m": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "mcrun is done$"
                               ]
                           },
                           "files_exist": ["mcnp_o","mcnp_r","mcnp_s"],
                           "check_file_sizes": []
                       },
                       "x": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "mcrun is done$"
                               ]
                           },
                           "files_exist": ["mcnp_o","mcnp_r","mcnp_m"],
                           "check_file_sizes": []
                       },
                       "helios": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "ZENITH Successful Completion"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": ["pbs_stderr"]
                       },
                       "mc21": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "Simulation Completed",
                                   "Number of errors:\s*0"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "scale": {
                           "file_patterns": {
                               "scale_out": [
                                   "normal termination"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "scale62": {
                           "file_patterns": {
                               "scale_out": [
                                   "SCALE is finished"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "serpent": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "Transport cycle completed"
                               ]
                           },
                           "files_exist": ["serpent_out"],
                           "check_file_sizes": []
                       },
                       "gaussian": {
                           "file_patterns": {
                               "gaussian_out": [
                                   "Normal termination of Gaussian"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "cth": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "CALCULATION TERMINATED BECAUSE STOP TIME ACHIEVED"
                               ]
                           },
                           "files_exist": ["octh"],
                           "check_file_sizes": []
                       },
                       "lammps": {
                           "file_patterns": {
                               "log.lammps": [
                                   "MPI task timing breakdown",
                                   "Total wall time"
                               ]
                           },
                           "files_exist": [],
                           "check_file_sizes": []
                       },
                       "nwchem": {
                           "file_patterns": {
                               "pbs_stdout": [
                                   "CITATION",
                                   "Total times cpu"
                               ]
                           },
                           "files_exist": ["ecpchho.db"],
                           "check_file_sizes": []
                       }
                      }
g_hostname = socket.gethostname()
#
# All falcon login/PBS nodes report under the single cluster name "falcon"
# so that the HTML links generated below resolve to one report directory.
if g_hostname == "falcon1" or g_hostname == "falcon2" or g_hostname == "falconpbs" or g_hostname == "service2":
   clustername = "falcon"
else:
   clustername = g_hostname
def concat_files(f1_name, f2_name, f_name):
    """Concatenate f1_name and (if it exists) f2_name into f_name.

    Used to merge a PBS job's stdout (.o) and stderr (.e) files into a
    single .oe file for the HTML report; the stderr file may be absent.

    The original leaked all three file handles (none were ever closed);
    context managers fix that without changing the output.
    """
    with open(f1_name, 'r') as f1:
        with open(f_name, 'w') as out:
            out.write(f1.read())
            # The second file is optional (e.g. no .e file was produced).
            if os.path.exists(f2_name):
                with open(f2_name, 'r') as f2:
                    out.write(f2.read())
def gaussian_html_link_str(str1, jobid, result):
    """Merge the Gaussian job's .o/.e files into a .oe file and return
    *result* wrapped in an HTML link.

    Unlike html_link_str the link target is str1 itself, with no
    cluster-directory prefix.
    """
    stdout_name = "{0}.o{1}".format(str1, jobid)
    stderr_name = "{0}.e{1}".format(str1, jobid)
    # Strip any leading directory components from the merged file name.
    merged_name = "{0}.oe{1}".format(str1, jobid).split("/")[-1]
    concat_files(stdout_name, stderr_name, merged_name)
    return '<a href="{0}"target="_blank">{1}</a>'.format(str1, result)
def html_link_str(str1, jobid, result):
    """Merge a PBS job's .o/.e output into one .oe file and return
    *result* wrapped in an HTML link pointing at <clustername>/<merged file>."""
    stdout_name = "{0}.o{1}".format(str1, jobid)
    stderr_name = "{0}.e{1}".format(str1, jobid)
    # Strip any leading directory components from the merged file name.
    merged_name = "{0}.oe{1}".format(str1, jobid).split("/")[-1]
    concat_files(stdout_name, stderr_name, merged_name)
    return '<a href="{0}/{1}"target="_blank">{2}</a>'.format(clustername, merged_name, result)
def log_link_str(hostname, file_str, result):
    """Copy <hostname>_<file_str>.log to a browser-friendly .txt twin and
    return *result* wrapped in an HTML link to it."""
    base = "{0}_{1}".format(hostname, file_str)
    shutil.copy(base + ".log", base + ".txt")
    return '<a href="{0}/{1}.txt" target="_blank">{2}</a>'.format(clustername, base, result)
def find_result_counts(l):
    # Tally the four recognized result strings in l; falsy entries
    # (None placeholder columns, empty strings) are skipped silently.
    # Returns (passed, failed, check, running) counts.
    cnt_p = 0
    cnt_f = 0
    cnt_c = 0
    cnt_r = 0
    for e in l:
        if e:
            if e == "passed":
                cnt_p = cnt_p + 1
            elif e == "failed":
                cnt_f = cnt_f + 1
            elif e == "check":
                cnt_c = cnt_c + 1
            elif e == "running":
                cnt_r = cnt_r + 1
            else:
                # Unknown strings are reported but not counted.
                print "Warning: Do not recognize the string,",e
    return cnt_p,cnt_f,cnt_c,cnt_r
def get_sw_result_totals(sw_dict):
    """Sum compile-stage ("results") and run-stage ("run_results") counts
    for one software entry.

    Returns (total, passed, failed, checked, running).
    """
    compile_counts = find_result_counts(sw_dict["results"])
    run_counts = find_result_counts(sw_dict["run_results"])
    passed, failed, checked, running = [a + b for a, b in zip(compile_counts, run_counts)]
    total = passed + failed + checked + running
    return total, passed, failed, checked, running
def get_all_result_totals(report_dict):
    """Aggregate result counts over every software entry in report_dict.

    Returns (total, passed, failed, checked, running).
    """
    totals = [0, 0, 0, 0, 0]
    for sw_dict in report_dict.values():
        for idx, count in enumerate(get_sw_result_totals(sw_dict)):
            totals[idx] += count
    return tuple(totals)
def print_report(run_date, hostname, report_dict):
    # Print the full QA report to stdout. When the first command-line
    # argument starts with "h", the report is wrapped in minimal HTML and
    # failed/check results become links to the merged job output files.
    if len(sys.argv) > 1 and re.match("h",sys.argv[1]):
        gen_html = True
    else:
        gen_html = False
    if gen_html:
        print "<html>"
        print "<body>"
        print "<pre>"
    print "\nSoftware Quality Assurance Tests run on ",hostname," at",run_date
    for sw_name in report_dict:
        # Section banner, e.g. "##### abaqus Tests #####" centered to 142 cols.
        print '\n{:#^142}\n'.format(sw_name + ' Tests')
        for module,compiler,result,pbs_jobid,dir_name,pbs_jobname,run_result in zip(report_dict[sw_name]["module_names"],report_dict[sw_name]["compiler_names"],report_dict[sw_name]["results"],report_dict[sw_name]["pbs_job_ids"],report_dict[sw_name]["dir_names"],report_dict[sw_name]["pbs_job_names"],report_dict[sw_name]["run_results"]):
            if sw_name in compile_sw_l:
                # Compile-and-run categories print two rows: compile result
                # then job (run) result.
                if gen_html and re.search("failed$|check$",result):
                    link_str = log_link_str(hostname,sw_name+"_test",result)
                    print '{:<40}{:<10}{:-<80} {:<8}'.format(module,compiler,'Compile',link_str)
                else:
                    print '{:<40}{:<10}{:-<80} {:<8}'.format(module,compiler,'Compile',result)
                if gen_html and re.search("failed$|check$",run_result) and re.search("\d+",pbs_jobid):
                    link_str = html_link_str(pbs_jobname, pbs_jobid, run_result)
                    print '{:<40}{:<10}{:<4}{:-<76} {:<8}'.format(module,compiler,'Job',pbs_jobid,link_str)
                else:
                    print '{:<40}{:<10}{:<4}{:-<76} {:<8}'.format(module,compiler,'Job',pbs_jobid,run_result)
            elif sw_name in dir_sw_l:
                # Directory-based categories: job output lives under dir_name.
                if gen_html and re.search("failed$|check$",run_result) and re.search("\d+",pbs_jobid):
                    link_str = html_link_str(dir_name+"/"+pbs_jobname, pbs_jobid, run_result)
                    print '{:<40}{:<20}{:-<70} {:<8}'.format(module,dir_name,pbs_jobid,link_str)
                else:
                    print '{:<40}{:<20}{:-<70} {:<8}'.format(module,dir_name,pbs_jobid,run_result)
            elif sw_name == "python2" or sw_name == "python3":
                # Python module tests have no PBS job, only a direct result.
                if gen_html and re.search("failed$|check$",result):
                    link_str = log_link_str(hostname,sw_name+"_test",result)
                    print '{:<40}{:-<90} {:<8}'.format(module,compiler,link_str)
                else:
                    print '{:<40}{:-<90} {:<8}'.format(module,compiler,result)
            else:
                # Default: one row per submitted job.
                if gen_html and re.search("failed$|check$",run_result) and re.search("\d+",pbs_jobid):
                    link_str = html_link_str(pbs_jobname, pbs_jobid, run_result)
                    print '{:<40}{:-<90} {:<8}'.format(module,pbs_jobid,link_str)
                else:
                    print '{:<40}{:-<90} {:<8}'.format(module,pbs_jobid,run_result)
        total_sw_tests,total_sw_passed,total_sw_failed,total_sw_checked,total_sw_running = get_sw_result_totals(report_dict[sw_name])
        print '\n{:#<142}\n'.format("")
        print 'Total Number of {} Tests = {} (Passed = {} Failed = {} Checked = {} Running = {})'.format(sw_name,total_sw_tests,total_sw_passed,total_sw_failed,total_sw_checked,total_sw_running)
    total_tests,total_passed,total_failed,total_checked,total_running = get_all_result_totals(report_dict)
    print '\n{:#<142}\n'.format("")
    print 'Total Number of Tests = {} (Passed = {} Failed = {} Checked = {} Running = {})'.format(total_tests,total_passed,total_failed,total_checked,total_running)
    print '\n{:#<142}\n'.format("")
    if gen_html:
        print "</pre>"
        print "</body>"
        print "</html>"
def find_results_out_files():
    """Return the names of all *_results.out files in the current directory."""
    return [name for name in glob.glob('*_results.out')]
def find_name(file_name):
    """Extract the software name from a results file name.

    File names look like <host>_<software>_results.out; returns the
    <software> part, or None when file_name does not match.

    The original executed `s.groups(0)[0]` unconditionally, raising
    AttributeError on non-matching names; it also left the '.' in
    'results.out' unescaped and computed a dead `sw_name` local.
    """
    s = re.search(r'[a-zA-Z0-9]+_([a-zA-Z0-9]+)_results\.out', file_name)
    if s is None:
        return None
    return s.group(1)
def extract_sw_names(results_out_files):
    """Map each results file name to its software name via find_name()."""
    return [find_name(file_name) for file_name in results_out_files]
def collect_results(f, sw_name):
    # Parse one open *_results.out file. Each record is a whitespace-
    # separated line of 2-4 columns (module name first); for every category
    # except python2/python3 it is immediately followed by a line carrying
    # the submitted PBS job id.
    #
    # Returns (modules, dlist1, dlist2, dlist3, pbs_job_ids); the meaning of
    # the dlist columns depends on the category - see
    # get_compilernames_dirnames_jobnames_results().
    modules = []
    dlist1 = []
    dlist2 = []
    dlist3 = []
    pbs_job_ids = []
    for line in f:
        # Split the record on whitespace; one-column lines are ignored.
        data = re.split("\s*",line.strip())
        if len(data) > 1:
            modules.append(data[0])
            if len(data) == 2:
                dlist1.append(data[1])
            elif len(data) == 3:
                dlist1.append(data[1])
                dlist2.append(data[2])
            elif len(data) == 4:
                dlist1.append(data[1])
                dlist2.append(data[2])
                dlist3.append(data[3])
            else:
                print "Warning: Do not recognize the line, ",line
            # python2/python3 tests submit no PBS job; keep lists parallel.
            if sw_name == "python2" or sw_name == "python3":
                pbs_job_ids.append(None)
                continue
            # Consume the following line holding the job id (f.next() is
            # the Python 2 file-iterator protocol).
            line2 = f.next()
            s2 = re.search('\d+',line2.strip())
            if s2 is not None:
                pbs_job_ids.append(s2.group())
            else:
                pbs_job_ids.append("not_run")
    return modules,dlist1,dlist2,dlist3,pbs_job_ids
def parse_arg(arg):
    """Split a variable-length argument list into (dir_name, job_name).

    One element  -> (None, job_name)
    Two elements -> (dir_name, job_name)
    Anything else prints a warning and returns (None, None).

    The original never initialized dir_name, so a one-element list (or any
    invalid length) raised UnboundLocalError at the return statement.
    """
    dir_name = None
    job_name = None
    len_arg = len(arg)
    if len_arg == 1:
        job_name = arg[0]
    elif len_arg == 2:
        dir_name = arg[0]
        job_name = arg[1]
    else:
        # Single parenthesized argument keeps the output identical under
        # the file's Python 2 print statement.
        print("Warning: incorrect number of variable args passed, " + str(arg))
    return dir_name,job_name
def get_file_path(dir_name, file):
    """Join dir_name and file; return file unchanged when dir_name is falsy."""
    return os.path.join(dir_name, file) if dir_name else file
def get_file_name(file, job_name, job_id):
    """Resolve a logical file label to an actual file name for this job.

    Unknown labels are returned unchanged and treated as literal file
    names (e.g. "OUTCAR", "log.lammps").
    """
    # PBS stdout/stderr names embed the job id.
    if file == "pbs_stdout":
        return job_name + ".o" + job_id
    if file == "pbs_stderr":
        return job_name + ".e" + job_id
    # The remaining labels map to a fixed suffix appended to the job name.
    suffixes = {
        "mcnp_o": ".o",
        "mcnp_r": ".r",
        "mcnp_s": ".s",
        "mcnp_m": ".m",
        "scale_out": ".out",
        "serpent_out": ".out",
        "gaussian_out": ".out",
    }
    if file in suffixes:
        return job_name + suffixes[file]
    return file
def check_file_patterns_found(file_pattern_d, dir_name, job_name, job_id):
    """Return True only if every regex pattern is found in its output file.

    file_pattern_d maps a logical file label (see get_file_name) to a
    list of regex patterns that must each match at least one line.

    Fixes over the original:
      * returns False immediately on a missing pattern - the original
        kept scanning later files, so an early failure could be masked
        when a later file's patterns all matched;
      * closes the file handle (it was leaked);
      * `found` was unbound (UnboundLocalError) for an empty pattern
        dict - an empty dict now vacuously returns True;
      * `except IOError, e` is Python-2-only syntax; `as` works in both.
    """
    for file in file_pattern_d:
        file_name = get_file_name(file, job_name, job_id)
        file_path = get_file_path(dir_name, file_name)
        try:
            f = open(file_path,"r")
        except IOError as e:
            print("Error: could not open file " + str(e))
            return False
        try:
            lines = f.readlines()
        finally:
            f.close()
        for pattern in file_pattern_d[file]:
            if not any(re.search(pattern, line) is not None for line in lines):
                return False
    return True
def check_file_patterns_found_cnt(file_pattern_d, dir_name, job_name, job_id):
    # Count pattern occurrences and compare against NUM_MPI_PROCS. Used for
    # the mpi hello-world job: each MPI rank prints the pattern once, so
    # exactly NUM_MPI_PROCS matches means "passed" and fewer means "check".
    #
    # NOTE(review): re.findall() never returns None, so `found` becomes True
    # on the first line regardless of matches; the `elif not found` branch is
    # therefore unreachable and zero matches yields "check", not "failed".
    # NOTE(review): more than NUM_MPI_PROCS matches leaves run_result at its
    # "failed" initialization - confirm that is intended.
    # NOTE(review): the file handle is never closed, and the run_result from
    # the last iterated file/pattern is what gets returned.
    for file in file_pattern_d:
        file_name = get_file_name(file, job_name, job_id)
        file_path = get_file_path(dir_name, file_name)
        f = open(file_path,"r")
        lines = f.readlines()
        for pattern in file_pattern_d[file]:
            found = False
            cnt = 0
            run_result = "failed"
            for line in lines:
                s = re.findall(pattern,line)
                if s is not None:
                    # Accumulate the total number of matches over all lines.
                    cnt = cnt + len(s)
                    found = True
            if cnt < NUM_MPI_PROCS:
                run_result = "check"
                break
            elif cnt == NUM_MPI_PROCS:
                run_result = "passed"
            elif not found:
                run_result = "failed"
                break
    return run_result
def check_files_exist(files_exist_l, dir_name, job_name, job_id):
    """Return True when every listed logical file resolves to an existing
    regular file (vacuously True for an empty list)."""
    for file in files_exist_l:
        path = get_file_path(dir_name, get_file_name(file, job_name, job_id))
        if not os.path.isfile(path):
            return False
    return True
def check_files_size_nonzero(files_check_size_l, dir_name, job_name, job_id):
    """Return True if any listed file is non-empty OR missing.

    The listed files (e.g. pbs_stderr) are expected to exist and be empty;
    content in one - or its absence - flags the run for manual checking.
    Scanning stops at the first such file.
    """
    for file in files_check_size_l:
        path = get_file_path(dir_name, get_file_name(file, job_name, job_id))
        # A missing file and a file with content both trigger the flag.
        if not os.path.isfile(path) or os.path.getsize(path) > 0:
            return True
    return False
def check_run_results_dirnames_jobnames_jobids(sw_name, dir_names, pbs_job_names, pbs_job_ids):
    # Grade each submitted job for one software category. Returns a list
    # parallel to pbs_job_ids holding "passed", "failed", "check" or
    # "running" per job (None placeholders for python2/python3, which
    # submit no PBS jobs).
    run_results = []
    if sw_name == "python2" or sw_name == "python3":
        run_results = [None]*len(dir_names)
    else:
        for dir_name, pbs_job_name, pbs_job_id in zip(dir_names, pbs_job_names, pbs_job_ids):
            # A job id without digits (e.g. "not_run") means submission failed.
            if re.search('\d+',pbs_job_id.strip()) is not None:
                pbs_stdout_file = get_file_name("pbs_stdout", pbs_job_name, pbs_job_id)
                pbs_stdout_filepath = get_file_path(dir_name, pbs_stdout_file)
                # PBS writes the .o file only once the job has finished.
                if os.path.isfile(pbs_stdout_filepath):
                    if check_file_patterns_found(check_file_patterns[sw_name]["file_patterns"], dir_name, pbs_job_name, pbs_job_id):
                        if sw_name == "mpi":
                            # mpi must print its pattern once per MPI rank.
                            run_result = check_file_patterns_found_cnt(check_file_patterns[sw_name]["file_patterns"], dir_name, pbs_job_name, pbs_job_id)
                        else:
                            run_result = "passed"
                        # Content in a file expected empty downgrades to "check".
                        if check_files_size_nonzero(check_file_patterns[sw_name]["check_file_sizes"], dir_name, pbs_job_name, pbs_job_id):
                            run_result = "check"
                        # Missing required output files override everything.
                        if not check_files_exist(check_file_patterns[sw_name]["files_exist"], dir_name, pbs_job_name, pbs_job_id):
                            run_result = "failed"
                    else:
                        run_result = "failed"
                else:
                    run_result = "running"
            else:
                run_result = "failed"
            run_results.append(run_result)
    return run_results
def get_compilernames_dirnames_jobnames_results(sw_name, dlist1, dlist2, dlist3):
    """Assign the positional columns parsed from a results file to their
    meaning, which depends on the software category.

    Returns (compiler_names, dir_names, pbs_job_names, results); columns a
    category does not use are filled with None placeholders of matching
    length.
    """
    n1 = len(dlist1)
    if sw_name in compile_sw_l:
        # Columns: compiler, job name, compile result.
        return dlist1, [None] * n1, dlist2, dlist3
    if sw_name in dir_sw_l:
        # Columns: run directory, job name.
        return [None] * n1, dlist1, dlist2, [None] * len(dlist2)
    if sw_name == "python2" or sw_name == "python3":
        # Columns: python module name, direct result (no PBS job).
        return dlist1, [None] * n1, [None] * n1, dlist2
    # Default: single column holding the job name.
    return [None] * n1, [None] * n1, dlist1, [None] * n1
def check_run_results(sw_name, dir_names, pbs_job_names, pbs_job_ids):
    # Thin wrapper kept for a shorter name at the call site in main().
    run_results = check_run_results_dirnames_jobnames_jobids(sw_name, dir_names, pbs_job_names, pbs_job_ids)
    return run_results
def update_report_dict(report_dict, sw_name, module_names, compiler_names, dir_names, pbs_job_names, pbs_job_ids, results, run_results):
    """Store one software category's parsed columns under
    report_dict[sw_name] and return the (mutated) report_dict."""
    entry = report_dict.setdefault(sw_name, {})
    entry["module_names"] = module_names
    entry["compiler_names"] = compiler_names
    entry["dir_names"] = dir_names
    entry["pbs_job_names"] = pbs_job_names
    entry["pbs_job_ids"] = pbs_job_ids
    entry["results"] = results
    entry["run_results"] = run_results
    return report_dict
def get_dateHost():
    """Read the run timestamp and hostname from datehost.out.

    The file is written by the test-suite driver: line 1 is the run date,
    line 2 the hostname.  The original leaked the file handle; a context
    manager closes it deterministically.
    """
    with open("datehost.out", "r") as f:
        lines = f.readlines()
    date = lines[0].strip()
    host = lines[1].strip()
    return date,host
def main():
    # Collect all *_results.out files in the working directory, grade every
    # job they reference and print the QA report to stdout.
    report_dict = {}
    run_date,run_hostname = get_dateHost()
    results_out_files = find_results_out_files()
    sw_names = extract_sw_names(results_out_files)  # NOTE(review): unused
    for results_out_file in results_out_files:
        sw_name = find_name(results_out_file)
        f = open(results_out_file, "r")
        modules,dlist1,dlist2,dlist3,pbs_job_ids = collect_results(f, sw_name)
        # Map the positional columns onto their category-dependent meaning.
        compiler_names, dir_names, pbs_job_names, results = get_compilernames_dirnames_jobnames_results(sw_name, dlist1, dlist2, dlist3)
        run_results = check_run_results(sw_name,dir_names,pbs_job_names,pbs_job_ids)
        report_dict = update_report_dict(report_dict, sw_name, modules, compiler_names, dir_names, pbs_job_names, pbs_job_ids, results, run_results)
    print_report(run_date,run_hostname,report_dict)
# Script entry point.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
#
#Copyright 2017 Battelle Energy Alliance, LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
"""Read HPC Software test suite output results files, determine
if results are correct or not and print QA report.
Initial Version Created on Dec 24, 2015 (by C.Garvey). """
import sys
import os
import re
import socket
import shutil
import glob
NUM_MPI_PROCS = 2
compile_sw_l = ["compiler", "mpi", "blas", "boost"]
dir_sw_l = ["vasp", "lammps", "helios", "cth", "mc21", "nwchem"]
check_file_patterns = {"compiler": {
"file_patterns": {
"pbs_stdout": ["code is working"]
},
"files_exist": [],
"check_file_sizes": ["pbs_stderr"]
},
"mpi": {
"file_patterns": {
"pbs_stdout": ["Hello"]
},
"files_exist": [],
"check_file_sizes": ["pbs_stderr"]
},
"blas": {
"file_patterns": {
"pbs_stdout": ["SUM C =\s+45"]
},
"files_exist": [],
"check_file_sizes": ["pbs_stderr"]
},
"boost": {
"file_patterns": {
"pbs_stdout": ["Working"]
},
"files_exist": [],
"check_file_sizes": ["pbs_stderr"]
},
"abaqus": {
"file_patterns": {
"pbs_stdout": [
"^Begin Abaqus/Standard Analysis$",
"^End Abaqus/Standard Analysis$",
"COMPLETED"
]
},
"files_exist": [],
"check_file_sizes": []
},
"starccm": {
"file_patterns": {
"pbs_stdout": [
"^Configuring finished$",
"^Saving:",
"^Server process exited with code\s*:*\s+0$"
]
},
"files_exist": [],
"check_file_sizes": []
},
"vasp": {
"file_patterns": {
"OUTCAR": [
"aborting loop because EDIFF is reached",
"General timing and accounting informations for this job"
]
},
"files_exist": [],
"check_file_sizes": []
},
"matlab": {
"file_patterns": {
"pbs_stdout": [
"^e =$",
"991$"
]
},
"files_exist": [],
"check_file_sizes": []
},
"m": {
"file_patterns": {
"pbs_stdout": [
"mcrun is done$"
]
},
"files_exist": ["mcnp_o","mcnp_r","mcnp_s"],
"check_file_sizes": []
},
"x": {
"file_patterns": {
"pbs_stdout": [
"mcrun is done$"
]
},
"files_exist": ["mcnp_o","mcnp_r","mcnp_m"],
"check_file_sizes": []
},
"helios": {
"file_patterns": {
"pbs_stdout": [
"ZENITH Successful Completion"
]
},
"files_exist": [],
"check_file_sizes": ["pbs_stderr"]
},
"mc21": {
"file_patterns": {
"pbs_stdout": [
"Simulation Completed",
"Number of errors:\s*0"
]
},
"files_exist": [],
"check_file_sizes": []
},
"scale": {
"file_patterns": {
"scale_out": [
"normal termination"
]
},
"files_exist": [],
"check_file_sizes": []
},
"scale62": {
"file_patterns": {
"scale_out": [
"SCALE is finished"
]
},
"files_exist": [],
"check_file_sizes": []
},
"serpent": {
"file_patterns": {
"pbs_stdout": [
"Transport cycle completed"
]
},
"files_exist": ["serpent_out"],
"check_file_sizes": []
},
"gaussian": {
"file_patterns": {
"gaussian_out": [
"Normal termination of Gaussian"
]
},
"files_exist": [],
"check_file_sizes": []
},
"cth": {
"file_patterns": {
"pbs_stdout": [
"CALCULATION TERMINATED BECAUSE STOP TIME ACHIEVED"
]
},
"files_exist": ["octh"],
"check_file_sizes": []
},
"lammps": {
"file_patterns": {
"log.lammps": [
"MPI task timing breakdown",
"Total wall time"
]
},
"files_exist": [],
"check_file_sizes": []
},
"nwchem": {
"file_patterns": {
"pbs_stdout": [
"CITATION",
"Total times cpu"
]
},
"files_exist": ["ecpchho.db"],
"check_file_sizes": []
}
}
g_hostname = socket.gethostname()
#
if g_hostname == "falcon1" or g_hostname == "falcon2" or g_hostname == "falconpbs" or g_hostname == "service2":
clustername = "falcon"
else:
clustername = g_hostname
def concat_files(f1_name, f2_name, f_name):
    """Concatenate f1_name and (if it exists) f2_name into f_name.

    Used to merge a PBS job's stdout (.o) and stderr (.e) files into a
    single .oe file for the HTML report; the stderr file may be absent.

    The original leaked all three file handles (none were ever closed);
    context managers fix that without changing the output.
    """
    with open(f1_name, 'r') as f1:
        with open(f_name, 'w') as out:
            out.write(f1.read())
            # The second file is optional (e.g. no .e file was produced).
            if os.path.exists(f2_name):
                with open(f2_name, 'r') as f2:
                    out.write(f2.read())
def gaussian_html_link_str(str1, jobid, result):
file_o = str1 + ".o" + jobid
file_e = str1 + ".e" + jobid
file_out2 = str1 + ".oe" + jobid
file_out = re.split("/",file_out2)[-1]
concat_files(file_o,file_e,file_out)
result = "<a href=\"" + str1 + "\"" + "target=\"_blank\">" + result + "</a>"
return result
def html_link_str(str1, jobid, result):
file_o = str1 + ".o" + jobid
file_e = str1 + ".e" + jobid
file_out2 = str1 + ".oe" + jobid
file_out = re.split("/",file_out2)[-1]
concat_files(file_o,file_e,file_out)
result = "<a href=\"" + clustername + "/" + file_out + "\"" + "target=\"_blank\">" + result + "</a>"
return result
def log_link_str(hostname, file_str, result):
shutil.copy(hostname + "_" + file_str + ".log", hostname + "_" + file_str + ".txt")
link_str = "<a href=\"" + clustername + "/" + hostname + "_" + file_str + ".txt" + "\" target=\"_blank\">" + result + "</a>"
return link_str
def find_result_counts(l):
cnt_p = 0
cnt_f = 0
cnt_c = 0
cnt_r = 0
for e in l:
if e:
if e == "passed":
cnt_p = cnt_p + 1
elif e == "failed":
cnt_f = cnt_f + 1
elif e == "check":
cnt_c = cnt_c + 1
elif e == "running":
cnt_r = cnt_r + 1
else:
print "Warning: Do not recognize the string,",e
return cnt_p,cnt_f,cnt_c,cnt_r
def get_sw_result_totals(sw_dict):
passed_r,failed_r,checked_r,running_r = find_result_counts(sw_dict["results"])
passed_rr,failed_rr,checked_rr,running_rr = find_result_counts(sw_dict["run_results"])
total_passed = passed_r + passed_rr
total_failed = failed_r + failed_rr
total_checked = checked_r + checked_rr
total_running = running_r + running_rr
total_tests = total_passed + total_failed + total_checked + total_running
return total_tests,total_passed,total_failed,total_checked,total_running
def get_all_result_totals(report_dict):
total_tests = 0
total_passed = 0
total_failed = 0
total_checked = 0
total_running = 0
for sw_name in report_dict:
total_sw_tests,total_sw_passed,total_sw_failed,total_sw_checked,total_sw_running = get_sw_result_totals(report_dict[sw_name])
total_tests = total_tests + total_sw_tests
total_passed = total_passed + total_sw_passed
total_failed = total_failed + total_sw_failed
total_checked = total_checked + total_sw_checked
total_running = total_running + total_sw_running
return total_tests,total_passed,total_failed,total_checked,total_running
def print_report(run_date, hostname, report_dict):
if len(sys.argv) > 1 and re.match("h",sys.argv[1]):
gen_html = True
else:
gen_html = False
if gen_html:
print "<html>"
print "<body>"
print "<pre>"
print "\nSoftware Quality Assurance Tests run on ",hostname," at",run_date
for sw_name in report_dict:
# print "sw_name=",sw_name
print '\n{:#^142}\n'.format(sw_name + ' Tests')
for module,compiler,result,pbs_jobid,dir_name,pbs_jobname,run_result in zip(report_dict[sw_name]["module_names"],report_dict[sw_name]["compiler_names"],report_dict[sw_name]["results"],report_dict[sw_name]["pbs_job_ids"],report_dict[sw_name]["dir_names"],report_dict[sw_name]["pbs_job_names"],report_dict[sw_name]["run_results"]):
# print module,compiler,result,pbs_jobid,pbs_jobname,run_result
if sw_name in compile_sw_l:
if gen_html and re.search("failed$|check$",result):
link_str = log_link_str(hostname,sw_name+"_test",result)
print '{:<40}{:<10}{:-<80} {:<8}'.format(module,compiler,'Compile',link_str)
else:
print '{:<40}{:<10}{:-<80} {:<8}'.format(module,compiler,'Compile',result)
if gen_html and re.search("failed$|check$",run_result) and re.search("\d+",pbs_jobid):
link_str = html_link_str(pbs_jobname, pbs_jobid, run_result)
print '{:<40}{:<10}{:<4}{:-<76} {:<8}'.format(module,compiler,'Job',pbs_jobid,link_str)
else:
print '{:<40}{:<10}{:<4}{:-<76} {:<8}'.format(module,compiler,'Job',pbs_jobid,run_result)
elif sw_name in dir_sw_l:
if gen_html and re.search("failed$|check$",run_result) and re.search("\d+",pbs_jobid):
link_str = html_link_str(dir_name+"/"+pbs_jobname, pbs_jobid, run_result)
print '{:<40}{:<20}{:-<70} {:<8}'.format(module,dir_name,pbs_jobid,link_str)
else:
print '{:<40}{:<20}{:-<70} {:<8}'.format(module,dir_name,pbs_jobid,run_result)
elif sw_name == "python2" or sw_name == "python3":
if gen_html and re.search("failed$|check$",result):
link_str = log_link_str(hostname,sw_name+"_test",result)
print '{:<40}{:-<90} {:<8}'.format(module,compiler,link_str)
else:
print '{:<40}{:-<90} {:<8}'.format(module,compiler,result)
else:
if gen_html and re.search("failed$|check$",run_result) and re.search("\d+",pbs_jobid):
link_str = html_link_str(pbs_jobname, pbs_jobid, run_result)
print '{:<40}{:-<90} {:<8}'.format(module,pbs_jobid,link_str)
else:
print '{:<40}{:-<90} {:<8}'.format(module,pbs_jobid,run_result)
total_sw_tests,total_sw_passed,total_sw_failed,total_sw_checked,total_sw_running = get_sw_result_totals(report_dict[sw_name])
print '\n{:#<142}\n'.format("")
print 'Total Number of {} Tests = {} (Passed = {} Failed = {} Checked = {} Running = {})'.format(sw_name,total_sw_tests,total_sw_passed,total_sw_failed,total_sw_checked,total_sw_running)
# print '\n{:#<142}\n'.format("")
total_tests,total_passed,total_failed,total_checked,total_running = get_all_result_totals(report_dict)
print '\n{:#<142}\n'.format("")
print 'Total Number of Tests = {} (Passed = {} Failed = {} Checked = {} Running = {})'.format(total_tests,total_passed,total_failed,total_checked,total_running)
print '\n{:#<142}\n'.format("")
if gen_html:
print "</pre>"
print "</body>"
print "</html>"
def find_results_out_files():
results_out_files = []
for file in glob.glob('*_results.out'):
# print file
results_out_files.append(file)
return results_out_files
def find_name(file_name):
    """Extract the software name from a results file name.

    File names look like <host>_<software>_results.out; returns the
    <software> part, or None when file_name does not match.

    The original executed `s.groups(0)[0]` unconditionally, raising
    AttributeError on non-matching names; it also left the '.' in
    'results.out' unescaped and computed a dead `sw_name` local.
    """
    s = re.search(r'[a-zA-Z0-9]+_([a-zA-Z0-9]+)_results\.out', file_name)
    if s is None:
        return None
    return s.group(1)
def extract_sw_names(results_out_files):
sw_names = []
for file_name in results_out_files:
sw_name = find_name(file_name)
sw_names.append(sw_name)
return sw_names
def collect_results(f, sw_name):
modules = []
dlist1 = []
dlist2 = []
dlist3 = []
pbs_job_ids = []
for line in f:
# print line,
data = re.split("\s*",line.strip())
# s = re.search("passed$|failed$|check$",line.strip())
if len(data) > 1:
modules.append(data[0])
if len(data) == 2:
dlist1.append(data[1])
elif len(data) == 3:
dlist1.append(data[1])
dlist2.append(data[2])
elif len(data) == 4:
dlist1.append(data[1])
dlist2.append(data[2])
dlist3.append(data[3])
else:
print "Warning: Do not recognize the line, ",line
# print "next line1=",f.next()
if sw_name == "python2" or sw_name == "python3":
pbs_job_ids.append(None)
continue
line2 = f.next()
s2 = re.search('\d+',line2.strip())
if s2 is not None:
pbs_job_ids.append(s2.group())
else:
pbs_job_ids.append("not_run")
return modules,dlist1,dlist2,dlist3,pbs_job_ids
def parse_arg(arg):
    """Split a variable-length argument list into (dir_name, job_name).

    One element  -> (None, job_name)
    Two elements -> (dir_name, job_name)
    Anything else prints a warning and returns (None, None).

    The original never initialized dir_name, so a one-element list (or any
    invalid length) raised UnboundLocalError at the return statement.
    """
    dir_name = None
    job_name = None
    len_arg = len(arg)
    if len_arg == 1:
        job_name = arg[0]
    elif len_arg == 2:
        dir_name = arg[0]
        job_name = arg[1]
    else:
        # Single parenthesized argument keeps the output identical under
        # the file's Python 2 print statement.
        print("Warning: incorrect number of variable args passed, " + str(arg))
    return dir_name,job_name
def get_file_path(dir_name, file):
if dir_name:
file_path = os.path.join(dir_name, file)
else:
file_path = file
return file_path
def get_file_name(file, job_name, job_id):
if file == "pbs_stdout":
file_name = job_name + ".o" + job_id
elif file == "pbs_stderr":
file_name = job_name + ".e" + job_id
elif file == "mcnp_o":
file_name = job_name + ".o"
elif file == "mcnp_r":
file_name = job_name + ".r"
elif file == "mcnp_s":
file_name = job_name + ".s"
elif file == "mcnp_m":
file_name = job_name + ".m"
elif file == "scale_out":
file_name = job_name + ".out"
elif file == "serpent_out":
file_name = job_name + ".out"
elif file == "gaussian_out":
file_name = job_name + ".out"
else:
file_name = file
return file_name
def check_file_patterns_found(file_pattern_d, dir_name, job_name, job_id):
    """Return True only if every regex pattern is found in its output file.

    file_pattern_d maps a logical file label (see get_file_name) to a
    list of regex patterns that must each match at least one line.

    Fixes over the original:
      * returns False immediately on a missing pattern - the original
        kept scanning later files, so an early failure could be masked
        when a later file's patterns all matched;
      * closes the file handle (it was leaked);
      * `found` was unbound (UnboundLocalError) for an empty pattern
        dict - an empty dict now vacuously returns True;
      * `except IOError, e` is Python-2-only syntax; `as` works in both.
    """
    for file in file_pattern_d:
        file_name = get_file_name(file, job_name, job_id)
        file_path = get_file_path(dir_name, file_name)
        try:
            f = open(file_path,"r")
        except IOError as e:
            print("Error: could not open file " + str(e))
            return False
        try:
            lines = f.readlines()
        finally:
            f.close()
        for pattern in file_pattern_d[file]:
            if not any(re.search(pattern, line) is not None for line in lines):
                return False
    return True
def check_file_patterns_found_cnt(file_pattern_d, dir_name, job_name, job_id):
for file in file_pattern_d:
file_name = get_file_name(file, job_name, job_id)
file_path = get_file_path(dir_name, file_name)
f = open(file_path,"r")
lines = f.readlines()
for pattern in file_pattern_d[file]:
# print "search for pattern.",pattern
found = False
cnt = 0
run_result = "failed"
for line in lines:
s = re.findall(pattern,line)
if s is not None:
# print "found pattern.",pattern
cnt = cnt + len(s)
found = True
if cnt < NUM_MPI_PROCS:
run_result = "check"
break
elif cnt == NUM_MPI_PROCS:
run_result = "passed"
elif not found:
run_result = "failed"
break
return run_result
def check_files_exist(files_exist_l, dir_name, job_name, job_id):
files_exist = True
for file in files_exist_l:
file_name = get_file_name(file, job_name, job_id)
file_path = get_file_path(dir_name, file_name)
if os.path.isfile(file_path):
files_exist = True
else:
files_exist = False
break
return files_exist
def check_files_size_nonzero(files_check_size_l, dir_name, job_name, job_id):
files_size_nonzero = False
for file in files_check_size_l:
file_name = get_file_name(file, job_name, job_id)
file_path = get_file_path(dir_name, file_name)
if os.path.isfile(file_path):
if os.path.getsize(file_path) > 0:
files_size_nonzero = True
break
else:
files_size_nonzero = True
break
return files_size_nonzero
def check_run_results_dirnames_jobnames_jobids(sw_name, dir_names, pbs_job_names, pbs_job_ids):
    """Classify each PBS job of one software package.

    Args:
        sw_name: software key; selects the pattern set in the module-level
            check_file_patterns dict.
        dir_names: per-job working directories.
        pbs_job_names: per-job PBS job names.
        pbs_job_ids: per-job PBS job id strings.

    Returns:
        list: one entry per job -- "passed", "failed", "check" or
        "running"; a list of None for the python self-tests, which have
        no PBS jobs to inspect.
    """
    run_results = []
    if sw_name == "python2" or sw_name == "python3":
        run_results = [None]*len(dir_names)
    else:
        for dir_name, pbs_job_name, pbs_job_id in zip(dir_names, pbs_job_names, pbs_job_ids):
            # A usable job id contains digits; anything else means the job
            # never made it into the queue.
            # (fix: raw string -- '\d' is an invalid escape sequence in a
            # plain string literal on modern Python)
            if re.search(r'\d+', pbs_job_id.strip()) is not None:
                pbs_stdout_file = get_file_name("pbs_stdout", pbs_job_name, pbs_job_id)
                pbs_stdout_filepath = get_file_path(dir_name, pbs_stdout_file)
                if os.path.isfile(pbs_stdout_filepath):
                    if check_file_patterns_found(check_file_patterns[sw_name]["file_patterns"], dir_name, pbs_job_name, pbs_job_id):
                        if sw_name == "mpi":
                            # MPI runs additionally require one match per rank.
                            run_result = check_file_patterns_found_cnt(check_file_patterns[sw_name]["file_patterns"], dir_name, pbs_job_name, pbs_job_id)
                        else:
                            run_result = "passed"
                        # A non-empty file listed under "check_file_sizes"
                        # downgrades the result to "check".
                        if check_files_size_nonzero(check_file_patterns[sw_name]["check_file_sizes"], dir_name, pbs_job_name, pbs_job_id):
                            run_result = "check"
                        # A missing required output file overrides everything.
                        if not check_files_exist(check_file_patterns[sw_name]["files_exist"], dir_name, pbs_job_name, pbs_job_id):
                            run_result = "failed"
                    else:
                        run_result = "failed"
                else:
                    # Job was queued but produced no stdout file yet.
                    run_result = "running"
            else:
                run_result = "failed"
            run_results.append(run_result)
    return run_results
def get_compilernames_dirnames_jobnames_results(sw_name, dlist1, dlist2, dlist3):
    """Map the raw result lists onto (compiler_names, dir_names, pbs_job_names, results).

    The meaning of dlist1/dlist2/dlist3 depends on which category sw_name
    falls into (compiled software, directory-based software, python
    self-tests, or everything else); positions without data are filled
    with None placeholders of matching length.
    """
    n1 = len(dlist1)
    if sw_name in compile_sw_l:
        # compiled software: compilers / job names / results, no directories
        return dlist1, [None] * n1, dlist2, dlist3
    if sw_name in dir_sw_l:
        # directory-based software: directories / job names, no compilers or results
        return [None] * n1, dlist1, dlist2, [None] * len(dlist2)
    if sw_name == "python2" or sw_name == "python3":
        # python self-tests: compilers / results only
        return dlist1, [None] * n1, [None] * n1, dlist2
    # default: only job names are known
    return [None] * n1, [None] * n1, dlist1, [None] * n1
def check_run_results(sw_name, dir_names, pbs_job_names, pbs_job_ids):
    """Thin wrapper around check_run_results_dirnames_jobnames_jobids."""
    return check_run_results_dirnames_jobnames_jobids(
        sw_name, dir_names, pbs_job_names, pbs_job_ids)
def update_report_dict(report_dict, sw_name, module_names, compiler_names, dir_names, pbs_job_names, pbs_job_ids, results, run_results):
    """Store one software package's result lists under report_dict[sw_name].

    An existing entry for sw_name is updated in place (extra keys are
    preserved); a missing entry is created.  The (mutated) report_dict is
    returned for convenience.
    """
    entry = report_dict.setdefault(sw_name, {})
    entry.update({
        "module_names": module_names,
        "compiler_names": compiler_names,
        "dir_names": dir_names,
        "pbs_job_names": pbs_job_names,
        "pbs_job_ids": pbs_job_ids,
        "results": results,
        "run_results": run_results,
    })
    return report_dict
def get_dateHost():
    """Read the run date and hostname recorded in ``datehost.out``.

    The file is expected to contain the date on its first line and the
    hostname on its second line (both are stripped of whitespace).

    Returns:
        tuple: (date, host) as strings.
    """
    # "with" closes the handle; the original implementation leaked it.
    with open("datehost.out", "r") as f:
        lines = f.readlines()
    date = lines[0].strip()
    host = lines[1].strip()
    return date, host
def main():
    """Collect per-software results files, judge each run and print a QA report."""
    report_dict = {}
    run_date, run_hostname = get_dateHost()
    results_out_files = find_results_out_files()
    # NOTE(review): sw_names appears unused below; the call is kept in case
    # extract_sw_names has side effects -- confirm and remove if not.
    sw_names = extract_sw_names(results_out_files)
    for results_out_file in results_out_files:
        sw_name = find_name(results_out_file)
        # "with" closes the handle (the original leaked one per file);
        # assumes collect_results fully consumes the file before
        # returning -- TODO confirm.
        with open(results_out_file, "r") as f:
            modules, dlist1, dlist2, dlist3, pbs_job_ids = collect_results(f, sw_name)
        compiler_names, dir_names, pbs_job_names, results = get_compilernames_dirnames_jobnames_results(sw_name, dlist1, dlist2, dlist3)
        run_results = check_run_results(sw_name, dir_names, pbs_job_names, pbs_job_ids)
        report_dict = update_report_dict(report_dict, sw_name, modules, compiler_names, dir_names, pbs_job_names, pbs_job_ids, results, run_results)
    print_report(run_date, run_hostname, report_dict)
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
maya_scripts/02_create_blendshapes_nodes.py | johndpope/FacialRetargeting | 0 | 6614135 | <reponame>johndpope/FacialRetargeting
import maya.cmds as cmds
import numpy as np
import os
save_path = "C:/Users/Michael/PycharmProjects/FacialRetargeting/data/"
save_name = "mesh_name_list.npy"
# select the folder containing all the blendshapes
bs_group = "Louise_bs_GRP"
base_mesh = "Louise"
# get all blendshapes' meshes
mesh_list = cmds.ls(bs_group, dag=1, type="mesh") # get all blenshapes from the blenshape group folder
# remove names issue and make a list of string instead of that "maya" list [u"", u""]
mesh_list_tuple = []
for mesh in mesh_list:
remove_letters = 5 # somehow maya adds "Shape" at the end of the mesh
if 'ShapeOrig' in mesh: # ... and sometimes "ShapeOrig"
remove_letters = 9
# create blendshape string list
mesh_list_tuple.append(str(mesh[:-remove_letters]))
print("mesh_list_tuple")
print(mesh_list_tuple)
# create a blendshape nodes for every blendshape mesh
cmds.blendShape(mesh_list_tuple, base_mesh, name="bs_node")
# save mesh names
np.save(os.path.join(save_path, save_name), mesh_list_tuple)
| import maya.cmds as cmds
import numpy as np
import os
save_path = "C:/Users/Michael/PycharmProjects/FacialRetargeting/data/"
save_name = "mesh_name_list.npy"
# select the folder containing all the blendshapes
bs_group = "Louise_bs_GRP"
base_mesh = "Louise"
# get all blendshapes' meshes
mesh_list = cmds.ls(bs_group, dag=1, type="mesh") # get all blenshapes from the blenshape group folder
# remove names issue and make a list of string instead of that "maya" list [u"", u""]
mesh_list_tuple = []
for mesh in mesh_list:
remove_letters = 5 # somehow maya adds "Shape" at the end of the mesh
if 'ShapeOrig' in mesh: # ... and sometimes "ShapeOrig"
remove_letters = 9
# create blendshape string list
mesh_list_tuple.append(str(mesh[:-remove_letters]))
print("mesh_list_tuple")
print(mesh_list_tuple)
# create a blendshape nodes for every blendshape mesh
cmds.blendShape(mesh_list_tuple, base_mesh, name="bs_node")
# save mesh names
np.save(os.path.join(save_path, save_name), mesh_list_tuple) | en | 0.6564 | # select the folder containing all the blendshapes # get all blendshapes' meshes # get all blenshapes from the blenshape group folder # remove names issue and make a list of string instead of that "maya" list [u"", u""] # somehow maya adds "Shape" at the end of the mesh # ... and sometimes "ShapeOrig" # create blendshape string list # create a blendshape nodes for every blendshape mesh # save mesh names | 2.839286 | 3 |
Python-Projects/Python-Projects-CursoemVideo/pythonProject1/ex008.py | EduardoGGoveia/Python | 0 | 6614136 | <filename>Python-Projects/Python-Projects-CursoemVideo/pythonProject1/ex008.py
v = int(input('Digite o valor de metros: '))
print('O valor convertido para centímetros é {}, e para milímetros é {}.'.format(v * 100, v * 1000))
| <filename>Python-Projects/Python-Projects-CursoemVideo/pythonProject1/ex008.py
v = int(input('Digite o valor de metros: '))
print('O valor convertido para centímetros é {}, e para milímetros é {}.'.format(v * 100, v * 1000))
| none | 1 | 3.466405 | 3 | |
bsagr/numbss.py | safoex/bsagr | 3 | 6614137 | """
Here the physical state is represented by numpy float 1d array.
An additional Compiler could be used to compile actions and whatsoever.
"""
import numpy as np
class BeliefNumAction:
def __init__(self, masks, values, preconditions):
self.masks = masks
self.values = values
self.precs = preconditions
def possible_states(self, bss):
assert isinstance(bss, NumBSS)
sel = np.ones(bss.bss.shape[0], dtype=np.bool)
for prec in self.precs:
sel *= prec(bss.bss)
return sel
class AndCondition:
def __init__(self, values):
class NumBSS:
def __init__(self, state):
self.bss = None
if isinstance(state, NumBSS):
self.bss = state.bss
if isinstance(state, np.ndarray) and len(state.shape) == 2:
self.bss = state
else:
self.bss = np.array([state])
def apply(self, action: BeliefNumAction, selector):
assert isinstance(action, BeliefNumAction)
states_selected = np.sum(selector)
nbss = np.tile(self.bss[selector], (action.masks.shape[0], 1))
mask = np.repeat(action.masks, states_selected, axis=0)
act = np.repeat(action.values, states_selected, axis=0)
nbss = np.where(mask, act, nbss)
print(nbss)
nbss[:, 0] = nbss[:, 0] * act[:, 0]
return NumBSS(nbss)
def apply_all(self, action: BeliefNumAction):
return self.apply(action, action.possible_states(self))
def apply_whether(self):
class Compiler:
def __init__(self):
self.dict = {'prob': 0}
self.uuid = 1
self._size = None
self.revdict = ['prob']
def add_var(self, name):
assert self._size is None
if isinstance(name, list):
for n in name:
self.add_var(n)
return
assert isinstance(name, str)
self.dict[name] = self.uuid
self.revdict.append(name)
self.uuid += 1
def fix_model(self):
self._size = self.uuid
def size(self):
assert self._size is not None
return self._size
def make_belief_num_action(self, action, preconditions):
masks = np.zeros((len(action), self.size()), dtype=np.bool)
values = np.zeros((len(action), self.size()), dtype=np.float)
for i, outcome in enumerate(action):
for var, val in outcome.items():
masks[i][self.dict[var]] = True
values[i][self.dict[var]] = val
masks[:, 0] = False
return BeliefNumAction(masks, values, preconditions)
def make_equality_condition(self, var, val):
index = self.dict[var]
return lambda s: s[:, index] == val
def make_initial_state(self, variables: dict):
variables.update({'prob': 1})
state = [variables[self.revdict[i]] if self.revdict[i] in variables else 0 for i in range(self.size())]
return NumBSS(np.array(state))
if __name__ == "__main__":
comp = Compiler()
comp.add_var(['a', 'b', 'c'])
comp.fix_model()
c1 = comp.make_equality_condition('a', 0)
c2 = comp.make_equality_condition('a', 1)
a1 = comp.make_belief_num_action([
{
'a': 1,
'prob': 0.5
},
{
'a': 2,
'prob': 0.5
}
], [c1])
a2 = comp.make_belief_num_action([
{
'b': 0,
'prob': 0.6
},
{
'b': 1,
'prob': 0.4
}
], [c2])
ins = comp.make_initial_state({'c': 3})
print(ins.bss)
print(ins.apply_all(a1).apply_all(a2).bss)
| """
Here the physical state is represented by numpy float 1d array.
An additional Compiler could be used to compile actions and whatsoever.
"""
import numpy as np
class BeliefNumAction:
def __init__(self, masks, values, preconditions):
self.masks = masks
self.values = values
self.precs = preconditions
def possible_states(self, bss):
assert isinstance(bss, NumBSS)
sel = np.ones(bss.bss.shape[0], dtype=np.bool)
for prec in self.precs:
sel *= prec(bss.bss)
return sel
class AndCondition:
def __init__(self, values):
class NumBSS:
def __init__(self, state):
self.bss = None
if isinstance(state, NumBSS):
self.bss = state.bss
if isinstance(state, np.ndarray) and len(state.shape) == 2:
self.bss = state
else:
self.bss = np.array([state])
def apply(self, action: BeliefNumAction, selector):
assert isinstance(action, BeliefNumAction)
states_selected = np.sum(selector)
nbss = np.tile(self.bss[selector], (action.masks.shape[0], 1))
mask = np.repeat(action.masks, states_selected, axis=0)
act = np.repeat(action.values, states_selected, axis=0)
nbss = np.where(mask, act, nbss)
print(nbss)
nbss[:, 0] = nbss[:, 0] * act[:, 0]
return NumBSS(nbss)
def apply_all(self, action: BeliefNumAction):
return self.apply(action, action.possible_states(self))
def apply_whether(self):
class Compiler:
def __init__(self):
self.dict = {'prob': 0}
self.uuid = 1
self._size = None
self.revdict = ['prob']
def add_var(self, name):
assert self._size is None
if isinstance(name, list):
for n in name:
self.add_var(n)
return
assert isinstance(name, str)
self.dict[name] = self.uuid
self.revdict.append(name)
self.uuid += 1
def fix_model(self):
self._size = self.uuid
def size(self):
assert self._size is not None
return self._size
def make_belief_num_action(self, action, preconditions):
masks = np.zeros((len(action), self.size()), dtype=np.bool)
values = np.zeros((len(action), self.size()), dtype=np.float)
for i, outcome in enumerate(action):
for var, val in outcome.items():
masks[i][self.dict[var]] = True
values[i][self.dict[var]] = val
masks[:, 0] = False
return BeliefNumAction(masks, values, preconditions)
def make_equality_condition(self, var, val):
index = self.dict[var]
return lambda s: s[:, index] == val
def make_initial_state(self, variables: dict):
variables.update({'prob': 1})
state = [variables[self.revdict[i]] if self.revdict[i] in variables else 0 for i in range(self.size())]
return NumBSS(np.array(state))
if __name__ == "__main__":
comp = Compiler()
comp.add_var(['a', 'b', 'c'])
comp.fix_model()
c1 = comp.make_equality_condition('a', 0)
c2 = comp.make_equality_condition('a', 1)
a1 = comp.make_belief_num_action([
{
'a': 1,
'prob': 0.5
},
{
'a': 2,
'prob': 0.5
}
], [c1])
a2 = comp.make_belief_num_action([
{
'b': 0,
'prob': 0.6
},
{
'b': 1,
'prob': 0.4
}
], [c2])
ins = comp.make_initial_state({'c': 3})
print(ins.bss)
print(ins.apply_all(a1).apply_all(a2).bss)
| en | 0.945755 | Here the physical state is represented by numpy float 1d array. An additional Compiler could be used to compile actions and whatsoever. | 2.864501 | 3 |
UE4Parse/IoObjects/FImportedPackage.py | MinshuG/pyUE4Parse | 13 | 6614138 | from typing import List
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.IO.IoObjects.FIoChunkId import FIoChunkId
from UE4Parse.IoObjects.EIoChunkType import EIoChunkType
class FArc:
fromIndex: int
toIndex: int
def __init__(self, reader: BinaryStream):
self.fromIndex = reader.readInt32()
self.toIndex = reader.readInt32()
class FPackageId:
Id: int
def __init__(self, reader: BinaryStream):
self.Id = reader.readUInt64()
def __str__(self) -> str:
return str(self.Id)
class FImportedPackage:
index: FPackageId
Arcs: List[FArc]
def __init__(self, reader: BinaryStream):
self.index = FPackageId(reader)
self.Arcs = reader.readTArray(FArc, reader)
| from typing import List
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.IO.IoObjects.FIoChunkId import FIoChunkId
from UE4Parse.IoObjects.EIoChunkType import EIoChunkType
class FArc:
fromIndex: int
toIndex: int
def __init__(self, reader: BinaryStream):
self.fromIndex = reader.readInt32()
self.toIndex = reader.readInt32()
class FPackageId:
Id: int
def __init__(self, reader: BinaryStream):
self.Id = reader.readUInt64()
def __str__(self) -> str:
return str(self.Id)
class FImportedPackage:
index: FPackageId
Arcs: List[FArc]
def __init__(self, reader: BinaryStream):
self.index = FPackageId(reader)
self.Arcs = reader.readTArray(FArc, reader)
| none | 1 | 2.442401 | 2 | |
bha/__init__.py | alexisthual/BHA | 5 | 6614139 | <filename>bha/__init__.py
#from bha import BHA, BMDS
| <filename>bha/__init__.py
#from bha import BHA, BMDS
| en | 0.264802 | #from bha import BHA, BMDS | 1.116686 | 1 |
tests/prom/metrics/general/test_asm_diskgroup.py | IntershopCommunicationsAG/ish-monitoring-oracledb-exporter | 1 | 6614140 | <filename>tests/prom/metrics/general/test_asm_diskgroup.py
from unittest import TestCase
from prometheus_client.registry import CollectorRegistry
from app.prom.metrics.general.asm_diskgroup import ASMDiskGroup, NAME, TOTAL, FREE
from tests.helpers import setUpApp, with_context
class TestASMDiskGroup(TestCase):
def setUp(self):
setUpApp(self)
@with_context
def test_should_collect(self):
test_data_1 = {NAME: 'test_1', TOTAL: 300, FREE: 100}
test_data_2 = {NAME: 'test_2', TOTAL: 3, FREE: 1}
asm_diskgroup = ASMDiskGroup(CollectorRegistry())
asm_diskgroup.collect(self.app, rows=(_ for _ in [test_data_1, test_data_2]))
samples = next(iter(asm_diskgroup.total_size_metric.collect())).samples
iter_samples = iter(samples)
self.assert_sample_metrics(iter_samples, test_data_1, TOTAL)
self.assert_sample_metrics(iter_samples, test_data_2, TOTAL)
samples = next(iter(asm_diskgroup.free_space_metric.collect())).samples
iter_samples = iter(samples)
self.assert_sample_metrics(iter_samples, test_data_1, FREE)
self.assert_sample_metrics(iter_samples, test_data_2, FREE)
def assert_sample_metrics(self, iter_samples, test_data, value_name):
sample = next(iter_samples)
self.assertEqual(test_data[value_name], sample.value)
self.assertEqual(test_data[NAME], sample.labels[NAME])
| <filename>tests/prom/metrics/general/test_asm_diskgroup.py
from unittest import TestCase
from prometheus_client.registry import CollectorRegistry
from app.prom.metrics.general.asm_diskgroup import ASMDiskGroup, NAME, TOTAL, FREE
from tests.helpers import setUpApp, with_context
class TestASMDiskGroup(TestCase):
def setUp(self):
setUpApp(self)
@with_context
def test_should_collect(self):
test_data_1 = {NAME: 'test_1', TOTAL: 300, FREE: 100}
test_data_2 = {NAME: 'test_2', TOTAL: 3, FREE: 1}
asm_diskgroup = ASMDiskGroup(CollectorRegistry())
asm_diskgroup.collect(self.app, rows=(_ for _ in [test_data_1, test_data_2]))
samples = next(iter(asm_diskgroup.total_size_metric.collect())).samples
iter_samples = iter(samples)
self.assert_sample_metrics(iter_samples, test_data_1, TOTAL)
self.assert_sample_metrics(iter_samples, test_data_2, TOTAL)
samples = next(iter(asm_diskgroup.free_space_metric.collect())).samples
iter_samples = iter(samples)
self.assert_sample_metrics(iter_samples, test_data_1, FREE)
self.assert_sample_metrics(iter_samples, test_data_2, FREE)
def assert_sample_metrics(self, iter_samples, test_data, value_name):
sample = next(iter_samples)
self.assertEqual(test_data[value_name], sample.value)
self.assertEqual(test_data[NAME], sample.labels[NAME])
| none | 1 | 2.204236 | 2 | |
pypospack/potential/eamdens_mishin2004.py | eragasa/pypospack | 4 | 6614141 | __author__ = "<NAME>"
__copyright__ = "Copyright (C) 2017"
__license__ = "Simplified BSD License"
__version__ = 20171102
import copy,inspect
import numpy as np
from collections import OrderedDict
from pypospack.potential import EamDensityFunction
from pypospack.potential import determine_symbol_pairs
def func_cutoff_mishin2004(r, rc, hc, h0):
if isinstance(r,float):
if r > rc:
ind_rc = 0
else:
ind_rc = 1
xrc = (r-rc)/hc
psi_c = (xrc**4)/(1+xrc**4)
x0 = r/h0
psi_0 = (x0**4)/(1+x0**4)
psi = psi_c * psi_0 * ind_rc
else:
ind_rc = np.ones(r.size)
ind_rc[r > rc] = 0
xrc = (r-rc)/hc
psi_c = (xrc**4)/(1+xrc**4)
x0 = r/h0
psi_0 = (x0**4)/(1+x0**4)
psi = psi_c * psi_0 * ind_rc
return psi
def func_density_mishin2004(r,r0,A0,B0,C0,y,gamma):
z = r - r0
phi = A0 * z**y * np.exp(-gamma*z) * (1 + B0 * np.exp(-gamma*z)) + C0
return phi
def func_density_mishin2004_w_cutoff(r, r0, A0, B0, C0, y, gamma, rc, hc, h0):
phi = func_density_mishin2004(r, r0, A0, B0, C0, y, gamma)
psi = func_cutoff_mishin2004(r, rc, hc, h0)
return psi*phi
class Mishin2004DensityFunction(EamDensityFunction):
"""
Args:
symbols(list of str)
Attributes:
symbols(list of str)
potential_type(str): This is set to 'eamembed_universal'
parameter_names(list of str)
parameters(OrderedDict): The key is the symbol associated with the
embedding function. On initialization, the value of each parameter
is set to None.
density(OrderedDict): The key is the symbol associated with the
embedding function.
N_r(int)
d_r(float)
r_max(float)
r(numpy.ndarray)
"""
potential_type = 'eamdens_mishin2004'
density_function = func_density_mishin2004_w_cutoff
density_function_parameters = ['r0', 'A0','B0','C0','y','gamma', 'rc', 'hc', 'h0']
def __init__(self,symbols):
EamDensityFunction.__init__(
self,
symbols=symbols,
potential_type=Mishin2004DensityFunction.potential_type)
def _init_parameter_names(self):
self.parameter_names = []
for s in self.symbols:
for p in self.density_function_parameters:
pn = "{}_{}".format(s,p)
self.parameter_names.append(pn)
def _init_parameters(self):
self.parameters = OrderedDict()
for p in self.parameter_names:
self.parameters[p] = None
def evaluate(self,r,parameters,r_cut=None):
"""
Given a vector of interatomic distances, r, passed in as variable
r, and the associated parameters of the potential. This method
sets the density attribute.
Args:
r(numpy.ndarray): This should be named as rho because it
represents the electron density being evaluated.
parameters(OrderedDict): This is a dictionary of the parameters
of the embedding function for each atom. The key is a
string containing the ISO chemical symbol of the element.
The value should be a numeric value.
r_cut(float): This would be the density cutoff. However the
embedding energy is increasing with increasing electron
density so the a r_cut has no physical meaning. Any
variable passed into r_cut will be ignored.
"""
assert isinstance(r,np.ndarray) or isinstance(r,float)
assert isinstance(parameters,dict)
assert type(r_cut) in [int,float,type(None)]
# attribute.parameters[p] <--- arg:parameters[p]
for s in self.symbols:
for p in self.density_function_parameters:
pn = "{}_{}".format(s,p)
try:
self.parameters[pn] = parameters[pn]
except KeyError as e:
print(80*'-')
print("{:^80}".format("DEBUGGING INFORMATION"))
print(80*'-')
print('pn:{}'.format(pn))
print('arg -> parameters:')
for k,v in parameters.items():
print(" {}:{}".format(k,v))
print('attr -> density_func_parameters')
for v in self.density_function_parameters:
print(" {}".format(v))
raise
# cannot evaluate because
for pn,pv in self.parameters.items():
if pv is None:
return False
self.density_evaluations = OrderedDict()
# each species has a unique density function
for s in self.symbols:
params = [self.parameters['{}_{}'.format(s,k)] for k in self.density_function_parameters]
self.density_evaluations[s] = Mishin2004DensityFunction.density_function(r,*params)
return copy.deepcopy(self.density_evaluations)
if __name__ == '__main__':
symbols = ['Ni']
p = ExponentialDensityFunction(symbols=symbols)
print(p.potential_type)
| __author__ = "<NAME>"
__copyright__ = "Copyright (C) 2017"
__license__ = "Simplified BSD License"
__version__ = 20171102
import copy,inspect
import numpy as np
from collections import OrderedDict
from pypospack.potential import EamDensityFunction
from pypospack.potential import determine_symbol_pairs
def func_cutoff_mishin2004(r, rc, hc, h0):
if isinstance(r,float):
if r > rc:
ind_rc = 0
else:
ind_rc = 1
xrc = (r-rc)/hc
psi_c = (xrc**4)/(1+xrc**4)
x0 = r/h0
psi_0 = (x0**4)/(1+x0**4)
psi = psi_c * psi_0 * ind_rc
else:
ind_rc = np.ones(r.size)
ind_rc[r > rc] = 0
xrc = (r-rc)/hc
psi_c = (xrc**4)/(1+xrc**4)
x0 = r/h0
psi_0 = (x0**4)/(1+x0**4)
psi = psi_c * psi_0 * ind_rc
return psi
def func_density_mishin2004(r,r0,A0,B0,C0,y,gamma):
z = r - r0
phi = A0 * z**y * np.exp(-gamma*z) * (1 + B0 * np.exp(-gamma*z)) + C0
return phi
def func_density_mishin2004_w_cutoff(r, r0, A0, B0, C0, y, gamma, rc, hc, h0):
phi = func_density_mishin2004(r, r0, A0, B0, C0, y, gamma)
psi = func_cutoff_mishin2004(r, rc, hc, h0)
return psi*phi
class Mishin2004DensityFunction(EamDensityFunction):
"""
Args:
symbols(list of str)
Attributes:
symbols(list of str)
potential_type(str): This is set to 'eamembed_universal'
parameter_names(list of str)
parameters(OrderedDict): The key is the symbol associated with the
embedding function. On initialization, the value of each parameter
is set to None.
density(OrderedDict): The key is the symbol associated with the
embedding function.
N_r(int)
d_r(float)
r_max(float)
r(numpy.ndarray)
"""
potential_type = 'eamdens_mishin2004'
density_function = func_density_mishin2004_w_cutoff
density_function_parameters = ['r0', 'A0','B0','C0','y','gamma', 'rc', 'hc', 'h0']
def __init__(self,symbols):
EamDensityFunction.__init__(
self,
symbols=symbols,
potential_type=Mishin2004DensityFunction.potential_type)
def _init_parameter_names(self):
self.parameter_names = []
for s in self.symbols:
for p in self.density_function_parameters:
pn = "{}_{}".format(s,p)
self.parameter_names.append(pn)
def _init_parameters(self):
self.parameters = OrderedDict()
for p in self.parameter_names:
self.parameters[p] = None
def evaluate(self,r,parameters,r_cut=None):
"""
Given a vector of interatomic distances, r, passed in as variable
r, and the associated parameters of the potential. This method
sets the density attribute.
Args:
r(numpy.ndarray): This should be named as rho because it
represents the electron density being evaluated.
parameters(OrderedDict): This is a dictionary of the parameters
of the embedding function for each atom. The key is a
string containing the ISO chemical symbol of the element.
The value should be a numeric value.
r_cut(float): This would be the density cutoff. However the
embedding energy is increasing with increasing electron
density so the a r_cut has no physical meaning. Any
variable passed into r_cut will be ignored.
"""
assert isinstance(r,np.ndarray) or isinstance(r,float)
assert isinstance(parameters,dict)
assert type(r_cut) in [int,float,type(None)]
# attribute.parameters[p] <--- arg:parameters[p]
for s in self.symbols:
for p in self.density_function_parameters:
pn = "{}_{}".format(s,p)
try:
self.parameters[pn] = parameters[pn]
except KeyError as e:
print(80*'-')
print("{:^80}".format("DEBUGGING INFORMATION"))
print(80*'-')
print('pn:{}'.format(pn))
print('arg -> parameters:')
for k,v in parameters.items():
print(" {}:{}".format(k,v))
print('attr -> density_func_parameters')
for v in self.density_function_parameters:
print(" {}".format(v))
raise
# cannot evaluate because
for pn,pv in self.parameters.items():
if pv is None:
return False
self.density_evaluations = OrderedDict()
# each species has a unique density function
for s in self.symbols:
params = [self.parameters['{}_{}'.format(s,k)] for k in self.density_function_parameters]
self.density_evaluations[s] = Mishin2004DensityFunction.density_function(r,*params)
return copy.deepcopy(self.density_evaluations)
if __name__ == '__main__':
symbols = ['Ni']
p = ExponentialDensityFunction(symbols=symbols)
print(p.potential_type)
| en | 0.694995 | Args: symbols(list of str) Attributes: symbols(list of str) potential_type(str): This is set to 'eamembed_universal' parameter_names(list of str) parameters(OrderedDict): The key is the symbol associated with the embedding function. On initialization, the value of each parameter is set to None. density(OrderedDict): The key is the symbol associated with the embedding function. N_r(int) d_r(float) r_max(float) r(numpy.ndarray) Given a vector of interatomic distances, r, passed in as variable r, and the associated parameters of the potential. This method sets the density attribute. Args: r(numpy.ndarray): This should be named as rho because it represents the electron density being evaluated. parameters(OrderedDict): This is a dictionary of the parameters of the embedding function for each atom. The key is a string containing the ISO chemical symbol of the element. The value should be a numeric value. r_cut(float): This would be the density cutoff. However the embedding energy is increasing with increasing electron density so the a r_cut has no physical meaning. Any variable passed into r_cut will be ignored. # attribute.parameters[p] <--- arg:parameters[p] # cannot evaluate because # each species has a unique density function | 2.073159 | 2 |
micropython/Sensorkit/FlameSensor.py | HaraldBlab/sensorkit-library | 0 | 6614142 | <reponame>HaraldBlab/sensorkit-library
from machine import Pin, ADC
# A sensor to measure the (flame) heat of the environment
# high values indicate not a (flame) heat
# low values indicate a (flame) heat
# trim poti adjusts around value 500 only-
class FlameSensor():
def __init__(self, DETECT, POT):
global pinDetect, pot
pinDetect = Pin(DETECT, Pin.IN)
pot = ADC(POT)
def value(self):
return pot.read()
def detect(self):
return pinDetect.value()
| from machine import Pin, ADC
# A sensor to measure the (flame) heat of the environment
# high values indicate not a (flame) heat
# low values indicate a (flame) heat
# trim poti adjusts around value 500 only-
class FlameSensor():
def __init__(self, DETECT, POT):
global pinDetect, pot
pinDetect = Pin(DETECT, Pin.IN)
pot = ADC(POT)
def value(self):
return pot.read()
def detect(self):
return pinDetect.value() | en | 0.739549 | # A sensor to measure the (flame) heat of the environment # high values indicate not a (flame) heat # low values indicate a (flame) heat # trim poti adjusts around value 500 only- | 3.296197 | 3 |
web-api/qiwi_api/client.py | Egor4ik325/anyberry | 1 | 6614143 | <reponame>Egor4ik325/anyberry
"""
QIWI API Python client library.
"""
from __future__ import annotations
from enum import Enum
from json.decoder import JSONDecodeError
from typing import Literal, TypedDict
from uuid import UUID
import requests
from dateutil.parser import isoparse
from django.conf import settings
from moneyed import Money
from requests.models import HTTPError
from .exceptions import QIWIAPIError
# API module constants
API_ORIGIN = "https://edge.qiwi.com"
P2P_API_ORIGIN = "https://api.qiwi.com"
class QIWIAPIClient:
    """Client for the QIWI P2P bill API.

    NOTE(review): the original docstring called this a "Singleton", but no
    singleton machinery is implemented -- every instantiation creates its
    own ``requests.Session``.  Confirm intent before relying on that claim.
    """

    # API configuration, read from Django settings at class-creation time.
    api_token = settings.QIWI_API_TOKEN
    public_key = settings.QIWI_P2P_PUBLIC_KEY
    secret_key = settings.QIWI_P2P_SECRET_KEY
    # URL templates; ``{bill_id}`` is substituted per request.
    endpoints = {
        "get_bill": f"{P2P_API_ORIGIN}/partner/bill/v1/bills/{{bill_id}}",
        "reject_bill": f"{P2P_API_ORIGIN}/partner/bill/v1/bills/{{bill_id}}/reject",
    }

    def __init__(self):
        """Create a session pre-configured for JSON and bearer auth."""
        base_headers = {
            "content-type": "application/json",
            "accept": "application/json",
            "authorization": f"Bearer {self.secret_key}",
        }
        self.session = requests.Session()
        self.session.headers = base_headers  # type: ignore

    def invoice_bill(self, bill_id: str, amount_value, amount_currency="RUB", comment="Berry bill"):
        """Issue (create) a bill via HTTP PUT and return the raw response dict.

        :param bill_id: unique bill identifier chosen by the caller.
        :param amount_value: amount string/number as expected by the API.
        :param amount_currency: ISO currency code; defaults to ``"RUB"``.
        :param comment: free-form comment shown to the payer.
        :raises QIWIAPIError: on transport/HTTP failure or a non-JSON body.
        """
        # TODO: add more parameters (customer id, phone, email + customFields)
        # Reuse the shared endpoint template instead of re-building the URL.
        base_url = self.endpoints["get_bill"].format(bill_id=bill_id)
        body = {
            "amount": {
                "currency": amount_currency,
                "value": amount_value,
            },
            "comment": comment,
        }
        # Catch only transport/HTTP errors; programming errors now propagate
        # unchanged instead of being masked as QIWIAPIError.
        try:
            response = self.session.put(base_url, json=body)
            response.raise_for_status()
        except requests.RequestException as e:
            raise QIWIAPIError from e
        try:
            return response.json()
        except JSONDecodeError as e:
            raise QIWIAPIError from e

    def get_bill(self, bill_id: UUID) -> "Bill":
        """Fetch the current state of a bill.

        :raises QIWIAPIError: on an HTTP error status or a non-JSON body.
        """
        # Annotation is quoted: ``Bill`` is defined later in the module and
        # this file has no ``from __future__ import annotations``.
        response = self.session.get(
            self.endpoints["get_bill"].format(bill_id=str(bill_id)))
        # Check for exceptions on 4xx, 5xx status codes.
        try:
            response.raise_for_status()
        except HTTPError as e:
            # TODO: parse body to get as much information about the error as possible
            raise QIWIAPIError from e
        # Check for parsing exceptions.
        try:
            bill_dict = response.json()
            return Bill(**bill_dict)
        except JSONDecodeError as e:
            raise QIWIAPIError from e

    def reject_bill(self, bill_id: UUID) -> "Bill":
        """Reject the bill and return the resulting bill with REJECTED status.

        :raises QIWIAPIError: on an HTTP error status or a non-JSON body.
        """
        response = self.session.post(
            self.endpoints["reject_bill"].format(bill_id=bill_id))
        # Check for client or server errors (give the caller error feedback).
        try:
            response.raise_for_status()
        except HTTPError as e:
            # TODO: initialize exception based on error response.data
            raise QIWIAPIError from e
        try:
            bill_dict = response.json()
        except JSONDecodeError as e:
            raise QIWIAPIError("Response was not in JSON file format.") from e
        return Bill(**bill_dict)
class Bill:
    """Bill model.

    Converts a bill dictionary (parsed JSON object) into a Python object by
    unpacking the dictionary into keyword arguments, which simplifies access
    to the fields on the resulting instance.

    All forward-referencing annotations below are quoted because this module
    evaluates annotations eagerly (no ``from __future__ import annotations``)
    and ``StatusValue`` / ``Bill`` are not bound yet when this class body runs.
    """

    class AmountDictType(TypedDict):
        # Shape of the "amount" object in the API response.
        currency: str
        value: str

    class StatusDictType(TypedDict):
        # Shape of the "status" object in the API response.
        value: "StatusValue"
        changedDateTime: str

    class Amount(Money):
        """QIWI API amount model (thin wrapper around ``moneyed.Money``)."""

        def __init__(self, value: str, currency: str):
            super().__init__(value, currency)

    class Status:
        """QIWI API bill status model."""

        def __init__(self, value: "StatusValue", changedDateTime: str):
            self.value = value
            # The API sends an ISO-8601 timestamp; dateutil's isoparse
            # handles variants that datetime.fromisoformat does not.
            self.changed_date_time = isoparse(changedDateTime)

    def __init__(
        self,
        siteId: str,
        billId: str,
        amount: "Bill.AmountDictType",
        status: "Bill.StatusDictType",
        comment: str,
        creationDateTime: str,
        expirationDateTime: str,
        payUrl: str,
        *args,
        **kwargs,
    ):
        """Build a Bill from the camelCase fields of an API response.

        Extra, unknown response fields are tolerated (and ignored) via
        ``*args``/``**kwargs`` so new API fields do not break this model.
        """
        self.site_id = siteId
        self.bill_id = UUID(billId)
        self.amount = Bill.Amount(**amount)
        self.status = Bill.Status(**status)
        self.comment = comment
        # datetime.fromisoformat doesn't support every ISO-8601 form the
        # API emits, hence dateutil's isoparse.
        self.creation_date_time = isoparse(creationDateTime)
        self.expiration_date_time = isoparse(expirationDateTime)
        self.pay_url = payUrl
# Closed set of bill lifecycle states returned by the QIWI P2P API.
_STATUS_NAMES = ("WAITING", "PAID", "REJECTED", "EXPIRED")

# Functional Enum form: each member's value equals its name, mirroring the
# raw strings the API sends.
Status = Enum("Status", {name: name for name in _STATUS_NAMES})
Status.__doc__ = "Bill status as reported by the QIWI P2P API."

# Type-hint counterpart of ``Status`` for annotating raw API status strings.
StatusValue = Literal["WAITING", "PAID", "REJECTED", "EXPIRED"]
| """
QIWI API Python client library.
"""
from __future__ import annotations
from enum import Enum
from json.decoder import JSONDecodeError
from typing import Literal, TypedDict
from uuid import UUID
import requests
from dateutil.parser import isoparse
from django.conf import settings
from moneyed import Money
from requests.models import HTTPError
from .exceptions import QIWIAPIError
# API module constants
# NOTE(review): API_ORIGIN is not referenced anywhere in this module --
# presumably the base of the main ("edge") QIWI API; confirm before removing.
API_ORIGIN = "https://edge.qiwi.com"
# Base URL for every P2P bill endpoint used by QIWIAPIClient below.
P2P_API_ORIGIN = "https://api.qiwi.com"
class QIWIAPIClient:
    """Client for the QIWI P2P bill API.

    NOTE(review): the original docstring called this a "Singleton", but no
    singleton machinery is implemented -- every instantiation creates its
    own ``requests.Session``.  Confirm intent before relying on that claim.
    """

    # API configuration, read from Django settings at class-creation time.
    api_token = settings.QIWI_API_TOKEN
    public_key = settings.QIWI_P2P_PUBLIC_KEY
    secret_key = settings.QIWI_P2P_SECRET_KEY
    # URL templates; ``{bill_id}`` is substituted per request.
    endpoints = {
        "get_bill": f"{P2P_API_ORIGIN}/partner/bill/v1/bills/{{bill_id}}",
        "reject_bill": f"{P2P_API_ORIGIN}/partner/bill/v1/bills/{{bill_id}}/reject",
    }

    def __init__(self):
        """Create a session pre-configured for JSON and bearer auth."""
        base_headers = {
            "content-type": "application/json",
            "accept": "application/json",
            "authorization": f"Bearer {self.secret_key}",
        }
        self.session = requests.Session()
        self.session.headers = base_headers  # type: ignore

    def invoice_bill(self, bill_id: str, amount_value, amount_currency="RUB", comment="Berry bill"):
        """Issue (create) a bill via HTTP PUT and return the raw response dict.

        :param bill_id: unique bill identifier chosen by the caller.
        :param amount_value: amount string/number as expected by the API.
        :param amount_currency: ISO currency code; defaults to ``"RUB"``.
        :param comment: free-form comment shown to the payer.
        :raises QIWIAPIError: on transport/HTTP failure or a non-JSON body.
        """
        # TODO: add more parameters (customer id, phone, email + customFields)
        # Reuse the shared endpoint template instead of re-building the URL.
        base_url = self.endpoints["get_bill"].format(bill_id=bill_id)
        body = {
            "amount": {
                "currency": amount_currency,
                "value": amount_value,
            },
            "comment": comment,
        }
        # Catch only transport/HTTP errors; programming errors now propagate
        # unchanged instead of being masked as QIWIAPIError.
        try:
            response = self.session.put(base_url, json=body)
            response.raise_for_status()
        except requests.RequestException as e:
            raise QIWIAPIError from e
        try:
            return response.json()
        except JSONDecodeError as e:
            raise QIWIAPIError from e

    def get_bill(self, bill_id: UUID) -> "Bill":
        """Fetch the current state of a bill.

        :raises QIWIAPIError: on an HTTP error status or a non-JSON body.
        """
        response = self.session.get(
            self.endpoints["get_bill"].format(bill_id=str(bill_id)))
        # Check for exceptions on 4xx, 5xx status codes.
        try:
            response.raise_for_status()
        except HTTPError as e:
            # TODO: parse body to get as much information about the error as possible
            raise QIWIAPIError from e
        # Check for parsing exceptions.
        try:
            bill_dict = response.json()
            return Bill(**bill_dict)
        except JSONDecodeError as e:
            raise QIWIAPIError from e

    def reject_bill(self, bill_id: UUID) -> "Bill":
        """Reject the bill and return the resulting bill with REJECTED status.

        :raises QIWIAPIError: on an HTTP error status or a non-JSON body.
        """
        response = self.session.post(
            self.endpoints["reject_bill"].format(bill_id=bill_id))
        # Check for client or server errors (give the caller error feedback).
        try:
            response.raise_for_status()
        except HTTPError as e:
            # TODO: initialize exception based on error response.data
            raise QIWIAPIError from e
        try:
            bill_dict = response.json()
        except JSONDecodeError as e:
            raise QIWIAPIError("Response was not in JSON file format.") from e
        return Bill(**bill_dict)
class Bill:
    """Bill model.

    Converts a bill dictionary (parsed JSON object) into a Python object by
    unpacking the dictionary into keyword arguments, which simplifies access
    to the fields on the resulting instance.
    """

    class AmountDictType(TypedDict):
        # Shape of the "amount" object in the API response.
        currency: str
        value: str

    class StatusDictType(TypedDict):
        # Shape of the "status" object in the API response.
        value: "StatusValue"
        changedDateTime: str

    class Amount(Money):
        """QIWI API amount model (thin wrapper around ``moneyed.Money``)."""

        def __init__(self, value: str, currency: str):
            super().__init__(value, currency)

    class Status:
        """QIWI API bill status model."""

        def __init__(self, value: "StatusValue", changedDateTime: str):
            self.value = value
            # The API sends an ISO-8601 timestamp; dateutil's isoparse
            # handles variants that datetime.fromisoformat does not.
            self.changed_date_time = isoparse(changedDateTime)

    def __init__(
        self,
        siteId: str,
        billId: str,
        amount: "Bill.AmountDictType",
        status: "Bill.StatusDictType",
        comment: str,
        creationDateTime: str,
        expirationDateTime: str,
        payUrl: str,
        *args,
        **kwargs,
    ):
        """Build a Bill from the camelCase fields of an API response.

        Extra, unknown response fields are tolerated (and ignored) via
        ``*args``/``**kwargs`` so new API fields do not break this model.
        """
        self.site_id = siteId
        self.bill_id = UUID(billId)
        self.amount = Bill.Amount(**amount)
        self.status = Bill.Status(**status)
        self.comment = comment
        # datetime.fromisoformat doesn't support every ISO-8601 form the
        # API emits, hence dateutil's isoparse.
        self.creation_date_time = isoparse(creationDateTime)
        self.expiration_date_time = isoparse(expirationDateTime)
        self.pay_url = payUrl
# Closed set of bill lifecycle states returned by the QIWI P2P API.
_STATUS_NAMES = ("WAITING", "PAID", "REJECTED", "EXPIRED")

# Functional Enum form: each member's value equals its name, mirroring the
# raw strings the API sends.
Status = Enum("Status", {name: name for name in _STATUS_NAMES})
Status.__doc__ = "Bill status as reported by the QIWI P2P API."

# Type-hint counterpart of ``Status`` for annotating raw API status strings.
StatusValue = Literal["WAITING", "PAID", "REJECTED", "EXPIRED"]
user_profile/admin.py | KeoH/django-keoh-userprofile | 9 | 6614144 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.safestring import mark_safe
from .models import UserProfile
class UserProfileAdmin(UserAdmin):
list_display = UserAdmin.list_display + ('avatar_admin',)
def avatar_admin(self, obj):
return mark_safe('<figure><img width="60px" height="60px" src="{}"></figure>'.format(obj.avatar.url)) # noqa
avatar_admin.allow_tags = True
avatar_admin.short_description = 'Avatar'
fieldsets = UserAdmin.fieldsets + (
('User Profile', {'fields': (
'avatar',
)}),
)
admin.site.register(UserProfile, UserProfileAdmin)
| from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.safestring import mark_safe
from .models import UserProfile
class UserProfileAdmin(UserAdmin):
list_display = UserAdmin.list_display + ('avatar_admin',)
def avatar_admin(self, obj):
return mark_safe('<figure><img width="60px" height="60px" src="{}"></figure>'.format(obj.avatar.url)) # noqa
avatar_admin.allow_tags = True
avatar_admin.short_description = 'Avatar'
fieldsets = UserAdmin.fieldsets + (
('User Profile', {'fields': (
'avatar',
)}),
)
admin.site.register(UserProfile, UserProfileAdmin)
| none | 1 | 1.962065 | 2 | |
hw5.py | shirih/hw5 | 0 | 6614145 | import json
from typing import Union, Tuple
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import math
class QuestionnaireAnalysis:
"""
Reads and analyzes data generated by the questionnaire experiment.
Should be able to accept strings and pathlib.Path objects.
"""
def __init__(self, data_fname: Union[pathlib.Path, str]):
self.data_fname = pathlib.Path(data_fname).resolve()
if not self.data_fname.exists():
raise ValueError("Given file doesn't exist.")
def read_data(self):
"""Reads the json data located in self.data_fname into memory, to
the attribute self.data.
"""
self.data = pd.read_json(self.data_fname)
def show_age_distrib(self) -> Tuple[np.ndarray, np.ndarray]:
"""Calculates and plots the age distribution of the participants.
Returns
-------
hist : np.ndarray
Number of people in a given bin
bins : np.ndarray
Bin edges
"""
fig = plt.figure()
hist, edges, patches = fig.add_subplot(111).hist(self.data["age"], np.linspace(0, 100, 11))
return hist, edges
def remove_rows_without_mail(self) -> pd.DataFrame:
"""Checks self.data for rows with invalid emails, and removes them.
Returns
-------
df : pd.DataFrame
A corrected DataFrame, i.e. the same table but with the erroneous rows removed and
the (ordinal) index after a reset.
"""
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{1,3})+$'
for index, row in self.data.iterrows():
if not re.search(regex,row["email"]):
self.data.drop(index, inplace=True)
return self.data.reset_index()
def fill_na_with_mean(self) -> Tuple[pd.DataFrame, np.ndarray]:
"""Finds, in the original DataFrame, the subjects that didn't answer
all questions, and replaces that missing value with the mean of the
other grades for that student.
Returns
-------
df : pd.DataFrame
The corrected DataFrame after insertion of the mean grade
arr : np.ndarray
Row indices of the students that their new grades were generated
"""
return_df = self.data
grades_df = self.data.loc[:, "q1":"q5"].stack().unstack(level=0)
means = grades_df.mean(axis=0)
_, columns = np.where(grades_df.isna())
items_with_nans = np.unique(columns)
print(grades_df.fillna(means[items_with_nans], axis=0))
return_df.loc[:, "q1":"q5"] = grades_df.fillna(means[items_with_nans], axis=0).stack().unstack(level=0)
return return_df, items_with_nans
def score_subjects(self, maximal_nans_per_sub: int = 1) -> pd.DataFrame:
"""Calculates the average score of a subject and adds a new "score" column
with it.
If the subject has more than "maximal_nans_per_sub" NaN in his grades, the
score should be NA. Otherwise, the score is simply the mean of the other grades.
The datatype of score is UInt8, and the floating point raw numbers should be
rounded down.
Parameters
----------
maximal_nans_per_sub : int, optional
Number of allowed NaNs per subject before giving a NA score.
Returns
-------
pd.DataFrame
A new DF with a new column - "score".
"""
return_df = self.data
grades_df = self.data.loc[:, "q1":"q5"]
means = grades_df.mean(axis=1).apply(np.floor).astype('UInt8')
return_df['score'] = means.values
for index, row in grades_df.iterrows():
if row.isna().sum() > maximal_nans_per_sub:
row['score'] = np.nan
return_df.at[index] = row
print(return_df)
return return_df
| import json
from typing import Union, Tuple
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import math
class QuestionnaireAnalysis:
"""
Reads and analyzes data generated by the questionnaire experiment.
Should be able to accept strings and pathlib.Path objects.
"""
def __init__(self, data_fname: Union[pathlib.Path, str]):
self.data_fname = pathlib.Path(data_fname).resolve()
if not self.data_fname.exists():
raise ValueError("Given file doesn't exist.")
def read_data(self):
"""Reads the json data located in self.data_fname into memory, to
the attribute self.data.
"""
self.data = pd.read_json(self.data_fname)
def show_age_distrib(self) -> Tuple[np.ndarray, np.ndarray]:
"""Calculates and plots the age distribution of the participants.
Returns
-------
hist : np.ndarray
Number of people in a given bin
bins : np.ndarray
Bin edges
"""
fig = plt.figure()
hist, edges, patches = fig.add_subplot(111).hist(self.data["age"], np.linspace(0, 100, 11))
return hist, edges
def remove_rows_without_mail(self) -> pd.DataFrame:
"""Checks self.data for rows with invalid emails, and removes them.
Returns
-------
df : pd.DataFrame
A corrected DataFrame, i.e. the same table but with the erroneous rows removed and
the (ordinal) index after a reset.
"""
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{1,3})+$'
for index, row in self.data.iterrows():
if not re.search(regex,row["email"]):
self.data.drop(index, inplace=True)
return self.data.reset_index()
def fill_na_with_mean(self) -> Tuple[pd.DataFrame, np.ndarray]:
"""Finds, in the original DataFrame, the subjects that didn't answer
all questions, and replaces that missing value with the mean of the
other grades for that student.
Returns
-------
df : pd.DataFrame
The corrected DataFrame after insertion of the mean grade
arr : np.ndarray
Row indices of the students that their new grades were generated
"""
return_df = self.data
grades_df = self.data.loc[:, "q1":"q5"].stack().unstack(level=0)
means = grades_df.mean(axis=0)
_, columns = np.where(grades_df.isna())
items_with_nans = np.unique(columns)
print(grades_df.fillna(means[items_with_nans], axis=0))
return_df.loc[:, "q1":"q5"] = grades_df.fillna(means[items_with_nans], axis=0).stack().unstack(level=0)
return return_df, items_with_nans
def score_subjects(self, maximal_nans_per_sub: int = 1) -> pd.DataFrame:
"""Calculates the average score of a subject and adds a new "score" column
with it.
If the subject has more than "maximal_nans_per_sub" NaN in his grades, the
score should be NA. Otherwise, the score is simply the mean of the other grades.
The datatype of score is UInt8, and the floating point raw numbers should be
rounded down.
Parameters
----------
maximal_nans_per_sub : int, optional
Number of allowed NaNs per subject before giving a NA score.
Returns
-------
pd.DataFrame
A new DF with a new column - "score".
"""
return_df = self.data
grades_df = self.data.loc[:, "q1":"q5"]
means = grades_df.mean(axis=1).apply(np.floor).astype('UInt8')
return_df['score'] = means.values
for index, row in grades_df.iterrows():
if row.isna().sum() > maximal_nans_per_sub:
row['score'] = np.nan
return_df.at[index] = row
print(return_df)
return return_df
| en | 0.842894 | Reads and analyzes data generated by the questionnaire experiment. Should be able to accept strings and pathlib.Path objects. Reads the json data located in self.data_fname into memory, to the attribute self.data. Calculates and plots the age distribution of the participants. Returns ------- hist : np.ndarray Number of people in a given bin bins : np.ndarray Bin edges Checks self.data for rows with invalid emails, and removes them. Returns ------- df : pd.DataFrame A corrected DataFrame, i.e. the same table but with the erroneous rows removed and the (ordinal) index after a reset. Finds, in the original DataFrame, the subjects that didn't answer all questions, and replaces that missing value with the mean of the other grades for that student. Returns ------- df : pd.DataFrame The corrected DataFrame after insertion of the mean grade arr : np.ndarray Row indices of the students that their new grades were generated Calculates the average score of a subject and adds a new "score" column with it. If the subject has more than "maximal_nans_per_sub" NaN in his grades, the score should be NA. Otherwise, the score is simply the mean of the other grades. The datatype of score is UInt8, and the floating point raw numbers should be rounded down. Parameters ---------- maximal_nans_per_sub : int, optional Number of allowed NaNs per subject before giving a NA score. Returns ------- pd.DataFrame A new DF with a new column - "score". | 3.255137 | 3 |
creator/ingest_runs/migrations/0004_add_ingest_error_msg.py | kids-first/kf-api-study-creator | 3 | 6614146 | <reponame>kids-first/kf-api-study-creator
# Generated by Django 2.2.20 on 2021-05-13 17:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ingest_runs', '0003_add_validation_models'),
]
operations = [
migrations.AddField(
model_name='ingestrun',
name='error_msg',
field=models.TextField(blank=True, help_text='The error message that is a product of an ingest process failing. This field is populated in the exception handler before the fail method is called on the process.', null=True),
),
migrations.AddField(
model_name='validationrun',
name='error_msg',
field=models.TextField(blank=True, help_text='The error message that is a product of an ingest process failing. This field is populated in the exception handler before the fail method is called on the process.', null=True),
),
]
| # Generated by Django 2.2.20 on 2021-05-13 17:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ingest_runs', '0003_add_validation_models'),
]
operations = [
migrations.AddField(
model_name='ingestrun',
name='error_msg',
field=models.TextField(blank=True, help_text='The error message that is a product of an ingest process failing. This field is populated in the exception handler before the fail method is called on the process.', null=True),
),
migrations.AddField(
model_name='validationrun',
name='error_msg',
field=models.TextField(blank=True, help_text='The error message that is a product of an ingest process failing. This field is populated in the exception handler before the fail method is called on the process.', null=True),
),
] | en | 0.867361 | # Generated by Django 2.2.20 on 2021-05-13 17:41 | 1.718452 | 2 |
aldryn_search/urls.py | lab360-ch/aldryn-search | 11 | 6614147 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from aldryn_search.views import AldrynSearchView
urlpatterns = [
url('^$', AldrynSearchView.as_view(), name='aldryn-search'),
]
| # -*- coding: utf-8 -*-
from django.conf.urls import url
from aldryn_search.views import AldrynSearchView
urlpatterns = [
url('^$', AldrynSearchView.as_view(), name='aldryn-search'),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.371809 | 1 |
storage/state_ts_store.py | andkononykhin/plenum | 148 | 6614148 | from typing import Dict
from plenum.common.constants import DOMAIN_LEDGER_ID
from storage.kv_store import KeyValueStorage
from stp_core.common.log import getlogger
logger = getlogger()
class StateTsDbStorage():
def __init__(self, name: str, storages: Dict[int, KeyValueStorage]):
logger.debug("Initializing timestamp-rootHash storage")
self._storages = storages
self._name = name
def __repr__(self):
return self._name
def get(self, timestamp: int, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return None
value = storage.get(str(timestamp))
return value
def set(self, timestamp: int, root_hash: bytes, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return
storage.put(str(timestamp), root_hash)
def close(self):
for storage in self._storages.values():
storage.close()
def get_equal_or_prev(self, timestamp, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return None
return storage.get_equal_or_prev(str(timestamp))
def get_last_key(self, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return None
return storage.get_last_key()
| from typing import Dict
from plenum.common.constants import DOMAIN_LEDGER_ID
from storage.kv_store import KeyValueStorage
from stp_core.common.log import getlogger
logger = getlogger()
class StateTsDbStorage():
def __init__(self, name: str, storages: Dict[int, KeyValueStorage]):
logger.debug("Initializing timestamp-rootHash storage")
self._storages = storages
self._name = name
def __repr__(self):
return self._name
def get(self, timestamp: int, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return None
value = storage.get(str(timestamp))
return value
def set(self, timestamp: int, root_hash: bytes, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return
storage.put(str(timestamp), root_hash)
def close(self):
for storage in self._storages.values():
storage.close()
def get_equal_or_prev(self, timestamp, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return None
return storage.get_equal_or_prev(str(timestamp))
def get_last_key(self, ledger_id: int = DOMAIN_LEDGER_ID):
storage = self._storages.get(ledger_id)
if storage is None:
return None
return storage.get_last_key()
| none | 1 | 2.267263 | 2 | |
lib/live_cluster/client/client_util.py | jfwm2/aerospike-admin | 37 | 6614149 | # Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import itertools
import threading
from time import time
def info_to_dict(value, delimiter=";", ignore_field_without_key_value_delimiter=True):
"""
Simple function to convert string to dict
"""
if not value:
return {}
if isinstance(value, Exception):
return value
stat_dict = {}
_tmp_value_list = info_to_list(value, delimiter)
_value_list = []
delimiter2 = "="
if ignore_field_without_key_value_delimiter:
_value_list = _tmp_value_list
else:
# Sometimes value contains confusing delimiter
# In such cases, after splitting on delimiter, we get items without next delimiter (=).
# By default we ignore such items. But in some cases like dc configs we need to accept those and append to previous item.
# For ex. "dc-name=REMOTE_DC_1:nodes=2000:10:3:0:0:0:100:d+3000:int-ext-ipmap=172.16.58.3...."
# In this example, first split will give ["dc-name=REMOTE_DC_1", "nodes=2000", "10", "3",
# "0", "0", "100", "d+3000", "int-ext-ipmap=172.16.58.3", ....]. In such cases we need to append items
# (10, 3, 0, 0, 100, "d+3000") to previous valid item ("nodes=2000") with delimiter (":").
# It gives "nodes=2000:10:3:0:0:0:100:d+3000".
for _v in _tmp_value_list:
if delimiter2 not in _v:
try:
_value_list[-1] = str(_value_list[-1]) + delimiter + str(_v)
except Exception:
pass
else:
_value_list.append(_v)
stat_param = [info_to_tuple(sp, delimiter2) for sp in _value_list]
for g in itertools.groupby(stat_param, lambda x: x[0]):
try:
value = [v[1] for v in g[1]]
value = ",".join(sorted(value)) if len(value) > 1 else value[0]
stat_dict[g[0]] = value
except Exception:
# NOTE: 3.0 had a bug in stats at least prior to 3.0.44. This will
# ignore that bug.
# Not sure if this bug is fixed or not.. removing this try/catch
# results in things not working. TODO: investigate.
pass
return stat_dict
def info_to_dict_multi_level(
value,
keyname,
delimiter1=";",
delimiter2=":",
ignore_field_without_key_value_delimiter=True,
):
"""
Simple function to convert string to dict where string is format like
field1_section1=value1<delimiter2>field2_section1=value2<delimiter2>... <delimiter1> field1_section2=value3<delimiter2>field2_section2=value4<delimiter2>...
"""
if not value:
return {}
if isinstance(value, Exception):
return value
if not isinstance(keyname, list):
keyname = [keyname]
value_list = info_to_list(value, delimiter1)
value_dict = {}
if not isinstance(keyname, list):
return value_dict
for v in value_list:
values = info_to_dict(
v,
delimiter2,
ignore_field_without_key_value_delimiter=ignore_field_without_key_value_delimiter,
)
if not values or isinstance(values, Exception):
continue
for _k in keyname:
if _k not in values.keys():
continue
value_dict[values[_k]] = values
return value_dict
def info_colon_to_dict(value):
"""
Simple function to convert colon separated string to dict
"""
return info_to_dict(value, ":")
def info_to_list(value, delimiter=";"):
if isinstance(value, Exception):
return []
return re.split(delimiter, value)
def info_to_tuple(value, delimiter=":"):
return tuple(info_to_list(value, delimiter))
def info_valid(info_command_output):
return "" != info_command_output or "Error" not in info_command_output
def find_dns(endpoints):
if not endpoints:
return None
for e in endpoints:
if not e:
continue
if e.startswith("[") or e[0].isdigit():
continue
try:
return e.split(":")[0].strip()
except Exception:
pass
return None
def parse_peers_string(
peers_str, delim=",", ignore_chars_start="[", ignore_chars_end="]"
):
peers_list = []
if not peers_str or isinstance(peers_str, Exception):
return peers_list
peers_str = peers_str.strip()
if not peers_str:
return peers_list
if peers_str[0] in ignore_chars_start and peers_str[-1] in ignore_chars_end:
peers_str = peers_str[1:-1]
if not peers_str:
return peers_list
push_bracket = ignore_chars_start
pop_bracket = ignore_chars_end
b_stack = []
current_str = ""
for i in peers_str:
if i == delim:
if len(b_stack) > 0:
current_str += i
else:
peers_list.append(current_str.strip())
current_str = ""
continue
if i in push_bracket:
current_str += i
b_stack.append(i)
continue
if i in pop_bracket:
current_str += i
b_stack.pop()
continue
current_str += i
if current_str:
peers_list.append(current_str.strip())
return peers_list
def concurrent_map(func, data):
"""
Similar to the builtin function map(). But spawn a thread for each argument
and apply 'func' concurrently.
Note: unlie map(), we cannot take an iterable argument. 'data' should be an
indexable sequence.
"""
N = len(data)
result = [None] * N
# Uncomment following line to run single threaded.
# return [func(datum) for datum in data]
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [threading.Thread(target=task_wrapper, args=(i,)) for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
return result
class cached(object):
# Doesn't support lists, dicts and other unhashables
# Also doesn't support kwargs for reason above.
def __init__(self, func, ttl=0.5):
self.func = func
self.ttl = ttl
self.cache = {}
def __setitem__(self, key, value):
self.cache[key] = (value, time() + self.ttl)
def __getitem__(self, key):
if key in self.cache:
value, eol = self.cache[key]
if eol > time():
return value
self[key] = self.func(*key)
return self.cache[key][0]
def __call__(self, *args):
return self[args]
def flatten(list1):
"""
Simple function to flatten peers list
Format: [((node1 endpoint1 tuple), (node1 endpoint2 tuple), ..., (node1 endpointm tuple)), ....]
Example: [(("172.17.0.1",3000,None),), (("2001:db8:85a3::8a2e",6666,None), ("172.17.0.3",3004,None))]
"""
f_list = []
for i in list1:
if isinstance(i[0], tuple):
for j in i:
f_list.append(j)
else:
f_list.append(i)
return f_list
def remove_suffix(input_string, suffix):
"""
Simple function to remove suffix from input_string if available
"""
try:
input_string = input_string.strip()
if not input_string.endswith(suffix):
return input_string
return input_string[0 : input_string.rfind(suffix)]
except Exception:
return input_string
| # Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import itertools
import threading
from time import time
def info_to_dict(value, delimiter=";", ignore_field_without_key_value_delimiter=True):
"""
Simple function to convert string to dict
"""
if not value:
return {}
if isinstance(value, Exception):
return value
stat_dict = {}
_tmp_value_list = info_to_list(value, delimiter)
_value_list = []
delimiter2 = "="
if ignore_field_without_key_value_delimiter:
_value_list = _tmp_value_list
else:
# Sometimes value contains confusing delimiter
# In such cases, after splitting on delimiter, we get items without next delimiter (=).
# By default we ignore such items. But in some cases like dc configs we need to accept those and append to previous item.
# For ex. "dc-name=REMOTE_DC_1:nodes=2000:10:3:0:0:0:100:d+3000:int-ext-ipmap=172.16.58.3...."
# In this example, first split will give ["dc-name=REMOTE_DC_1", "nodes=2000", "10", "3",
# "0", "0", "100", "d+3000", "int-ext-ipmap=172.16.58.3", ....]. In such cases we need to append items
# (10, 3, 0, 0, 100, "d+3000") to previous valid item ("nodes=2000") with delimiter (":").
# It gives "nodes=2000:10:3:0:0:0:100:d+3000".
for _v in _tmp_value_list:
if delimiter2 not in _v:
try:
_value_list[-1] = str(_value_list[-1]) + delimiter + str(_v)
except Exception:
pass
else:
_value_list.append(_v)
stat_param = [info_to_tuple(sp, delimiter2) for sp in _value_list]
for g in itertools.groupby(stat_param, lambda x: x[0]):
try:
value = [v[1] for v in g[1]]
value = ",".join(sorted(value)) if len(value) > 1 else value[0]
stat_dict[g[0]] = value
except Exception:
# NOTE: 3.0 had a bug in stats at least prior to 3.0.44. This will
# ignore that bug.
# Not sure if this bug is fixed or not.. removing this try/catch
# results in things not working. TODO: investigate.
pass
return stat_dict
def info_to_dict_multi_level(
value,
keyname,
delimiter1=";",
delimiter2=":",
ignore_field_without_key_value_delimiter=True,
):
"""
Simple function to convert string to dict where string is format like
field1_section1=value1<delimiter2>field2_section1=value2<delimiter2>... <delimiter1> field1_section2=value3<delimiter2>field2_section2=value4<delimiter2>...
"""
if not value:
return {}
if isinstance(value, Exception):
return value
if not isinstance(keyname, list):
keyname = [keyname]
value_list = info_to_list(value, delimiter1)
value_dict = {}
if not isinstance(keyname, list):
return value_dict
for v in value_list:
values = info_to_dict(
v,
delimiter2,
ignore_field_without_key_value_delimiter=ignore_field_without_key_value_delimiter,
)
if not values or isinstance(values, Exception):
continue
for _k in keyname:
if _k not in values.keys():
continue
value_dict[values[_k]] = values
return value_dict
def info_colon_to_dict(value):
"""
Simple function to convert colon separated string to dict
"""
return info_to_dict(value, ":")
def info_to_list(value, delimiter=";"):
if isinstance(value, Exception):
return []
return re.split(delimiter, value)
def info_to_tuple(value, delimiter=":"):
return tuple(info_to_list(value, delimiter))
def info_valid(info_command_output):
return "" != info_command_output or "Error" not in info_command_output
def find_dns(endpoints):
if not endpoints:
return None
for e in endpoints:
if not e:
continue
if e.startswith("[") or e[0].isdigit():
continue
try:
return e.split(":")[0].strip()
except Exception:
pass
return None
def parse_peers_string(
peers_str, delim=",", ignore_chars_start="[", ignore_chars_end="]"
):
peers_list = []
if not peers_str or isinstance(peers_str, Exception):
return peers_list
peers_str = peers_str.strip()
if not peers_str:
return peers_list
if peers_str[0] in ignore_chars_start and peers_str[-1] in ignore_chars_end:
peers_str = peers_str[1:-1]
if not peers_str:
return peers_list
push_bracket = ignore_chars_start
pop_bracket = ignore_chars_end
b_stack = []
current_str = ""
for i in peers_str:
if i == delim:
if len(b_stack) > 0:
current_str += i
else:
peers_list.append(current_str.strip())
current_str = ""
continue
if i in push_bracket:
current_str += i
b_stack.append(i)
continue
if i in pop_bracket:
current_str += i
b_stack.pop()
continue
current_str += i
if current_str:
peers_list.append(current_str.strip())
return peers_list
def concurrent_map(func, data):
    """Concurrent analogue of the builtin map().

    Spawns one thread per element of *data*, applies *func* to every
    element in parallel, and returns the results in input order.

    Note: unlike map(), *data* must be an indexable sequence with a
    length, not an arbitrary iterable.
    """
    total = len(data)
    results = [None] * total

    # Each worker writes into the slot matching its input index, so the
    # output order matches the input order regardless of scheduling.
    def _worker(index):
        results[index] = func(data[index])

    workers = [
        threading.Thread(target=_worker, args=(index,)) for index in range(total)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return results
class cached(object):
    """Memoize a callable's results with a per-entry time-to-live.

    Limitations: positional, hashable arguments only -- no kwargs and
    no lists/dicts/other unhashables, because the argument tuple is
    used directly as the cache key.
    """

    def __init__(self, func, ttl=0.5):
        self.func = func
        self.ttl = ttl
        self.cache = {}

    def __setitem__(self, key, value):
        # Store the value alongside its absolute expiry timestamp.
        self.cache[key] = (value, time() + self.ttl)

    def __getitem__(self, key):
        entry = self.cache.get(key)
        if entry is not None:
            value, expires_at = entry
            if expires_at > time():
                return value
        # Miss or expired entry: recompute, cache, and serve.
        self[key] = self.func(*key)
        return self.cache[key][0]

    def __call__(self, *args):
        return self[args]
def flatten(list1):
    """Flatten a peers list by one level.

    Format: [((node1 ep1 tuple), ..., (node1 epM tuple)), ...]
    Example: [(("172.17.0.1",3000,None),),
              (("2001:db8:85a3::8a2e",6666,None), ("172.17.0.3",3004,None))]

    An item whose first element is itself a tuple is unpacked into the
    result; anything else is appended as-is.
    """
    flattened = []
    for item in list1:
        if isinstance(item[0], tuple):
            flattened.extend(item)
        else:
            flattened.append(item)
    return flattened
def remove_suffix(input_string, suffix):
    """Strip whitespace from *input_string*, then drop a trailing *suffix*.

    Returns the (stripped) string unchanged when the suffix is absent.
    Any failure (e.g. a non-string argument) falls back to returning
    the value as last successfully processed.
    """
    try:
        # Rebinding keeps the original fallback semantics: if a later
        # step fails, the except clause returns the stripped value.
        input_string = input_string.strip()
        if input_string.endswith(suffix):
            return input_string[: input_string.rfind(suffix)]
        return input_string
    except Exception:
        return input_string
| en | 0.736629 | # Copyright 2013-2021 Aerospike, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Simple function to convert string to dict # Sometimes value contains confusing delimiter # In such cases, after splitting on delimiter, we get items without next delimiter (=). # By default we ignore such items. But in some cases like dc configs we need to accept those and append to previous item. # For ex. "dc-name=REMOTE_DC_1:nodes=2000:10:3:0:0:0:100:d+3000:int-ext-ipmap=172.16.58.3...." # In this example, first split will give ["dc-name=REMOTE_DC_1", "nodes=2000", "10", "3", # "0", "0", "100", "d+3000", "int-ext-ipmap=172.16.58.3", ....]. In such cases we need to append items # (10, 3, 0, 0, 100, "d+3000") to previous valid item ("nodes=2000") with delimiter (":"). # It gives "nodes=2000:10:3:0:0:0:100:d+3000". # NOTE: 3.0 had a bug in stats at least prior to 3.0.44. This will # ignore that bug. # Not sure if this bug is fixed or not.. removing this try/catch # results in things not working. TODO: investigate. Simple function to convert string to dict where string is format like field1_section1=value1<delimiter2>field2_section1=value2<delimiter2>... <delimiter1> field1_section2=value3<delimiter2>field2_section2=value4<delimiter2>... Simple function to convert colon separated string to dict Similar to the builtin function map(). But spawn a thread for each argument and apply 'func' concurrently. Note: unlie map(), we cannot take an iterable argument. 
'data' should be an indexable sequence. # Uncomment following line to run single threaded. # return [func(datum) for datum in data] # wrapper to dispose the result in the right slot # Doesn't support lists, dicts and other unhashables # Also doesn't support kwargs for reason above. Simple function to flatten peers list Format: [((node1 endpoint1 tuple), (node1 endpoint2 tuple), ..., (node1 endpointm tuple)), ....] Example: [(("172.17.0.1",3000,None),), (("2001:db8:85a3::8a2e",6666,None), ("172.17.0.3",3004,None))] Simple function to remove suffix from input_string if available | 2.3058 | 2 |
code-colab/Mathematics - Rule of Product.py | Nahid-Hassan/Advanced-Machine-Learning | 1 | 6614150 | <filename>code-colab/Mathematics - Rule of Product.py
# Question 1
# Suppose we have 7 disjoint datasets and each dataset contains 12 data
# entries. How many data entries do we have in total?
no_of_datasets = 7
no_of_entries_in_each_dataset = 12
# Rule of product: disjoint groups of equal size multiply, so the total
# entry count is datasets x entries-per-dataset.
total_entries = no_of_datasets * no_of_entries_in_each_dataset
print(total_entries)  # 84
# Question 2: how many segments join the circles, where each segment
# connects one of the 6 upper circles to one of the 7 lower circles?
upper_segment_node = 6
lower_segment_node = 7
total_segments = upper_segment_node * lower_segment_node
print(total_segments) # 42 | <filename>code-colab/Mathematics - Rule of Product.py
# Question 1
# Suppose we have 7 disjoint datasets and each dataset contains 12 data
# entries. How many data entries do we have in total?
no_of_datasets = 7
no_of_entries_in_each_dataset = 12
# Rule of product: disjoint groups of equal size multiply, so the total
# entry count is datasets x entries-per-dataset.
total_entries = no_of_datasets * no_of_entries_in_each_dataset
print(total_entries)  # 84
# Question 2: how many segments join the circles, where each segment
# connects one of the 6 upper circles to one of the 7 lower circles?
upper_segment_node = 6
lower_segment_node = 7
total_segments = upper_segment_node * lower_segment_node
print(total_segments) # 42 | en | 0.943006 | # Question 1 # Suppose we have 7 disjoint datasets and each dataset contains 12 data entries. How many data entries do we have in total? # How many entries in total # Using rule of product, # 84 # What is the number of segments in the picture below? Each segment joins two circles. # 42 | 3.776293 | 4 |