hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acdeb78d51b0d6afe837a9253d6d4e5f17f7e59f | 6,263 | py | Python | docs/release/generate_release_notes.py | quantumjot/napari | f87e18641c529f6c592553b052805c9d75231b90 | [
"BSD-3-Clause"
] | null | null | null | docs/release/generate_release_notes.py | quantumjot/napari | f87e18641c529f6c592553b052805c9d75231b90 | [
"BSD-3-Clause"
] | null | null | null | docs/release/generate_release_notes.py | quantumjot/napari | f87e18641c529f6c592553b052805c9d75231b90 | [
"BSD-3-Clause"
] | 1 | 2020-07-19T18:03:35.000Z | 2020-07-19T18:03:35.000Z | """Generate the release notes automatically from Github pull requests.
Start with:
```
export GH_TOKEN=<your-gh-api-token>
```
Then, to include everything from a certain release to master:
```
python /path/to/generate_release_notes.py v0.14.0 master --version 0.15.0
```
Or to include only things between two releases:
```
python /path/to/generate_release_notes.py v0.14.2 v0.14.3 --version 0.14.3
```
You should probably redirect the output with:
```
python /path/to/generate_release_notes.py [args] | tee release_notes.md
```
You'll require PyGitHub and tqdm, which you can install with:
```
pip install -r requirements/_release_tools.txt
```
References
https://github.com/scikit-image/scikit-image/blob/master/tools/generate_release_notes.py
https://github.com/scikit-image/scikit-image/issues/3404
https://github.com/scikit-image/scikit-image/issues/3405
"""
import os
import argparse
from datetime import datetime
from collections import OrderedDict
from warnings import warn
from github import Github
try:
    from tqdm import tqdm
except ImportError:
    # tqdm is optional: without it we lose the live progress bars, so warn
    # the user and install a no-op stand-in with the same call signature.
    warn(
        'tqdm not installed. This script takes approximately 5 minutes '
        'to run. To view live progressbars, please install tqdm. '
        'Otherwise, be patient.'
    )
    def tqdm(i, **kwargs):
        # Identity passthrough matching tqdm's (iterable, **kwargs) shape.
        return i
GH = "https://github.com"
GH_USER = 'napari'
GH_REPO = 'napari'
# A personal access token is mandatory: anonymous GitHub API access is rate
# limited far below what this script needs.
GH_TOKEN = os.environ.get('GH_TOKEN')
if GH_TOKEN is None:
    raise RuntimeError(
        "It is necessary that the environment variable `GH_TOKEN` "
        "be set to avoid running into problems with rate limiting. "
        "One can be acquired at https://github.com/settings/tokens.\n\n"
        "You do not need to select any permission boxes while generating "
        "the token."
    )
g = Github(GH_TOKEN)
repository = g.get_repo(f'{GH_USER}/{GH_REPO}')
# Command line: starting tag, end ref, and the version string for the title.
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument('from_commit', help='The starting tag.')
parser.add_argument('to_commit', help='The head branch.')
parser.add_argument(
    '--version', help="Version you're about to release.", default='0.2.0'
)
args = parser.parse_args()
# Locate the tag object for the starting point; the for/else raises only when
# the loop finishes without hitting `break` (i.e. tag not found).
for tag in repository.get_tags():
    if tag.name == args.from_commit:
        previous_tag = tag
        break
else:
    raise RuntimeError(f'Desired tag ({args.from_commit}) not found')
# The tag itself does not carry a usable date, so fetch the underlying git
# commit and parse its HTTP-style last_modified timestamp.
github_commit = previous_tag.commit.commit
previous_tag_date = datetime.strptime(
    github_commit.last_modified, '%a, %d %b %Y %H:%M:%S %Z'
)
# Materialize the commit range once; the sha set below is used later to decide
# whether a merged PR belongs to this release.
all_commits = list(
    tqdm(
        repository.get_commits(sha=args.to_commit, since=previous_tag_date),
        desc=f'Getting all commits between {args.from_commit} '
        f'and {args.to_commit}',
    )
)
all_hashes = set(c.sha for c in all_commits)
def add_to_users(users, new_user):
    """Record *new_user* in the *users* mapping, keyed by GitHub login.

    The stored display value is the account's full name when the profile
    has one set, and the bare login otherwise.
    """
    display = new_user.login if new_user.name is None else new_user.name
    users[new_user.login] = display
# Collect the people involved: commit authors, committers, and PR reviewers.
authors = set()
committers = set()
reviewers = set()
# login -> display name, filled lazily as people are encountered.
users = {}
for commit in tqdm(all_commits, desc="Getting commiters and authors"):
    if commit.committer is not None:
        add_to_users(users, commit.committer)
        committers.add(commit.committer.login)
    if commit.author is not None:
        add_to_users(users, commit.author)
        authors.add(commit.author.login)
# remove these bots.
committers.discard("web-flow")
authors.discard("azure-pipelines-bot")
# Section name -> {PR number: {'summary': title}}; insertion order is the
# order sections appear in the generated notes.
highlights = OrderedDict()
highlights['Highlights'] = {}
highlights['New Features'] = {}
highlights['Improvements'] = {}
highlights['Bug Fixes'] = {}
highlights['API Changes'] = {}
highlights['Deprecations'] = {}
highlights['Build Tools'] = {}
other_pull_requests = {}
for pull in tqdm(
    g.search_issues(
        f'repo:{GH_USER}/{GH_REPO} '
        f'merged:>{previous_tag_date.isoformat()} '
        'sort:created-asc'
    ),
    desc='Pull Requests...',
):
    pr = repository.get_pull(pull.number)
    # Only PRs whose merge commit landed in the selected range count.
    if pr.merge_commit_sha in all_hashes:
        summary = pull.title
        for review in pr.get_reviews():
            if review.user is not None:
                add_to_users(users, review.user)
                reviewers.add(review.user.login)
        # Classify by title prefix, e.g. "Bug Fixes: ..."; the prefix is
        # stripped from the stored summary. Unmatched PRs fall through to
        # the for/else branch and land in "Other Pull Requests".
        for key, key_dict in highlights.items():
            pr_title_prefix = (key + ': ').lower()
            if summary.lower().startswith(pr_title_prefix):
                key_dict[pull.number] = {
                    'summary': summary[len(pr_title_prefix) :]
                }
                break
        else:
            other_pull_requests[pull.number] = {'summary': summary}
# add Other PRs to the ordered dict to make doc generation easier.
highlights['Other Pull Requests'] = other_pull_requests
# Now generate the release notes
title = f'# napari {args.version}'
print(title)
print(
    f"""
We're happy to announce the release of napari {args.version}!
napari is a fast, interactive, multi-dimensional image viewer for Python.
It's designed for browsing, annotating, and analyzing large multi-dimensional
images. It's built on top of Qt (for the GUI), vispy (for performant GPU-based
rendering), and the scientific Python stack (numpy, scipy).
"""
)
print(
    """
For more information, examples, and documentation, please visit our website:
https://github.com/napari/napari
"""
)
# One markdown section per highlight category, one bullet per PR.
for section, pull_request_dicts in highlights.items():
    print(f'## {section}\n')
    if len(pull_request_dicts.items()) == 0:
        print()
    for number, pull_request_info in pull_request_dicts.items():
        print(f'- {pull_request_info["summary"]} (#{number})')
contributors = OrderedDict()
contributors['authors'] = authors
contributors['reviewers'] = reviewers
# ignore committers
# contributors['committers'] = committers
for section_name, contributor_set in contributors.items():
    print()
    if None in contributor_set:
        contributor_set.remove(None)
    committer_str = (
        f'## {len(contributor_set)} {section_name} added to this '
        'release (alphabetical)'
    )
    print(committer_str)
    print()
    # Sort by display name, case-insensitively; link to each person's commits.
    for c in sorted(contributor_set, key=lambda x: users[x].lower()):
        commit_link = f"{GH}/{GH_USER}/{GH_REPO}/commits?author={c}"
        print(f"- [{users[c]}]({commit_link}) - @{c}")
    print()
| 29.130233 | 88 | 0.68258 |
acdeb7cfe874412e903cb1a70b0c98e34cbd8262 | 244 | py | Python | project/app/models/pydantic.py | marcusholmgren/fast-api-docker | 48a03a4dc2568ec28176230772e13264105a5e84 | [
"MIT"
] | null | null | null | project/app/models/pydantic.py | marcusholmgren/fast-api-docker | 48a03a4dc2568ec28176230772e13264105a5e84 | [
"MIT"
] | null | null | null | project/app/models/pydantic.py | marcusholmgren/fast-api-docker | 48a03a4dc2568ec28176230772e13264105a5e84 | [
"MIT"
] | null | null | null | from pydantic import BaseModel, AnyHttpUrl
class SummaryPayloadSchema(BaseModel):
    """Request body: the URL to summarize (validated as an HTTP/HTTPS URL)."""
    url: AnyHttpUrl
class SummaryResponseSchema(SummaryPayloadSchema):
    """Response body: the payload fields plus the summary's integer id."""
    id: int
class SummaryUpdatePayloadSchema(SummaryPayloadSchema):
    """Update request body: the url plus the replacement summary text."""
    summary: str
| 17.428571 | 55 | 0.803279 |
acdeb7e7b4ab5d79a90bea7a8cf2aba88363a097 | 6,714 | py | Python | opentamp/test/test_core/test_parsing/test_parse_problem_config.py | Algorithmic-Alignment-Lab/openTAMP | f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | [
"MIT"
] | 4 | 2022-02-13T15:52:18.000Z | 2022-03-26T17:33:13.000Z | opentamp/test/test_core/test_parsing/test_parse_problem_config.py | Algorithmic-Alignment-Lab/openTAMP | f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | [
"MIT"
] | 1 | 2022-02-13T22:48:09.000Z | 2022-02-13T22:48:09.000Z | opentamp/test/test_core/test_parsing/test_parse_problem_config.py | Algorithmic-Alignment-Lab/openTAMP | f0642028d551d0436b3a3dbc3bfb2f23a00adc14 | [
"MIT"
] | null | null | null | import unittest
from opentamp.core.parsing import parse_domain_config
from opentamp.core.parsing import parse_problem_config
from opentamp.core.util_classes import matrix
from errors_exceptions import ProblemConfigException, ParamValidationException
import main
class TestParseProblemConfig(unittest.TestCase):
    """Tests for parse_problem_config using the NAMO benchmark domain."""
    def setUp(self):
        # Parse the shared domain once per test; each test then parses the
        # problem dict (self.p_c) itself, sometimes after mutating a copy.
        domain_fname, problem_fname = '../domains/namo_domain/namo.domain', '../domains/namo_domain/namo_probs/namo_1234_1.prob'
        d_c = main.parse_file_to_dict(domain_fname)
        self.domain = parse_domain_config.ParseDomainConfig.parse(d_c)
        self.p_c = main.parse_file_to_dict(problem_fname)
    def test_init_state(self):
        # Sanity-check parameter/predicate counts and type breakdown of the
        # parsed initial state.
        problem = parse_problem_config.ParseProblemConfig.parse(self.p_c, self.domain)
        #The number of params and preds keeps on changing
        self.assertEqual(len(problem.init_state.params), 13)
        self.assertEqual(len(problem.init_state.preds), 17)
        self.assertEqual(sum(1 for k, p in list(problem.init_state.params.items()) if p.get_type() == "Can"), 2)
        self.assertEqual(sum(1 for k, p in list(problem.init_state.params.items()) if p.get_type() == "Target"), 3)
        self.assertEqual(sum(1 for k, p in list(problem.init_state.params.items()) if not p.is_symbol()), 4)
        self.assertEqual(sum(1 for k, p in list(problem.init_state.params.items()) if p.name.startswith("pdp")), 3)
    def test_goal_test(self):
        # goal_test becomes True once both cans are moved onto their goal
        # target poses.
        problem = parse_problem_config.ParseProblemConfig.parse(self.p_c, self.domain)
        ## initially the goal is false
        self.assertFalse(problem.goal_test())
        ## want can0 at target1 and can1 at target0
        ## target1 is at 7, 6; target0 is at 2, 3
        for k, p in list(problem.init_state.params.items()):
            if k == "can0":
                p.pose = matrix.Vector2d((7, 6))
            if k == "can1":
                p.pose = matrix.Vector2d((2, 3))
        self.assertTrue(problem.goal_test())
    def test_missing_object(self):
        # Missing, empty, or absent "Objects" entries must raise
        # ProblemConfigException with a helpful message.
        # NOTE(review): cm.exception.message is a Python-2-era attribute; on
        # Python 3 this relies on the project exception classes defining
        # `.message` themselves — confirm against errors_exceptions.
        p2 = self.p_c.copy()
        p2["Objects"] = "Robot (name pr2); Target (name target0); Target (name target1); RobotPose (name gp_can0)"
        with self.assertRaises(ProblemConfigException) as cm:
            problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
        self.assertEqual(cm.exception.message, "'pdp_target0' is not an object in problem file.")
        p2["Objects"] = ""
        with self.assertRaises(ProblemConfigException) as cm:
            problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
        self.assertEqual(cm.exception.message, "Problem file needs objects.")
        del p2["Objects"]
        with self.assertRaises(ProblemConfigException) as cm:
            problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
        self.assertEqual(cm.exception.message, "Problem file needs objects.")
    def test_missing_prim_preds(self):
        # An Init section lacking primitive predicates for an object must be
        # rejected with a message naming that object.
        p2 = self.p_c.copy()
        p2["Init"] = ";(At can0 target0), (InContact pr2 gp_can0 can0)"
        with self.assertRaises(ProblemConfigException) as cm:
            problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
        self.assertEqual(cm.exception.message, "Problem file has no primitive predicates for object 'grasp0'.")
    def test_missing_derived_preds(self):
        # should work fine even with no derived predicates
        p2 = self.p_c.copy()
        p2["Init"] = "(pose pr2 [1, 2]), (geom pr2 1), (geom target0 1), (value target0 [3, 5]), (geom target1 1), (value target1 [4,6]), (geom can0 1), (pose can0 [3, 5]), (value pdp_target0 undefined), (value grasp0 undefined), (value robot_init_pose [1, 2]), (pose can1 [2, 3]), (value target2 [5, 5]), (value pdp_target2 undefined), (value pdp_target1 undefined), (pose obs0 [0, 0]), (geom obs0 closet), (value robot_end_pose [0, 0]);"
        problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
        self.assertEqual(problem.init_state.preds, set())
    # def test_failures(self):
    #     p2 = self.p_c.copy()
    #     p2["Objects"] += "; "
    #     p2["Init"] = ""
    #     with self.assertRaises(ProblemConfigException) as cm:
    #         problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
    #     self.assertEqual(cm.exception.message, "Problem file needs init.")
    #     del p2["Init"]
    #     with self.assertRaises(ProblemConfigException) as cm:
    #         problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
    #     self.assertEqual(cm.exception.message, "Problem file needs init.")
    #     p2 = self.p_c.copy()
    #     p2["Objects"] += "; Test (name testname)"
    #     with self.assertRaises(AssertionError) as cm:
    #         problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
    #     p2 = self.p_c.copy()
    #     p2["Objects"] += "; Test (name testname. value (3, 5))"
    #     with self.assertRaises(ProblemConfigException) as cm:
    #         problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
    #     self.assertEqual(cm.exception.message, "Parameter 'testname' not defined in domain file.")
    #     p2 = self.p_c.copy()
    #     p2["Init"] = "(pose pr2 [1, 2]), (geom pr2 1), (geom target0 1), (pose target0 [3, 5]), (geom target1 1), (pose target1 [4,6]), (geom can0 1), (pose can0 [3, 5]), (value pdp_target0 undefined); (At target0 can0), (InContact pr2 pdp_target0 target0)"
    #     with self.assertRaises(ParamValidationException) as cm:
    #         problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
    #     self.assertEqual(cm.exception.message, "Parameter type validation failed for predicate 'initpred0: (At target0 can0)'.")
    #     p2 = self.p_c.copy()
    #     p2["Init"] = "(pose pr2 [1, 2]), (geom pr2 1), (geom target0 1), (pose target0 [3, 5]), (geom target1 1), (pose target1 [4,6]), (geom can0 1), (pose can0 [3, 5]), (value gp_can0 undefined); (At can0 target2), (InContact pr2 gp_can0 target0)"
    #     with self.assertRaises(ProblemConfigException) as cm:
    #         problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
    #     self.assertEqual(cm.exception.message, "Parameter 'target2' for predicate type 'At' not defined in domain file.")
    #     p2 = self.p_c.copy()
    #     p2["Goal"] = "(At can0 target3)"
    #     with self.assertRaises(ProblemConfigException) as cm:
    #         problem = parse_problem_config.ParseProblemConfig.parse(p2, self.domain)
    #     self.assertEqual(cm.exception.message, "Parameter 'target3' for predicate type 'At' not defined in domain file.")
# Allow running this test module directly rather than via a test runner.
if __name__ == "__main__":
    unittest.main()
| 58.894737 | 439 | 0.668454 |
acdeb92e671a30bbfcadf0c488670519e7867cee | 11,153 | py | Python | libs/config.py | Sp-Bro/traffic_police_gesture_pytorch | 5a90ca3584d796faf33ff71d88a540751fbd8a4c | [
"MIT"
] | 4 | 2020-07-11T12:42:07.000Z | 2020-10-08T07:00:50.000Z | libs/config.py | al02131221/traffic_police_gesture_pytorch | 5d37fe0f595c7e380c3141b9f0975f327b00946c | [
"MIT"
] | 3 | 2021-02-23T08:54:35.000Z | 2021-02-23T13:15:19.000Z | libs/config.py | al02131221/traffic_police_gesture_pytorch | 5d37fe0f595c7e380c3141b9f0975f327b00946c | [
"MIT"
] | 2 | 2020-07-11T12:42:08.000Z | 2020-12-19T13:12:26.000Z | import os
import os.path as osp
import copy
import yaml
import numpy as np
from ast import literal_eval
from libs.utils.collections import AttrDict
__C = AttrDict()
# Public alias: external code imports `cfg` while this module mutates __C.
cfg = __C
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Device for training or testing
# E.g., 'cuda' for using GPU, 'cpu' for using CPU
__C.DEVICE = 'cuda'
# Pixel mean values (BGR order) as a list
__C.PIXEL_MEANS = np.array([[[0.485, 0.456, 0.406]]])
# Pixel std values (BGR order) as a list
__C.PIXEL_STDS = np.array([[[0.229, 0.224, 0.225]]])
# Calculate the model FLOPs and params
__C.CALC_FLOPS = True
# Directory for saving checkpoints and loggers
__C.CKPT = 'ckpts/pose'
# Display the log per iteration
__C.DISPLAY_ITER = 20
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# ---------------------------------------------------------------------------- #
# Optimizer options
# ---------------------------------------------------------------------------- #
__C.SOLVER = AttrDict()
# Optimizer type
__C.SOLVER.OPTIMIZER = 'adam'
# learning rate
__C.SOLVER.LR = 0.001
# learning rate adjusting schedule
__C.SOLVER.LR_SCHEDULE = 'step'
# Momentum in sgd
__C.SOLVER.MOMENTUM = 0.9
# Number of iter adjusts the lr
__C.SOLVER.UPDATE_ITER = 10000
# Decay rate in adjusting lr
__C.SOLVER.UPDATE_RATE = 0.8
# ---------------------------------------------------------------------------- #
# Pose options
# ---------------------------------------------------------------------------- #
__C.POSE = AttrDict()
# Type of pose estimation model
__C.POSE.TYPE = 'paf'
# Number of key points
__C.POSE.NUM_KPTS = 14
# Number of pafs
__C.POSE.NUM_PAFS = 22 # (11 * 2)
# Confidence threshold of key points
__C.POSE.KPT_THRESH = 0.1
# The size of image inputted pose model
__C.POSE.SCALE = (512, 512)
# Connections of limbs (pairs of 1-based key-point indices)
__C.POSE.LIMBS = [[1,2], [2,3],
                  [4,5], [5,6],
                  [14,1], [14,4],
                  [7,8], [8,9],
                  [10,11], [11,12],
                  [13,14]]
# Head's key points connection
__C.POSE.HEAD = [13, 14]
# Body's key points connection
__C.POSE.BODY = [[1,2], [2,3],
                 [4,5], [5,6],
                 [14,1], [14,4],
                 [7,8], [8,9],
                 [10,11], [11,12]]
# Limb width used to judge whether a point belongs to a limb or not
__C.POSE.LIMB_WIDTH = 2.5
# Variance for PAF to generate the Gaussian heat map
__C.POSE.GAUSSIAN_VAR = 1.1
# ---------------------------------------------------------------------------- #
# RNN (gesture recognition) options
# ---------------------------------------------------------------------------- #
__C.RNN = AttrDict()
# Number of classes
__C.RNN.NUM_CLASSES = 9
# Feature number of rnn input, default is 30 represents 10 limbs and 20 angles
__C.RNN.DIM_IN = 30
# Target delay
__C.RNN.TARGET_DELAY = 15
# time step
__C.RNN.TIME_STEP = 1350 # 90*15 for video which is 15 fps
# traffic police pose English name
__C.RNN.GESTURES = {0: "--",
                    1: "STOP",
                    2: "MOVE STRAIGHT",
                    3: "LEFT TURN",
                    4: "LEFT TURN WAITING",
                    5: "RIGHT TURN",
                    6: "LANG CHANGING",
                    7: "SLOW DOWN",
                    8: "PULL OVER"}
# --------------------------------------------------------------------------- #
# Test options
# --------------------------------------------------------------------------- #
__C.TEST = AttrDict()
# Test data path
__C.TEST.DATA_PATH = 'rnn/test'
# Key points coordinates save path
__C.TEST.SAVE_PATH = 'paf_features'
# Model weights path
__C.TEST.WEIGHTS = 'weights/mypaf.pth'
# dir of npy files for testing
__C.TEST.NPY_DIR = 'rnn/test_npy'
# dir of annotations files for testing
__C.TEST.CSV_DIR = 'rnn/test_csv'
# test batch size
__C.TEST.BATCH_SIZE = 2
# --------------------------------------------------------------------------- #
# Train options
# --------------------------------------------------------------------------- #
__C.TRAIN = AttrDict()
# Train data path
__C.TRAIN.DATA_PATH = 'ai_challenger'
# Key points coordinates save path
__C.TRAIN.SAVE_PATH = 'paf_features'
# Model weights path (empty string: train from scratch)
__C.TRAIN.WEIGHTS = ''
# Image scale during train
__C.TRAIN.SCALE = (512, 512)
# Snapshot iteration
__C.TRAIN.SNAPSHOT = 10000
# Iterations for training
__C.TRAIN.ITERS = 40000
# batch size
__C.TRAIN.BATCH_SIZE = 4
# number of threads used loading data
__C.TRAIN.LOAD_THREADS = 4
# dir of npy files for training
__C.TRAIN.NPY_DIR = 'rnn/train_npy'
# dir of annotations files for training
__C.TRAIN.CSV_DIR = 'rnn/train_csv'
# ---------------------------------------------------------------------------- #
# Deprecated options
# If an option is removed from the code and you don't want to break existing
# yaml configs, you can add the full config key as a string to the set below.
# ---------------------------------------------------------------------------- #
# NOTE(review): the name keeps its historical misspelling ("DEPCRECATED");
# renaming it would require touching every reference in this module.
_DEPCRECATED_KEYS = set()
# ---------------------------------------------------------------------------- #
# Renamed options
# If you rename a config option, record the mapping from the old name to the new
# name in the dictionary below. Optionally, if the type also changed, you can
# make the value a tuple that specifies first the renamed key and then
# instructions for how to edit the config file.
# ---------------------------------------------------------------------------- #
_RENAMED_KEYS = {
    'EXAMPLE.RENAMED.KEY': 'EXAMPLE.KEY', # Dummy example to follow
    'PIXEL_MEAN': 'PIXEL_MEANS',
    'PIXEL_STD': 'PIXEL_STDS',
}
def _merge_a_into_b(a, b, stack=None):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Args:
        a: Source AttrDict (e.g. parsed from a yaml override file).
        b: Destination AttrDict (the defaults), modified in place.
        stack: List of ancestor keys, used to build the dotted "full key"
            for error messages and deprecation/rename lookups.

    Raises:
        KeyError: If `a` contains a key that does not exist in `b` and is
            neither deprecated nor renamed.
        ValueError: If a value cannot be coerced to the type in `b`.
    """
    assert isinstance(a, AttrDict), \
        '`a` (cur type {}) must be an instance of {}'.format(type(a), AttrDict)
    assert isinstance(b, AttrDict), \
        '`b` (cur type {}) must be an instance of {}'.format(type(b), AttrDict)
    for k, v_ in a.items():
        full_key = '.'.join(stack) + '.' + k if stack is not None else k
        # a must specify keys that are in b
        if k not in b:
            if _key_is_deprecated(full_key):
                continue
            elif _key_is_renamed(full_key):
                _raise_key_rename_error(full_key)
            else:
                raise KeyError('Non-existent config key: {}'.format(full_key))
        # Deep-copy so the merged config never aliases objects owned by `a`.
        v = copy.deepcopy(v_)
        v = _decode_cfg_value(v)
        v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
        # Recursively merge dicts. (A previous `try/except BaseException:
        # raise` wrapper here was a no-op and has been removed.)
        if isinstance(v, AttrDict):
            stack_push = [k] if stack is None else stack + [k]
            _merge_a_into_b(v, b[k], stack=stack_push)
        else:
            b[k] = v
def merge_cfg_from_file(filename):
    """Load a YAML config file and merge it into the default options.

    Args:
        filename: Path to a YAML file whose nested keys mirror the global
            config structure.

    Raises:
        KeyError: If the file contains a key absent from the defaults.
        ValueError: If a value's type cannot be coerced to the default's.
    """
    with open(filename, 'r') as f:
        # safe_load avoids arbitrary object construction from untrusted
        # files; plain yaml.load without an explicit Loader is deprecated
        # since PyYAML 5 and raises TypeError in PyYAML 6.
        yaml_cfg = AttrDict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
    # update_cfg()
def merge_cfg_from_cfg(cfg_other):
    """Merge `cfg_other` into the global config.

    Args:
        cfg_other: AttrDict of overrides, merged in place into the
            module-level config ``__C`` (aliased as ``cfg``).
    """
    _merge_a_into_b(cfg_other, __C)
def merge_cfg_from_list(cfg_list):
    """Merge config keys, values in a list (e.g., from command line) into the
    global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
    """
    assert len(cfg_list) % 2 == 0
    # Consume the flat list as (key, value) pairs.
    for i in range(0, len(cfg_list), 2):
        full_key, raw_value = cfg_list[i], cfg_list[i + 1]
        if _key_is_deprecated(full_key):
            continue
        if _key_is_renamed(full_key):
            _raise_key_rename_error(full_key)
        # Walk down the nested config to the dict that owns the last subkey.
        parts = full_key.split('.')
        node = __C
        for part in parts[:-1]:
            assert part in node, 'Non-existent key: {}'.format(full_key)
            node = node[part]
        leaf = parts[-1]
        assert leaf in node, 'Non-existent key: {}'.format(full_key)
        decoded = _decode_cfg_value(raw_value)
        node[leaf] = _check_and_coerce_cfg_value_type(
            decoded, node[leaf], leaf, full_key
        )
def _decode_cfg_value(v):
    """Decodes a raw config value (e.g., from a yaml config files or command
    line argument) into a Python object.
    """
    # Raw yaml yields plain dicts; promote them so attribute access works.
    if isinstance(v, dict):
        return AttrDict(v)
    if isinstance(v, str):
        # literal_eval turns "3" -> 3, "[1, 2]" -> [1, 2], "None" -> None,
        # and so on. Strings that merely *look* like identifiers or paths
        # ('foo', 'foo/bar') make literal_eval raise ValueError/SyntaxError
        # because they arrive from yaml without surrounding quotes; those
        # are kept verbatim as strings.
        try:
            return literal_eval(v)
        except (ValueError, SyntaxError):
            return v
    # Already a concrete Python object (int, float, bool, ...): no decoding.
    return v
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
    """Checks that `value_a`, which is intended to replace `value_b` is of the
    right type. The type is correct if it matches exactly or is one of a few
    cases in which the type can be easily coerced.
    """
    type_b = type(value_b)
    type_a = type(value_a)
    # Exact type match: accept the replacement as-is.
    if type_a is type_b:
        return value_a
    # Permitted coercions, checked in the same order as before:
    # anything -> ndarray (reusing the default's dtype), anything -> str,
    # and tuple <-> list interchange.
    if isinstance(value_b, np.ndarray):
        return np.array(value_a, dtype=value_b.dtype)
    if isinstance(value_b, str):
        return str(value_a)
    if isinstance(value_a, tuple) and isinstance(value_b, list):
        return list(value_a)
    if isinstance(value_a, list) and isinstance(value_b, tuple):
        return tuple(value_a)
    raise ValueError(
        'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
        'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
    )
def _key_is_deprecated(full_key):
    """Return True if `full_key` is listed in the deprecated-keys set."""
    # The membership test already yields the boolean; no need for an
    # if/return True/return False ladder.
    return full_key in _DEPCRECATED_KEYS
def _key_is_renamed(full_key):
    # True when the key has a replacement recorded in _RENAMED_KEYS.
    return full_key in _RENAMED_KEYS
def _raise_key_rename_error(full_key):
    """Raise KeyError pointing the user at the key's new name.

    `_RENAMED_KEYS` values are either the new key name, or a tuple of
    (new_key, extra_instructions).
    """
    renamed = _RENAMED_KEYS[full_key]
    extra = ''
    if isinstance(renamed, tuple):
        extra = ' Note: ' + renamed[1]
        renamed = renamed[0]
    raise KeyError(
        'Key {} was renamed to {}; please update your config.{}'.
        format(full_key, renamed, extra)
    )
| 34.316923 | 80 | 0.56756 |
acdeb9e52ece33beeb261befe4a395073c552849 | 7,356 | py | Python | accelbyte_py_sdk/api/social/operations/global_statistic/get_global_stat_items.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/social/operations/global_statistic/get_global_stat_items.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/social/operations/global_statistic/get_global_stat_items.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | # Auto-generated at 2021-09-27T17:01:27.252526+08:00
# from: Justice Social Service (1.17.1)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import GlobalStatItemPagingSlicedResult
class GetGlobalStatItems(Operation):
    """List global statItems (getGlobalStatItems).

    Auto-generated wrapper for one REST operation; it carries the request
    parameters, renders the URL, and parses the documented responses.

    Properties:
        url: /social/v1/admin/namespaces/{namespace}/globalstatitems
        method: GET
        tags: GlobalStatistic
        consumes: []
        produces: ["application/json"]
        security: bearer
        namespace: (namespace) REQUIRED str in path
        offset: (offset) OPTIONAL int in query
        limit: (limit) OPTIONAL int in query

    Responses:
        200: OK - GlobalStatItemPagingSlicedResult (successful operation)
    """
    # region fields
    _url: str = "/social/v1/admin/namespaces/{namespace}/globalstatitems"
    _method: str = "GET"
    _consumes: List[str] = []
    _produces: List[str] = ["application/json"]
    _security: Optional[str] = "bearer"
    _location_query: str = None
    namespace: str # REQUIRED in [path]
    offset: int # OPTIONAL in [query]
    limit: int # OPTIONAL in [query]
    # endregion fields
    # region properties
    @property
    def url(self) -> str:
        return self._url
    @property
    def method(self) -> str:
        return self._method
    @property
    def consumes(self) -> List[str]:
        return self._consumes
    @property
    def produces(self) -> List[str]:
        return self._produces
    @property
    def security(self) -> Optional[str]:
        return self._security
    @property
    def location_query(self) -> str:
        return self._location_query
    # endregion properties
    # region get methods
    def get_full_url(self, base_url: Union[None, str] = None) -> str:
        """Render the absolute request URL with path and query params filled in."""
        result = base_url if base_url is not None else ""
        # path params
        url = self.url
        for k, v in self.get_path_params().items():
            url = url.replace(f"{{{k}}}", v)
        result += url
        # query params
        result += "?" + "&".join([f"{k}={v}" for k, v in self.get_query_params().items()])
        return result
    # noinspection PyMethodMayBeStatic
    def get_all_required_fields(self) -> List[str]:
        return [
            "namespace",
        ]
    # endregion get methods
    # region get_x_params methods
    def get_all_params(self) -> dict:
        # Params grouped by where they are sent in the request.
        return {
            "path": self.get_path_params(),
            "query": self.get_query_params(),
        }
    def get_path_params(self) -> dict:
        # Optional fields may be unset; hasattr guards every access.
        result = {}
        if hasattr(self, "namespace"):
            result["namespace"] = self.namespace
        return result
    def get_query_params(self) -> dict:
        result = {}
        if hasattr(self, "offset"):
            result["offset"] = self.offset
        if hasattr(self, "limit"):
            result["limit"] = self.limit
        return result
    # endregion get_x_params methods
    # region is/has methods
    def is_valid(self) -> bool:
        # Valid once the single required field (namespace) is present.
        if not hasattr(self, "namespace") or self.namespace is None:
            return False
        return True
    # endregion is/has methods
    # region with_x methods
    def with_namespace(self, value: str) -> GetGlobalStatItems:
        self.namespace = value
        return self
    def with_offset(self, value: int) -> GetGlobalStatItems:
        self.offset = value
        return self
    def with_limit(self, value: int) -> GetGlobalStatItems:
        self.limit = value
        return self
    # endregion with_x methods
    # region to methods
    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize set fields; include_empty fills unset ones with defaults."""
        result = {}
        if hasattr(self, "namespace") and self.namespace:
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = str()
        if hasattr(self, "offset") and self.offset:
            result["offset"] = int(self.offset)
        elif include_empty:
            result["offset"] = int()
        if hasattr(self, "limit") and self.limit:
            result["limit"] = int(self.limit)
        elif include_empty:
            result["limit"] = int()
        return result
    # endregion to methods
    # region response methods
    # noinspection PyMethodMayBeStatic
    def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, GlobalStatItemPagingSlicedResult], Union[None, HttpResponse]]:
        """Parse the given response.

        Returns a (result, error) pair: exactly one side is non-None.

        200: OK - GlobalStatItemPagingSlicedResult (successful operation)
        """
        if code == 200:
            return GlobalStatItemPagingSlicedResult.create_from_dict(content), None
        was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
        if was_handled:
            return None, undocumented_response
        return None, HttpResponse.create_unhandled_error()
    # endregion response methods
    # region static methods
    @classmethod
    def create(
        cls,
        namespace: str,
        offset: Optional[int] = None,
        limit: Optional[int] = None,
    ) -> GetGlobalStatItems:
        """Alternate constructor from plain arguments; optionals stay unset if None."""
        instance = cls()
        instance.namespace = namespace
        if offset is not None:
            instance.offset = offset
        if limit is not None:
            instance.limit = limit
        return instance
    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> GetGlobalStatItems:
        """Alternate constructor from a dict as produced by to_dict()."""
        instance = cls()
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = str()
        if "offset" in dict_ and dict_["offset"] is not None:
            instance.offset = int(dict_["offset"])
        elif include_empty:
            instance.offset = int()
        if "limit" in dict_ and dict_["limit"] is not None:
            instance.limit = int(dict_["limit"])
        elif include_empty:
            instance.limit = int()
        return instance
    @staticmethod
    def get_field_info() -> Dict[str, str]:
        # Maps wire field names to attribute names (identical here).
        return {
            "namespace": "namespace",
            "offset": "offset",
            "limit": "limit",
        }
    # endregion static methods
| 29.075099 | 156 | 0.603861 |
acdebb901c97c6abd079df51b0f8ec282234c5e9 | 39,851 | py | Python | test/test_kas_hc.py | ryanstwrt/multi_agent_blackboard_system | b8f6ab71dfe0742a6f690de19b97d10504fc1768 | [
"MIT"
] | 1 | 2021-08-02T10:29:35.000Z | 2021-08-02T10:29:35.000Z | test/test_kas_hc.py | ryanstwrt/multi_agent_blackboard_system | b8f6ab71dfe0742a6f690de19b97d10504fc1768 | [
"MIT"
] | 10 | 2020-03-14T07:39:34.000Z | 2021-11-03T22:55:28.000Z | test/test_kas_hc.py | ryanstwrt/multi_agent_blackboard_system | b8f6ab71dfe0742a6f690de19b97d10504fc1768 | [
"MIT"
] | 1 | 2021-07-18T14:43:10.000Z | 2021-07-18T14:43:10.000Z | from osbrain import run_nameserver
from osbrain import run_agent
import mabs.ka.ka_s.hill_climb as hc
import mabs.bb.blackboard_optimization as bb_opt
import time
from mabs.utils.problem import BenchmarkProblem
# Shared module-level fixture: a 3-variable / 3-objective DTLZ1 benchmark
# problem (all objectives minimized, bounds [0, 1000]) reused by the
# search-method and shutdown tests below.
dvs = {'x{}'.format(x):{'ll':0.0, 'ul':1.0, 'variable type': float} for x in range(3)}
objs = {'f{}'.format(x): {'ll':0.0, 'ul':1000, 'goal':'lt', 'variable type': float} for x in range(3)}
problem = BenchmarkProblem(design_variables=dvs,
                           objectives=objs,
                           constraints={},
                           benchmark_name = 'dtlz1')
def test_init():
    """A freshly constructed HillClimb agent exposes the documented defaults."""
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    rp = run_agent(name='ka_rp', base=hc.HillClimb)
    expected_defaults = {
        '_base_trigger_val': 5.00001,
        'avg_diff_limit': 5,
        'step_size': 0.1,
        'step_rate': 0.1,
        'step_limit': 100,
        'convergence_criteria': 0.001,
        'hc_type': 'simple',
        '_class': 'local search hc',
    }
    for attr, expected in expected_defaults.items():
        assert rp.get_attr(attr) == expected
    ns.shutdown()
    time.sleep(0.1)
def test_determine_step_steepest_ascent():
    """determine_step picks the perturbation with the best scaled objective gain.

    Exercises 'gt', 'lt' and 'et' (equal-to-target) goals, list-valued
    objectives, multi-objective trade-offs, a no-op perturbation set, and
    constraint violations; the last two must yield (None, None).
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='bb', base=bb_opt.BbOpt)
    dvs = {'height': {'ll': 50.0, 'ul': 80.0, 'variable type': float},
           'smear': {'ll': 50.0, 'ul': 70.0, 'variable type': float},
           'pu_content': {'ll': 0.0, 'ul': 1.0, 'variable type': float}}
    objs = {'reactivity swing': {'ll':0, 'ul':15000, 'goal':'lt', 'variable type': float},
            'burnup': {'ll':0, 'ul':2000, 'goal':'gt', 'variable type': float},
            'eol keff': {'ll':1.0, 'ul':2.0, 'goal':'et', 'target': 1.5, 'variable type': float},
            'power': {'ll':0, 'ul':10, 'goal':'lt', 'variable type': list, 'goal type':'max'}}
    cons = {'eol keff': {'ll':1.0, 'ul':2.5, 'variable type': float}}
    bb.initialize_abstract_level_3(objectives=objs, design_variables=dvs, constraints=cons)
    bb.connect_agent(hc.HillClimb, 'ka_rp_exploit')
    ka = bb.get_attr('_proxy_server')
    rp = ka.proxy('ka_rp_exploit')
    rp.set_attr(hc_type='steepest ascent')
    bb.update_abstract_lvl(3, 'core_[65.0,65.0,0.42]', {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42},
                                                        'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]},
                                                        'constraints': {'eol keff': 1.1}}, panel='old')
    bb.update_abstract_lvl(1, 'core_[65.0,65.0,0.42]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp.set_attr(lvl_read=bb.get_attr('abstract_lvls')['level 1'])
    rp.set_attr(_lvl_data=bb.get_attr('abstract_lvls')['level 3']['old'])
    # Test an increase in burnup (greater than test)
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 704.11, 'burnup' : 60.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 704.11, 'burnup' : 67.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff,3) == 0.09
    assert pert == '+ height'
    # Test an increase in reactivity swing (less than test)
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 680.11, 'burnup' : 61.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 710.11, 'burnup' : 61.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff, 3) == 0.053
    assert pert == '+ pu_content'
    # Test an increase in keff (equal to test - below value)
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.1, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.4, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.3, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff, 3) == 20.0
    assert pert == '+ pu_content'
    # Test an increase in keff (equal to test - above vaalue)
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.9, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.7, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff, 3) == 20.0
    assert pert == '+ pu_content'
    # Test an increase in keff (equal to test - cross the value)
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.45, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.57, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff, 3) == 3.333
    assert pert == '+ pu_content'
    # Test an increase in power ()
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.45, 'power': [1.0,2.0,2.5]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.57, 'power': [1.0,2.0,2.75]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff, 3) == 5.0
    assert pert == '+ pu_content'
    # Test a postive a change in two objectives
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 710.11, 'burnup' : 60.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 680.11, 'burnup' : 67.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff, 3) == 0.138
    assert pert == '+ height'
    # Test a postive a change in both objectives (both have of ~0.078, but + pu_content is slightly greater})
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 661.51, 'burnup' : 60.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 710.11, 'burnup' : 67.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert round(diff, 3) == 0.078
    assert pert == '+ pu_content'
    # Test a case with no change in design variables
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42},
                                     'objective functions': {'reactivity swing' : 661.51, 'burnup' : 60.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 710.11, 'burnup' : 67.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert diff == None
    assert pert == None
    # Test a constraint violation
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 661.51, 'burnup' : 60.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                     'constraints': {'eol keff': 0.9}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 710.11, 'burnup' : 67.12, 'eol keff': 1.6, 'power': [1.0,2.0,3.0]},
                                 'constraints': {'eol keff': 2.8}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert diff == None
    assert pert == None
    ns.shutdown()
    time.sleep(0.1)
def test_determine_step_simple():
    """In 'simple' hill-climb mode, determine_step returns any improving step.

    Fix: the original final check was ``assert pert == '+ height' or '- height'``,
    which is always true because ``or '- height'`` short-circuits to a truthy
    string ((pert == a) or b). Rewritten as a membership test so the assertion
    actually constrains ``pert``.
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='bb', base=bb_opt.BbOpt)
    objs = {'reactivity swing': {'ll':0, 'ul':15000, 'goal':'lt', 'variable type': float},
            'burnup': {'ll':0, 'ul':2000, 'goal':'gt', 'variable type': float}}
    dvs = {'height': {'ll': 50.0, 'ul': 80.0, 'variable type': float},
           'smear': {'ll': 50.0, 'ul': 70.0, 'variable type': float},
           'pu_content': {'ll': 0.0, 'ul': 1.0, 'variable type': float}}
    cons = {'eol keff': {'ll':1.0, 'ul':2.5, 'variable type': float}}
    bb.initialize_abstract_level_3(objectives=objs, design_variables=dvs, constraints=cons)
    bb.connect_agent(hc.HillClimb, 'ka_rp_exploit')
    ka = bb.get_attr('_proxy_server')
    rp = ka.proxy('ka_rp_exploit')
    bb.update_abstract_lvl(3, 'core_[65.0,65.0,0.42]', {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42},
                                                        'objective functions': {'reactivity swing' : 704.11, 'burnup' : 61.12},
                                                        'constraints': {'eol keff': 1.1}}, panel='old')
    bb.update_abstract_lvl(1, 'core_[65.0,65.0,0.42]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp.set_attr(lvl_read=bb.get_attr('abstract_lvls')['level 1'])
    rp.set_attr(_lvl_data=bb.get_attr('abstract_lvls')['level 3']['old'])
    # Test an increase in burnup (greater than test)
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 704.11, 'burnup' : 60.12},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 704.11, 'burnup' : 67.12},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert pert == '+ height'
    # Test multiple increases: simple mode may pick either improving step.
    base = {'height': 65.0, 'smear': 65.0, 'pu_content': 0.42}
    base_design = {'reactivity swing' : 704.11, 'burnup' : 61.12}
    design_dict = {'+ pu_content' : {'design variables': {'height': 65.0, 'smear': 65.0, 'pu_content': 0.45},
                                     'objective functions': {'reactivity swing' : 704.11, 'burnup' : 60.12},
                                     'constraints': {'eol keff': 1.1}},
                   '+ height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 704.11, 'burnup' : 67.12},
                                 'constraints': {'eol keff': 1.1}},
                   '- height' : {'design variables': {'height': 66.0, 'smear': 65.0, 'pu_content': 0.42},
                                 'objective functions': {'reactivity swing' : 650.11, 'burnup' : 62.12},
                                 'constraints': {'eol keff': 1.1}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert pert in ('+ height', '- height')
    ns.shutdown()
    time.sleep(0.1)
def test_determine_step_simple_discrete_dv():
    """determine_step handles discrete (str/int option) design variables.

    Both candidate steps improve f1 (goal 'lt'); steepest ascent must pick
    '+ x1', whose scaled improvement is (100-81)/(200-80) ~= 0.15833.
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='blackboard', base=bb_opt.BbOpt)
    dv = {'x0' : {'options': ['0', '1', '2', '3'], 'default': '0', 'variable type': str},
          'x1' : {'options': ['0', '1', '2', '3'], 'default': '1', 'variable type': str},
          'x2' : {'options': ['0', '1', '2', '3'], 'default': '2', 'variable type': str},
          'x3' : {'options': [0, 1, 2, 3], 'variable type': int}}
    obj = {'f1': {'ll': 80, 'ul':200, 'goal': 'lt', 'variable type': float}}
    bb.initialize_abstract_level_3(design_variables=dv,objectives=obj)
    bb.connect_agent(hc.HillClimb, 'ka_rp')
    rp = ns.proxy('ka_rp')
    rp.set_random_seed(seed=1)
    rp.set_attr(hc_type='steepest ascent')
    rp.set_attr(new_designs=['core_1'])
    rp.set_attr(_lvl_data={'core_1': {'design variables': {'x0': '0',
                                                           'x1': '1',
                                                           'x2': '2',
                                                           'x3': '3'}}})
    base = {'x0': '0', 'x1': '1', 'x2': '2', 'x3': 3}
    base_design = {'f1': 100}
    design_dict = {'+ x0' : {'design variables': {'x0': '0', 'x1': '1', 'x2': '2', 'x3': 3},
                             'objective functions': {'f1': 95},
                             'constraints': {}},
                   '+ x1' : {'design variables': {'x0': '0', 'x1': '1', 'x2': '2', 'x3': 3},
                             'objective functions': {'f1': 81},
                             'constraints': {}}}
    pert, diff = rp.determine_step(base, base_design, design_dict)
    assert pert == '+ x1'
    assert round(diff, 5) == 0.15833
    ns.shutdown()
    time.sleep(0.1)
def test_search_method_steepest_ascent():
    """search_method in steepest-ascent mode reproduces a seeded design trace.

    With a fixed RNG seed the sequence of perturbed cores written to
    level 3 'new' is deterministic; the expected lists below are that
    exact trace for step_limit=1 and then step_limit=25.
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='bb', base=bb_opt.BbOpt)
    bb.initialize_abstract_level_3(objectives=objs, design_variables=dvs)
    bb.connect_agent(hc.HillClimb, 'ka_rp_exploit')
    ka = bb.get_attr('_proxy_server')
    rp = ka.proxy('ka_rp_exploit')
    rp.set_attr(problem=problem)
    rp.set_attr(step_size=0.2)
    rp.set_attr(step_rate=0.5)
    rp.set_attr(step_limit=1)
    rp.set_attr(convergence_criteria=0.001)
    rp.set_attr(hc_type='steepest ascent')
    rp.set_random_seed(seed=1099)
    bb.update_abstract_lvl(3, 'core_[0.650,0.650,0.4]', {'design variables': {'x0': 0.650, 'x1': 0.650, 'x2': 0.4},
                                                         'objective functions': {'f0': 365.0, 'f1': 500.0, 'f2' : 600.0}}, panel='old')
    bb.update_abstract_lvl(1, 'core_[0.650,0.650,0.4]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp.set_attr(lvl_read=bb.get_attr('abstract_lvls')['level 1'])
    rp.set_attr(_lvl_data=bb.get_attr('abstract_lvls')['level 3']['old'])
    rp.set_attr(new_designs=['core_[0.650,0.650,0.4]'])
    rp.search_method()
    time.sleep(0.075)
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[0.65,0.65,0.32]', 'core_[0.65,0.65,0.48]', 'core_[0.65,0.52,0.4]', 'core_[0.65,0.78,0.4]', 'core_[0.52,0.65,0.4]', 'core_[0.78,0.65,0.4]', 'core_[0.65,0.65,0.384]', 'core_[0.65,0.65,0.576]', 'core_[0.65,0.52,0.48]', 'core_[0.65,0.78,0.48]', 'core_[0.52,0.65,0.48]', 'core_[0.78,0.65,0.48]']
    # Re-seed and climb again with a larger budget; the previous 'new'
    # entries remain, so the expected list is cumulative.
    rp.set_attr(step_limit=25)
    rp.set_random_seed(seed=1099)
    rp.search_method()
    time.sleep(0.075)
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[0.65,0.65,0.32]', 'core_[0.65,0.65,0.48]', 'core_[0.65,0.52,0.4]', 'core_[0.65,0.78,0.4]', 'core_[0.52,0.65,0.4]', 'core_[0.78,0.65,0.4]', 'core_[0.65,0.65,0.384]', 'core_[0.65,0.65,0.576]', 'core_[0.65,0.52,0.48]', 'core_[0.65,0.78,0.48]', 'core_[0.52,0.65,0.48]', 'core_[0.78,0.65,0.48]', 'core_[0.65,0.65,0.3072]', 'core_[0.65,0.65,0.4608]', 'core_[0.65,0.52,0.384]', 'core_[0.65,0.78,0.384]', 'core_[0.52,0.65,0.384]', 'core_[0.78,0.65,0.384]', 'core_[0.65,0.65,0.24576]', 'core_[0.65,0.65,0.36864]', 'core_[0.65,0.52,0.3072]', 'core_[0.65,0.78,0.3072]', 'core_[0.52,0.65,0.3072]', 'core_[0.78,0.65,0.3072]', 'core_[0.65,0.65,0.27648]', 'core_[0.65,0.65,0.33792]', 'core_[0.65,0.585,0.3072]', 'core_[0.65,0.715,0.3072]', 'core_[0.585,0.65,0.3072]', 'core_[0.715,0.65,0.3072]', 'core_[0.65,0.65,0.29184]', 'core_[0.65,0.65,0.32256]', 'core_[0.65,0.6175,0.3072]', 'core_[0.65,0.6825,0.3072]', 'core_[0.6175,0.65,0.3072]', 'core_[0.6825,0.65,0.3072]', 'core_[0.65,0.65,0.29952]', 'core_[0.65,0.65,0.31488]', 'core_[0.65,0.63375,0.3072]', 'core_[0.65,0.66625,0.3072]', 'core_[0.63375,0.65,0.3072]', 'core_[0.66625,0.65,0.3072]', 'core_[0.65,0.65,0.29203]', 'core_[0.65,0.65,0.30701]', 'core_[0.65,0.63375,0.29952]', 'core_[0.65,0.66625,0.29952]', 'core_[0.63375,0.65,0.29952]', 'core_[0.66625,0.65,0.29952]', 'core_[0.65,0.65,0.29578]', 'core_[0.65,0.65,0.30326]', 'core_[0.65,0.64187,0.29952]', 'core_[0.65,0.65813,0.29952]', 'core_[0.64187,0.65,0.29952]', 'core_[0.65813,0.65,0.29952]', 'core_[0.65,0.65,0.29765]', 'core_[0.65,0.65,0.30139]', 'core_[0.65,0.64594,0.29952]', 'core_[0.65,0.65406,0.29952]', 'core_[0.64594,0.65,0.29952]', 'core_[0.65406,0.65,0.29952]', 'core_[0.65,0.65,0.29858]', 'core_[0.65,0.65,0.30046]', 'core_[0.65,0.64797,0.29952]', 'core_[0.65,0.65203,0.29952]', 'core_[0.64797,0.65,0.29952]', 'core_[0.65203,0.65,0.29952]', 'core_[0.65,0.65,0.3014]', 'core_[0.65,0.64797,0.30046]', 
                                                                  'core_[0.65,0.65203,0.30046]', 'core_[0.64797,0.65,0.30046]', 'core_[0.65203,0.65,0.30046]', 'core_[0.64797,0.65,0.3014]', 'core_[0.64797,0.64797,0.30046]', 'core_[0.64797,0.65203,0.30046]', 'core_[0.64595,0.65,0.30046]', 'core_[0.64999,0.65,0.30046]', 'core_[0.64595,0.65,0.29952]', 'core_[0.64595,0.65,0.3014]', 'core_[0.64595,0.64797,0.30046]', 'core_[0.64595,0.65203,0.30046]', 'core_[0.64393,0.65,0.30046]', 'core_[0.64393,0.65,0.29952]', 'core_[0.64393,0.65,0.3014]', 'core_[0.64393,0.64797,0.30046]', 'core_[0.64393,0.65203,0.30046]', 'core_[0.64192,0.65,0.30046]', 'core_[0.64594,0.65,0.30046]', 'core_[0.64192,0.65,0.29952]', 'core_[0.64192,0.65,0.3014]', 'core_[0.64192,0.64797,0.30046]', 'core_[0.64192,0.65203,0.30046]', 'core_[0.63991,0.65,0.30046]', 'core_[0.63991,0.65,0.29952]', 'core_[0.63991,0.65,0.3014]', 'core_[0.63991,0.64797,0.30046]', 'core_[0.63991,0.65203,0.30046]', 'core_[0.63791,0.65,0.30046]', 'core_[0.64191,0.65,0.30046]', 'core_[0.63791,0.65,0.29952]', 'core_[0.63791,0.65,0.3014]', 'core_[0.63791,0.64797,0.30046]', 'core_[0.63791,0.65203,0.30046]', 'core_[0.63592,0.65,0.30046]', 'core_[0.6399,0.65,0.30046]', 'core_[0.63592,0.65,0.29952]', 'core_[0.63592,0.65,0.3014]', 'core_[0.63592,0.64797,0.30046]', 'core_[0.63592,0.65203,0.30046]', 'core_[0.63393,0.65,0.30046]', 'core_[0.63393,0.65,0.29952]', 'core_[0.63393,0.65,0.3014]', 'core_[0.63393,0.64797,0.30046]', 'core_[0.63393,0.65203,0.30046]', 'core_[0.63195,0.65,0.30046]', 'core_[0.63591,0.65,0.30046]', 'core_[0.63195,0.65,0.29952]', 'core_[0.63195,0.65,0.3014]', 'core_[0.63195,0.64797,0.30046]', 'core_[0.63195,0.65203,0.30046]', 'core_[0.62998,0.65,0.30046]', 'core_[0.63392,0.65,0.30046]', 'core_[0.62998,0.65,0.29952]', 'core_[0.62998,0.65,0.3014]', 'core_[0.62998,0.64797,0.30046]', 'core_[0.62998,0.65203,0.30046]', 'core_[0.62801,0.65,0.30046]', 'core_[0.62801,0.65,0.29952]', 'core_[0.62801,0.65,0.3014]', 'core_[0.62801,0.64797,0.30046]', 'core_[0.62801,0.65203,0.30046]', 
                                                                  'core_[0.62605,0.65,0.30046]', 'core_[0.62997,0.65,0.30046]', 'core_[0.62605,0.65,0.29952]', 'core_[0.62605,0.65,0.3014]', 'core_[0.62605,0.64797,0.30046]', 'core_[0.62605,0.65203,0.30046]', 'core_[0.62409,0.65,0.30046]', 'core_[0.62409,0.65,0.29952]', 'core_[0.62409,0.65,0.3014]', 'core_[0.62409,0.64797,0.30046]', 'core_[0.62409,0.65203,0.30046]', 'core_[0.62214,0.65,0.30046]', 'core_[0.62604,0.65,0.30046]', 'core_[0.62214,0.65,0.29952]', 'core_[0.62214,0.65,0.3014]', 'core_[0.62214,0.64797,0.30046]', 'core_[0.62214,0.65203,0.30046]', 'core_[0.6202,0.65,0.30046]', 'core_[0.62408,0.65,0.30046]']
    ns.shutdown()
    time.sleep(0.1)
def test_search_method_simple():
    """search_method in 'simple' mode reproduces a seeded design trace.

    Fix: removed a leftover debug ``print(...)`` of the blackboard keys that
    was emitted just before the first assertion; it added noise to the test
    output and duplicated what pytest already shows on failure.
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='bb', base=bb_opt.BbOpt)
    bb.initialize_abstract_level_3(objectives=objs, design_variables=dvs)
    bb.connect_agent(hc.HillClimb, 'ka_rp_exploit')
    ka = bb.get_attr('_proxy_server')
    rp = ka.proxy('ka_rp_exploit')
    rp.set_attr(problem=problem)
    rp.set_attr(step_size=0.2)
    rp.set_attr(step_rate=0.2)
    rp.set_attr(step_limit=1)
    rp.set_attr(convergence_criteria=0.001)
    rp.set_random_seed(seed=103)
    bb.update_abstract_lvl(3, 'core_[0.650,0.650,0.4]', {'design variables': {'x0': 0.650, 'x1': 0.650, 'x2': 0.4},
                                                         'objective functions': {'f0': 365.0, 'f1': 500.0, 'f2' : 600.0}}, panel='old')
    bb.update_abstract_lvl(1, 'core_[0.650,0.650,0.4]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp.set_attr(lvl_read=bb.get_attr('abstract_lvls')['level 1'])
    rp.set_attr(_lvl_data=bb.get_attr('abstract_lvls')['level 3']['old'])
    rp.set_attr(new_designs=['core_[0.650,0.650,0.4]'])
    rp.set_attr(core_select='fitness')
    rp.set_attr(core_select_fraction=1.0)
    rp.search_method()
    time.sleep(0.075)
    rp.set_attr(step_limit=100)
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[0.65,0.52,0.4]', 'core_[0.65,0.52,0.48]', 'core_[0.65,0.52,0.32]', 'core_[0.65,0.416,0.4]', 'core_[0.78,0.52,0.4]', 'core_[0.52,0.52,0.4]', 'core_[0.65,0.624,0.4]']
    rp.search_method()
    time.sleep(0.075)
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[0.65,0.52,0.4]', 'core_[0.65,0.52,0.48]', 'core_[0.65,0.52,0.32]', 'core_[0.65,0.416,0.4]', 'core_[0.78,0.52,0.4]', 'core_[0.52,0.52,0.4]', 'core_[0.65,0.624,0.4]', 'core_[0.65,0.78,0.4]', 'core_[0.65,0.78,0.32]', 'core_[0.52,0.78,0.4]', 'core_[0.78,0.78,0.4]', 'core_[0.65,0.936,0.4]', 'core_[0.65,0.78,0.48]', 'core_[0.65,0.78,0.336]', 'core_[0.754,0.78,0.4]', 'core_[0.546,0.78,0.4]', 'core_[0.65,0.9048,0.4]', 'core_[0.65,0.78,0.464]', 'core_[0.65,0.6552,0.4]', 'core_[0.65,0.87984,0.4]', 'core_[0.65,0.68016,0.4]', 'core_[0.5668,0.78,0.4]', 'core_[0.7332,0.78,0.4]', 'core_[0.65,0.78,0.4512]', 'core_[0.65,0.78,0.3488]', 'core_[0.65,0.78,0.35904]', 'core_[0.65,0.70013,0.4]', 'core_[0.65,0.85987,0.4]', 'core_[0.65,0.78,0.44096]', 'core_[0.58344,0.78,0.4]', 'core_[0.71656,0.78,0.4]', 'core_[0.59675,0.78,0.4]', 'core_[0.70325,0.78,0.4]', 'core_[0.65,0.7161,0.4]', 'core_[0.65,0.78,0.36723]', 'core_[0.65,0.8439,0.4]', 'core_[0.65,0.78,0.43277]', 'core_[0.6926,0.78,0.4]', 'core_[0.65,0.78,0.37379]', 'core_[0.65,0.83112,0.4]', 'core_[0.6074,0.78,0.4]', 'core_[0.65,0.72888,0.4]', 'core_[0.65,0.78,0.42621]', 'core_[0.65,0.78,0.42097]', 'core_[0.65,0.78,0.37903]', 'core_[0.68408,0.78,0.4]', 'core_[0.61592,0.78,0.4]', 'core_[0.65,0.73911,0.4]', 'core_[0.65,0.82089,0.4]', 'core_[0.65,0.78,0.38322]', 'core_[0.65,0.78,0.41678]', 'core_[0.67726,0.78,0.4]', 'core_[0.62274,0.78,0.4]', 'core_[0.65,0.81272,0.4]', 'core_[0.65,0.74728,0.4]', 'core_[0.65,0.78,0.38658]', 'core_[0.67181,0.78,0.4]', 'core_[0.65,0.80617,0.4]', 'core_[0.65,0.75383,0.4]', 'core_[0.65,0.78,0.41342]', 'core_[0.62819,0.78,0.4]', 'core_[0.65,0.78,0.38926]', 'core_[0.63255,0.78,0.4]', 'core_[0.65,0.80094,0.4]', 'core_[0.66745,0.78,0.4]', 'core_[0.65,0.75906,0.4]', 'core_[0.65,0.78,0.41074]', 'core_[0.65,0.79675,0.4]', 'core_[0.65,0.76325,0.4]', 'core_[0.66396,0.78,0.4]', 'core_[0.65,0.78,0.39141]', 'core_[0.65,0.78,0.40859]', 
                                                                  'core_[0.63604,0.78,0.4]', 'core_[0.65,0.7934,0.4]', 'core_[0.65,0.78,0.39313]', 'core_[0.66117,0.78,0.4]', 'core_[0.65,0.7666,0.4]', 'core_[0.65,0.78,0.40687]', 'core_[0.63883,0.78,0.4]', 'core_[0.64107,0.78,0.4]', 'core_[0.65,0.78,0.3945]', 'core_[0.65,0.79072,0.4]', 'core_[0.65893,0.78,0.4]', 'core_[0.65,0.78,0.4055]', 'core_[0.65,0.76928,0.4]', 'core_[0.65,0.78858,0.4]', 'core_[0.65715,0.78,0.4]', 'core_[0.65,0.77142,0.4]', 'core_[0.65,0.78,0.3956]', 'core_[0.64285,0.78,0.4]', 'core_[0.65,0.78,0.4044]', 'core_[0.65,0.78,0.39648]', 'core_[0.65,0.78,0.40352]', 'core_[0.65572,0.78,0.4]', 'core_[0.64428,0.78,0.4]', 'core_[0.65,0.77314,0.4]', 'core_[0.65,0.78686,0.4]', 'core_[0.64543,0.78,0.4]', 'core_[0.65457,0.78,0.4]', 'core_[0.65,0.77451,0.4]', 'core_[0.65,0.78,0.40281]', 'core_[0.65,0.78,0.39719]', 'core_[0.65,0.78549,0.4]', 'core_[0.64634,0.78,0.4]', 'core_[0.65366,0.78,0.4]', 'core_[0.65,0.77561,0.4]', 'core_[0.65,0.78439,0.4]', 'core_[0.65,0.78,0.40225]', 'core_[0.65,0.78,0.39775]', 'core_[0.65,0.78,0.3982]', 'core_[0.65,0.78351,0.4]', 'core_[0.65293,0.78,0.4]', 'core_[0.65,0.77649,0.4]', 'core_[0.64707,0.78,0.4]', 'core_[0.65,0.78,0.4018]', 'core_[0.65,0.78281,0.4]', 'core_[0.65,0.77719,0.4]', 'core_[0.65,0.78,0.39856]', 'core_[0.65234,0.78,0.4]', 'core_[0.64766,0.78,0.4]', 'core_[0.65,0.78,0.40144]', 'core_[0.65,0.77775,0.4]', 'core_[0.65,0.78225,0.4]', 'core_[0.65,0.78,0.39885]', 'core_[0.65,0.78,0.40115]', 'core_[0.65187,0.78,0.4]', 'core_[0.64813,0.78,0.4]', 'core_[0.65,0.78,0.40092]', 'core_[0.65,0.78,0.39908]', 'core_[0.65,0.7782,0.4]', 'core_[0.6515,0.78,0.4]', 'core_[0.6485,0.78,0.4]', 'core_[0.65,0.7818,0.4]', 'core_[0.6512,0.78,0.4]', 'core_[0.65,0.78,0.39926]', 'core_[0.65,0.78144,0.4]', 'core_[0.65,0.78,0.40074]', 'core_[0.65,0.77856,0.4]', 'core_[0.6488,0.78,0.4]', 'core_[0.64904,0.78,0.4]', 'core_[0.65,0.78,0.39941]', 'core_[0.65,0.78115,0.4]', 'core_[0.65,0.78,0.40059]', 'core_[0.65,0.77885,0.4]', 'core_[0.65096,0.78,0.4]', 
                                                                  'core_[0.65,0.78,0.39953]', 'core_[0.65,0.78,0.40047]', 'core_[0.65,0.78092,0.4]', 'core_[0.64923,0.78,0.4]', 'core_[0.65077,0.78,0.4]', 'core_[0.65,0.77908,0.4]']
    ns.shutdown()
    time.sleep(0.1)
def test_search_method_discrete_dv():
    """search_method explores discrete option-valued design variables (TSP).

    With a fixed seed the set of candidate cores written to level 3 'new'
    is deterministic.
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='blackboard', base=bb_opt.BbOpt)
    dv = {'x0' : {'options': ['0', '1', '2', '3'], 'default': '0', 'variable type': str},
          'x1' : {'options': ['0', '1', '2', '3'], 'default': '1', 'variable type': str},
          'x2' : {'options': ['0', '1', '2', '3'], 'default': '2', 'variable type': str},
          'x3' : {'options': ['0', '1', '2', '3'], 'default': '3', 'variable type': str}}
    obj = {'f1': {'ll': 10, 'ul':200, 'goal': 'lt', 'variable type': float}}
    bb.initialize_abstract_level_3(design_variables=dv,objectives=obj,constraints={})
    # NOTE(review): this local `problem` deliberately shadows the module-level
    # DTLZ1 fixture with a TSP benchmark for this test only.
    problem = BenchmarkProblem(design_variables=dv,
                               objectives=obj,
                               constraints={},
                               benchmark_name = 'tsp')
    bb.connect_agent(hc.HillClimb, 'ka_rp')
    rp = ns.proxy('ka_rp')
    rp.set_attr(step_limit=10)
    rp.set_random_seed(seed=109875)
    rp.set_attr(hc_type='steepest ascent')
    rp.set_attr(problem=problem)
    bb.update_abstract_lvl(3, 'core_[3,1,2,0]', {'design variables': {'x0': '0', 'x1': '1', 'x2': '2', 'x3': '3'},
                                                 'objective functions': {'f1': 95.0},
                                                 'constraints': {}}, panel='old')
    bb.update_abstract_lvl(1, 'core_[3,1,2,0]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp.set_attr(lvl_read=bb.get_attr('abstract_lvls')['level 1'])
    rp.set_attr(_lvl_data=bb.get_attr('abstract_lvls')['level 3']['old'])
    rp.set_attr(new_designs=['core_[3,1,2,0]'])
    rp.set_attr(core_select='fitness')
    rp.set_attr(core_select_fraction=1.0)
    rp.search_method()
    time.sleep(0.5)
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[0,1,2,0]', 'core_[0,1,3,3]', 'core_[0,2,2,3]', 'core_[3,1,2,3]', 'core_[0,1,3,2]', 'core_[0,1,2,3]', 'core_[0,0,3,3]', 'core_[1,1,3,3]', 'core_[0,0,3,0]', 'core_[0,0,1,3]', 'core_[0,3,3,3]', 'core_[1,0,3,3]', 'core_[0,0,3,2]', 'core_[0,0,2,3]', 'core_[3,0,3,3]', 'core_[0,0,3,1]', 'core_[0,2,3,3]', 'core_[0,0,0,3]', 'core_[2,0,3,3]']
    ns.shutdown()
    time.sleep(0.1)
def test_search_method_permutation_dv():
    """search_method handles a permutation-typed design variable (TSP perm).

    The single design variable is an ordering of four cities; with a fixed
    seed the two neighbour permutations below are produced.
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='blackboard', base=bb_opt.BbOpt)
    dv = {'x0' : {'permutation': ['0','1','2','3'], 'variable type': list}}
    obj = {'f1': {'ll': 80, 'ul':200, 'goal': 'lt', 'variable type': float}}
    bb.initialize_abstract_level_3(design_variables=dv,objectives=obj,constraints={})
    # NOTE(review): local `problem` shadows the module-level DTLZ1 fixture.
    problem = BenchmarkProblem(design_variables=dv,
                               objectives=obj,
                               constraints={},
                               benchmark_name = 'tsp_perm')
    bb.connect_agent(hc.HillClimb, 'ka_rp')
    rp = ns.proxy('ka_rp')
    rp.set_attr(step_limit=10)
    rp.set_random_seed(seed=109875)
    rp.set_attr(hc_type='steepest ascent')
    rp.set_attr(problem=problem)
    bb.update_abstract_lvl(3, 'core_[3,1,2,0]', {'design variables': {'x0': ['0','1','2','3']},
                                                 'objective functions': {'f1': 95.0},
                                                 'constraints': {}}, panel='old')
    bb.update_abstract_lvl(1, 'core_[3,1,2,0]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp.set_attr(lvl_read=bb.get_attr('abstract_lvls')['level 1'])
    rp.set_attr(_lvl_data=bb.get_attr('abstract_lvls')['level 3']['old'])
    rp.set_attr(new_designs=['core_[3,1,2,0]'])
    rp.search_method()
    time.sleep(0.5)
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[[1,0,2,3]]', 'core_[[0,1,2,3]]']
    ns.shutdown()
    time.sleep(0.1)
def test_search_method_mixed_dv():
    """search_method mixes a discrete option variable with continuous ones (RE22).

    x0 is restricted to a catalogue of discrete values while x1/x2 are
    continuous; with a fixed seed the candidate trace is deterministic.
    """
    # The nameserver port can linger from a previous test; retry once.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='blackboard', base=bb_opt.BbOpt)
    model = 're22'
    objs = {'f0': {'ll':0.0, 'ul':500.0, 'goal':'lt', 'variable type': float},
            'f1': {'ll':0.0, 'ul':50.0, 'goal':'lt', 'variable type': float},}
    # NOTE(review): '3,10' in the options list below looks like a typo for
    # 3.10 (it injects the two ints 3 and 10) — confirm against the RE22
    # reinforcement-bar catalogue before relying on these options.
    dvs = {'x0': {'options' : [0.20, 0.31, 0.40, 0.44, 0.60, 0.62, 0.79, 0.80, 0.88, 0.93, 1.0, 1.20, 1.24, 1.32, 1.40, 1.55, 1.58, 1.60, 1.76, 1.80, 1.86, 2.0, 2.17, 2.20, 2.37, 2.40, 2.48, 2.60, 2.64, 2.79, 2.80, 3.0, 3.08, 3,10, 3.16, 3.41, 3.52, 3.60, 3.72, 3.95, 3.96, 4.0, 4.03, 4.20, 4.34, 4.40, 4.65, 4.74, 4.80, 4.84, 5.0, 5.28, 5.40, 5.53, 5.72, 6.0, 6.16, 6.32, 6.60, 7.11, 7.20, 7.80, 7.90, 8.0, 8.40, 8.69, 9.0, 9.48, 10.27, 11.0, 11.06, 11.85, 12.0, 13.0, 14.0, 15.0], 'variable type': float},
           'x1': {'ll': 0.0, 'ul':20.0, 'variable type': float},
           'x2': {'ll': 0.0, 'ul':40.0, 'variable type': float},}
    problem = BenchmarkProblem(design_variables=dvs,
                               objectives=objs,
                               constraints={},
                               benchmark_name = model)
    bb.initialize_abstract_level_3(design_variables=dvs,objectives=objs,constraints={})
    bb.connect_agent(hc.HillClimb, 'ka_rp')
    rp = ns.proxy('ka_rp')
    rp.set_attr(step_limit=2)
    rp.set_random_seed(seed=109875)
    rp.set_attr(hc_type='steepest ascent')
    rp.set_attr(problem=problem)
    bb.update_abstract_lvl(3, 'core_[13.0,250.0,25.0]', {'design variables': {'x0': 13.0, 'x1': 10.0, 'x2': 20.0},
                                                         'objective functions': {'f0': 365.0, 'f1': 500.0,}}, panel='old')
    bb.update_abstract_lvl(1, 'core_[13.0,250.0,25.0]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp.set_attr(lvl_read=bb.get_attr('abstract_lvls')['level 1'])
    rp.set_attr(_lvl_data=bb.get_attr('abstract_lvls')['level 3']['old'])
    rp.set_attr(new_designs=['core_[13.0,250.0,25.0]'])
    rp.set_attr(core_select='fitness')
    rp.set_attr(core_select_fraction=1.0)
    rp.search_method()
    time.sleep(0.5)
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[13.0,10.0,18.0]', 'core_[13.0,10.0,22.0]', 'core_[13.0,9.0,20.0]', 'core_[13.0,11.0,20.0]', 'core_[2.8,10.0,20.0]', 'core_[13.0,10.0,18.2]', 'core_[13.0,10.0,21.8]', 'core_[13.0,9.1,20.0]', 'core_[13.0,10.9,20.0]', 'core_[0.31,10.0,20.0]', 'core_[13.0,10.0,18.38]', 'core_[13.0,10.0,21.62]', 'core_[13.0,9.19,20.0]', 'core_[13.0,10.81,20.0]', 'core_[2.17,10.0,20.0]']
    ns.shutdown()
    time.sleep(0.1)
def test_force_shutdown():
    """Integration test: the blackboard force-shuts a slow knowledge agent
    and survives alone, keeping only the designs finished before the kill.

    NOTE(review): relies on module-level fixtures (objs, dvs, problem) and on
    osBrain-style proxies (run_nameserver/run_agent) -- confirm against the
    surrounding test module, which is not fully visible here.
    """
    # The nameserver port may still be bound from a previous test; retry
    # once after a short grace period.
    try:
        ns = run_nameserver()
    except OSError:
        time.sleep(0.5)
        ns = run_nameserver()
    bb = run_agent(name='blackboard', base=bb_opt.BbOpt)
    bb.initialize_abstract_level_3(objectives=objs, design_variables=dvs)
    bb.initialize_metadata_level()
    bb.connect_agent(hc.HillClimb, 'ka_rp')
    # final_trigger=0: shut agents down without waiting for a final round.
    bb.set_attr(final_trigger=0)
    # Seed level 3 (design data) and level 1 (pareto data) with one core.
    bb.update_abstract_lvl(3, 'core_[0.650,0.650,0.4]',
                           {'design variables': {'x0': 0.650, 'x1': 0.650, 'x2': 0.4},
                            'objective functions': {'f0': 365.0, 'f1': 500.0, 'f2' : 600.0}},
                           panel='old')
    bb.update_abstract_lvl(1, 'core_[0.650,0.650,0.4]', {'pareto type' : 'pareto', 'fitness function' : 1.0})
    rp = ns.proxy('ka_rp')
    rp.set_random_seed(seed=109875)
    # debug_wait slows each search step so the shutdown arrives mid-search.
    rp.set_attr(problem=problem, debug_wait=True, debug_wait_time=0.05)
    rp.set_attr(lvl_read=bb.get_blackboard()['level 1'], _lvl_data=bb.get_blackboard()['level 3']['old'], new_designs=['core_[0.650,0.650,0.4]'])
    bb.set_attr(_kaar = {0: {}, 1: {'ka_rp': 2}}, _ka_to_execute=('ka_rp', 2))
    rp.set_attr(core_select='fitness')
    rp.set_attr(core_select_fraction=1.0)
    bb.send_executor()
    time.sleep(0.1)
    bb.send_shutdown()
    time.sleep(0.1)
    # Only the blackboard itself should survive the forced shutdown; the agent
    # only produced the two designs it finished before being killed.
    assert ns.agents() == ['blackboard']
    assert list(bb.get_blackboard()['level 3']['new'].keys()) == ['core_[0.585,0.65,0.4]', 'core_[0.585,0.715,0.4]']
    ns.shutdown()
    time.sleep(0.1)
acdebbe939bb06aa4c1c1650494dbcb9e487bfb1 | 717 | py | Python | tests/molecular/molecules/molecule/with_canonical_atom_ordering/case_data.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | tests/molecular/molecules/molecule/with_canonical_atom_ordering/case_data.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | tests/molecular/molecules/molecule/with_canonical_atom_ordering/case_data.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
class CaseData:
    """A single test case for canonical atom ordering.

    Attributes
    ----------
    molecule : :class:`.Molecule`
        The molecule which should be canonically ordered.
    result : :class:`.Molecule`
        What the molecule should look like after canonical ordering.
    """

    def __init__(self, molecule, result):
        """Initialize a :class:`.CaseData` instance.

        Parameters
        ----------
        molecule : :class:`.Molecule`
            The molecule which should be canonically ordered.
        result : :class:`.Molecule`
            What the molecule should look like after canonical ordering.
        """
        self.molecule, self.result = molecule, result
| 22.40625 | 68 | 0.564854 |
acdebc5e21796d6164096d8898d7791c1053903f | 959 | py | Python | server/app.py | amitab/helpqueue | 7aef922461d1dcdaf04f01984a966e3fc2e4e1b5 | [
"MIT"
] | null | null | null | server/app.py | amitab/helpqueue | 7aef922461d1dcdaf04f01984a966e3fc2e4e1b5 | [
"MIT"
] | null | null | null | server/app.py | amitab/helpqueue | 7aef922461d1dcdaf04f01984a966e3fc2e4e1b5 | [
"MIT"
# --- Flask application bootstrap -------------------------------------------
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
from flask_dotenv import DotEnv
import os

print("Initializing Backend")
# Serve the pre-built React bundle from ./build as static files.
app = Flask(__name__, static_folder='build')
env = DotEnv(app)
env.init_app(app, env_file="./.env", verbose_mode=True)
# For heroku launching: Heroku injects the Postgres URL via DATABASE_URL.
if "DATABASE_URL" in os.environ:
    app.config["SQLALCHEMY_DATABASE_URI"] = os.environ["DATABASE_URL"]
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Database handle bound to the app.
db = SQLAlchemy(app)
# DEBUG presumably comes from the .env file loaded above -- confirm.
if app.config["DEBUG"]:
    app.debug = True
else:
    app.debug = False
# Routes for heroku push
@app.route('/')
def root():
    """Serve the single-page app entry point (build/index.html)."""
    return app.send_static_file('index.html')
@app.route('/<path:path>')
def static_proxy(path):
    """Serve a static file if it exists; otherwise fall back to
    ``index.html`` so the React client can handle the route.

    Parameters
    ----------
    path : str
        Request path captured by Flask.
    """
    try:
        return app.send_static_file(path)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt. Only application-level errors (e.g. a missing
        # static file) should trigger the SPA fallback.
        return app.send_static_file('index.html')
| 23.975 | 79 | 0.710115 |
acdebd479e277ca2334939ef3900cb2e6f20368a | 88,137 | py | Python | rocketpy/Function.py | DeepWater1013/RockedPy | 34410c6b22456d1829dcd0e594174cd8ad055104 | [
"MIT"
] | null | null | null | rocketpy/Function.py | DeepWater1013/RockedPy | 34410c6b22456d1829dcd0e594174cd8ad055104 | [
"MIT"
] | null | null | null | rocketpy/Function.py | DeepWater1013/RockedPy | 34410c6b22456d1829dcd0e594174cd8ad055104 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Giovani Hidalgo Ceotto, Lucas Kierulff Balabram"
__copyright__ = "Copyright 20XX, Projeto Jupiter"
__license__ = "MIT"
import re
import math
import bisect
import warnings
import time
from datetime import datetime, timedelta
from inspect import signature, getsourcelines
from collections import namedtuple
import numpy as np
from scipy import integrate
from scipy import linalg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
class Function:
"""Class converts a python function or a data sequence into an object
which can be handled more naturally, enabling easy interpolation,
extrapolation, ploting and algebra.
"""
def __init__(
self,
source,
inputs=["Scalar"],
outputs=["Scalar"],
interpolation=None,
extrapolation=None,
):
"""Convert source into a Function, to be used more naturally.
Set inputs, outputs, domain dimension, interpolation and extrapolation
method, and process the source.
Parameters
----------
source : function, scalar, ndarray, string
The actual function. If type is function, it will be called for
evaluation. If type is int or float, it will be treated as a
constant function. If ndarray, its points will be used for
interpolation. An ndarray should be as [(x0, y0, z0), (x1, y1, z1),
(x2, y2, z2), ...] where x0 and y0 are inputs and z0 is output. If
string, imports file named by the string and treats it as csv.
The file is converted into ndarray and should not have headers.
inputs : string, sequence of strings, optional
The name of the inputs of the function. Will be used for
representation and graphing (axis names). 'Scalar' is default.
If source is function, int or float and has multiple inputs,
this parameter must be given for correct operation.
outputs : string, sequence of strings, optional
The name of the outputs of the function. Will be used for
representation and graphing (axis names). Scalar is default.
interpolation : string, optional
Interpolation method to be used if source type is ndarray.
For 1-D functions, linear, polynomial, akima and spline are
supported. For N-D functions, only shepard is suporrted.
Default for 1-D functions is spline.
extrapolation : string, optional
Extrapolation method to be used if source type is ndarray.
Options are 'natural', which keeps interpolation, 'constant',
which returns the value of the function at the edge of the interval,
and 'zero', which returns zero for all points outside of source
range. Default for 1-D functions is constant.
Returns
-------
None
"""
# Set input and output
self.setInputs(inputs)
self.setOutputs(outputs)
# Save interpolation method
self.__interpolation__ = interpolation
self.__extrapolation__ = extrapolation
# Initialize last_interval
self.last_interval = 0
# Set source
self.setSource(source)
# Return
return None
# Define all set methods
def setInputs(self, inputs):
"""Set the name and number of the incoming arguments of the Function.
Parameters
----------
inputs : string, sequence of strings
The name of the parameters (inputs) of the Function.
Returns
-------
self : Function
"""
self.__inputs__ = [inputs] if isinstance(inputs, str) else list(inputs)
self.__domDim__ = len(self.__inputs__)
return self
def setOutputs(self, outputs):
"""Set the name and number of the output of the Function.
Parameters
----------
outputs : string, sequence of strings
The name of the output of the function. Example: Distance (m).
Returns
-------
self : Function
"""
self.__outputs__ = [outputs] if isinstance(outputs, str) else list(outputs)
self.__imgDim__ = len(self.__outputs__)
return self
def setSource(self, source):
"""Set the source which defines the output of the function giving a
certain input.
Parameters
----------
source : function, scalar, ndarray, string
The actual function. If type is function, it will be called for
evaluation. If type is int or float, it will be treated as a
constant function. If ndarray, its points will be used for
interpolation. An ndarray should be as [(x0, y0, z0), (x1, y1, z1),
(x2, y2, z2), ...] where x0 and y0 are inputs and z0 is output. If
string, imports file named by the string and treats it as csv.
The file is converted into ndarray and should not have headers.
Returns
-------
self : Function
"""
# Import CSV if source is a string and convert values to ndarray
if isinstance(source, str):
# Read file and check for headers
f = open(source, "r")
firstLine = f.readline()
# If headers are found...
if firstLine[0] in ['"', "'"]:
# Headers available
firstLine = firstLine.replace('"', " ").replace("'", " ")
firstLine = firstLine.split(" , ")
self.setInputs(firstLine[0])
self.setOutputs(firstLine[1:])
source = np.loadtxt(source, delimiter=",", skiprows=1, dtype=float)
# if headers are not found
else:
source = np.loadtxt(source, delimiter=",", dtype=float)
# Convert to ndarray if source is a list
if isinstance(source, (list, tuple)):
source = np.array(source, dtype=np.float64)
# Convert number source into vectorized lambda function
if isinstance(source, (int, float)):
temp = 1 * source
source = lambda x: 0 * x + temp
# Handle callable source or number source
if callable(source):
# Set source
self.source = source
# Set geValueOpt2
self.getValueOpt = source
# Set arguments name and domain dimensions
parameters = signature(source).parameters
self.__domDim__ = len(parameters)
if self.__inputs__ == ["Time (s)"]:
self.__inputs__ = list(parameters)
# Set interpolation and extrapolation
self.__interpolation__ = None
self.__extrapolation__ = None
# Handle ndarray source
else:
# Check to see if dimensions match incoming data set
newTotalDim = len(source[0, :])
oldTotalDim = self.__domDim__ + self.__imgDim__
dV = self.__inputs__ == ["Scalar"] and self.__outputs__ == ["Scalar"]
# If they don't, update default values or throw error
if newTotalDim != oldTotalDim:
if dV:
# Update dimensions and inputs
self.__domDim__ = newTotalDim - 1
self.__inputs__ = self.__domDim__ * self.__inputs__
else:
# User has made a mistake inputting inputs and outputs
print("Error in input and output dimensions!")
return None
# Do things if domDim is 1
if self.__domDim__ == 1:
source = source[source[:, 0].argsort()]
# Finally set data source as source
self.source = source
# Set default interpolation for point source if it hasn't
if self.__interpolation__ is None:
self.setInterpolation()
else:
# Updates interpolation coefficients
self.setInterpolation(self.__interpolation__)
# Do things if function is multivariate
else:
# Finally set data source as source
self.source = source
if self.__interpolation__ is None:
self.setInterpolation("shepard")
# Update extrapolation method
if self.__extrapolation__ is None:
self.setExtrapolation()
# Return self
return self
def setInterpolation(self, method="spline"):
"""Set interpolation method and process data is method requires.
Parameters
----------
method : string, optional
Interpolation method to be used if source type is ndarray.
For 1-D functions, linear, polynomial, akima and spline is
supported. For N-D functions, only shepard is suporrted.
Default is 'spline'.
Returns
-------
self : Function
"""
# Set interpolation method
self.__interpolation__ = method
# Spline, akima and polynomial need data processing
# Shepard, and linear do not
if method == "spline":
self.__interpolateSpline__()
elif method == "polynomial":
self.__interpolatePolynomial__()
elif method == "akima":
self.__interpolateAkima__()
# Set geValueOpt
self.setGetValueOpt()
# Returns self
return self
def setExtrapolation(self, method="constant"):
"""Set extrapolation behavior of data set.
Parameters
----------
extrapolation : string, optional
Extrapolation method to be used if source type is ndarray.
Options are 'natural', which keeps interpolation, 'constant',
which returns the value of the function at the edge of the interval,
and 'zero', which returns zero for all points outside of source
range. Default is 'zero'.
Returns
-------
self : Function
"""
# Set extrapolation method
self.__extrapolation__ = method
# Return self
return self
    def setGetValueOpt(self):
        """Creates a method that evaluates interpolations rather quickly
        when compared to other options available, such as just calling
        the object instance or calling self.getValue directly. See
        Function.getValueOpt for documentation.

        Returns
        -------
        self : Function
        """
        # Retrieve general info
        xData = self.source[:, 0]
        yData = self.source[:, 1]
        xmin, xmax = xData[0], xData[-1]
        # Encode the extrapolation scheme as an int so the closures below
        # avoid string comparisons on every evaluation.
        if self.__extrapolation__ == "zero":
            extrapolation = 0  # Extrapolation is zero
        elif self.__extrapolation__ == "natural":
            extrapolation = 1  # Extrapolation is natural
        else:
            extrapolation = 2  # Extrapolation is constant
        # Create method to interpolate this info for each interpolation type
        if self.__interpolation__ == "spline":
            coeffs = self.__splineCoefficients__

            def getValueOpt(x):
                xInterval = np.searchsorted(xData, x)
                # Interval found... interpolate... or extrapolate
                if xmin <= x <= xmax:
                    # Interpolate: evaluate the interval's cubic at the offset
                    # from its left knot.
                    xInterval = xInterval if xInterval != 0 else 1
                    a = coeffs[:, xInterval - 1]
                    x = x - xData[xInterval - 1]
                    y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                else:
                    # Extrapolate
                    if extrapolation == 0:  # Extrapolation == zero
                        y = 0
                    elif extrapolation == 1:  # Extrapolation == natural
                        a = coeffs[:, 0] if x < xmin else coeffs[:, -1]
                        x = x - xData[0] if x < xmin else x - xData[-2]
                        y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                    else:  # Extrapolation is set to constant
                        y = yData[0] if x < xmin else yData[-1]
                return y

            self.getValueOpt = getValueOpt
        elif self.__interpolation__ == "linear":

            def getValueOpt(x):
                xInterval = np.searchsorted(xData, x)
                # Interval found... interpolate... or extrapolate
                if xmin <= x <= xmax:
                    # Interpolate
                    dx = float(xData[xInterval] - xData[xInterval - 1])
                    dy = float(yData[xInterval] - yData[xInterval - 1])
                    y = (x - xData[xInterval - 1]) * (dy / dx) + yData[xInterval - 1]
                else:
                    # Extrapolate
                    if extrapolation == 0:  # Extrapolation == zero
                        y = 0
                    elif extrapolation == 1:  # Extrapolation == natural
                        # Extend the first/last segment's slope outward.
                        xInterval = 1 if x < xmin else -1
                        dx = float(xData[xInterval] - xData[xInterval - 1])
                        dy = float(yData[xInterval] - yData[xInterval - 1])
                        y = (x - xData[xInterval - 1]) * (dy / dx) + yData[
                            xInterval - 1
                        ]
                    else:  # Extrapolation is set to constant
                        y = yData[0] if x < xmin else yData[-1]
                return y

            self.getValueOpt = getValueOpt
        elif self.__interpolation__ == "akima":
            coeffs = np.array(self.__akimaCoefficients__)

            def getValueOpt(x):
                xInterval = np.searchsorted(xData, x)
                # Interval found... interpolate... or extrapolate
                if xmin <= x <= xmax:
                    # Interpolate: akima coefficients are stored flat, four per
                    # interval, and the cubic is expressed in absolute x.
                    xInterval = xInterval if xInterval != 0 else 1
                    a = coeffs[4 * xInterval - 4 : 4 * xInterval]
                    y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                else:
                    # Extrapolate
                    if extrapolation == 0:  # Extrapolation == zero
                        y = 0
                    elif extrapolation == 1:  # Extrapolation == natural
                        a = coeffs[:4] if x < xmin else coeffs[-4:]
                        y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                    else:  # Extrapolation is set to constant
                        y = yData[0] if x < xmin else yData[-1]
                return y

            self.getValueOpt = getValueOpt
        elif self.__interpolation__ == "polynomial":
            coeffs = self.__polynomialCoefficients__

            def getValueOpt(x):
                # Interpolate... or extrapolate
                if xmin <= x <= xmax:
                    # Interpolate by evaluating the fitted polynomial
                    y = 0
                    for i in range(len(coeffs)):
                        y += coeffs[i] * (x ** i)
                else:
                    # Extrapolate
                    if extrapolation == 0:  # Extrapolation == zero
                        y = 0
                    elif extrapolation == 1:  # Extrapolation == natural
                        # Natural: keep evaluating the same polynomial outside
                        y = 0
                        for i in range(len(coeffs)):
                            y += coeffs[i] * (x ** i)
                    else:  # Extrapolation is set to constant
                        y = yData[0] if x < xmin else yData[-1]
                return y

            self.getValueOpt = getValueOpt
        elif self.__interpolation__ == "shepard":
            xData = self.source[:, 0:-1]  # Support for N-Dimensions
            len_yData = len(yData)  # A little speed up

            def getValueOpt(*args):
                # Inverse-distance weighting with power 3; an exact data hit
                # short-circuits to that sample's value.
                x = np.array([[float(x) for x in list(args)]])
                numeratorSum = 0
                denominatorSum = 0
                for i in range(len_yData):
                    sub = xData[i] - x
                    distance = np.linalg.norm(sub)
                    if distance == 0:
                        numeratorSum = yData[i]
                        denominatorSum = 1
                        break
                    else:
                        weight = distance ** (-3)
                        numeratorSum = numeratorSum + yData[i] * weight
                        denominatorSum = denominatorSum + weight
                return numeratorSum / denominatorSum

            self.getValueOpt = getValueOpt
        # Returns self
        return self
def setDiscrete(
self,
lower=0,
upper=10,
samples=200,
interpolation="spline",
extrapolation="constant",
oneByOne=True,
):
"""This method transforms function defined Functions into list
defined Functions. It evaluates the function at certain points
(sampling range) and stores the results in a list, which is converted
into a Function and then returned. The original Function object is
replaced by the new one.
Parameters
----------
lower : scalar, optional
Value where sampling range will start. Default is 0.
upper : scalar, optional
Value where sampling range will end. Default is 10.
samples : int, optional
Number of samples to be taken from inside range. Default is 200.
interpolation : string
Interpolation method to be used if source type is ndarray.
For 1-D functions, linear, polynomail, akima and spline is
supported. For N-D functions, only shepard is suporrted.
Default is 'spline'.
extrapolation : string, optional
Extrapolation method to be used if source type is ndarray.
Options are 'natural', which keeps interpolation, 'constant',
which returns the value of the function at the edge of the interval,
and 'zero', which returns zero for all points outside of source
range. Default is 'constant'.
oneByOne : boolean, optional
If True, evaluate Function in each sample point separately. If
False, evaluates Function in vectorized form. Default is True.
Returns
-------
self : Function
"""
if self.__domDim__ == 1:
Xs = np.linspace(lower, upper, samples)
Ys = self.getValue(Xs.tolist()) if oneByOne else self.getValue(Xs)
self.source = np.concatenate(([Xs], [Ys])).transpose()
self.setInterpolation(interpolation)
self.setExtrapolation(extrapolation)
elif self.__domDim__ == 2:
lower = 2 * [lower] if isinstance(lower, (int, float)) else lower
upper = 2 * [upper] if isinstance(upper, (int, float)) else upper
sam = 2 * [samples] if isinstance(samples, (int, float)) else samples
# Create nodes to evaluate function
Xs = np.linspace(lower[0], upper[0], sam[0])
Ys = np.linspace(lower[1], upper[1], sam[1])
Xs, Ys = np.meshgrid(Xs, Ys)
Xs, Ys = Xs.flatten(), Ys.flatten()
mesh = [[Xs[i], Ys[i]] for i in range(len(Xs))]
# Evaluate function at all mesh nodes and convert it to matrix
Zs = np.array(self.getValue(mesh))
self.source = np.concatenate(([Xs], [Ys], [Zs])).transpose()
self.__interpolation__ = "shepard"
return self
# Define all get methods
    def getInputs(self):
        """Return the list of input (domain) variable names."""
        return self.__inputs__

    def getOutputs(self):
        """Return the list of output (image) variable names."""
        return self.__outputs__

    def getSource(self):
        """Return the Function source: a data ndarray or a callable."""
        return self.source

    def getImageDim(self):
        """Return int describing dimension of the image space of the function."""
        return self.__imgDim__

    def getDomainDim(self):
        """Return int describing dimension of the domain space of the function."""
        return self.__domDim__

    def getInterpolationMethod(self):
        """Return string describing interpolation method used (None for callables)."""
        return self.__interpolation__

    def getExtrapolationMethod(self):
        """Return string describing extrapolation method used (None for callables)."""
        return self.__extrapolation__
    def getValue(self, *args):
        """This method returns the value of the Function at the specified
        point. See Function.getValueOpt for a faster, but limited,
        implementation.

        Parameters
        ----------
        args : scalar, list
            Value where the Function is to be evaluated. If the Function is
            1-D, only one argument is expected, which may be an int, a float
            or a list of ints or floats, in which case the Function will be
            evaluated at all points in the list and a list of floats will be
            returned. If the function is N-D, N arguments must be given, each
            one being an scalar or list.

        Returns
        -------
        ans : scalar, list
        """
        # Return value for Function of function type
        if callable(self.source):
            if len(args) == 1 and isinstance(args[0], (list, tuple)):
                # A list of points: a nested list/tuple means N-D points
                if isinstance(args[0][0], (tuple, list)):
                    return [self.source(*arg) for arg in args[0]]
                else:
                    return [self.source(arg) for arg in args[0]]
            elif len(args) == 1 and isinstance(args[0], np.ndarray):
                return self.source(args[0])
            else:
                return self.source(*args)
        # Returns value for shepard interpolation
        elif self.__interpolation__ == "shepard":
            if isinstance(args[0], (list, tuple)):
                x = list(args[0])
            else:
                x = [[float(x) for x in list(args)]]
            # NOTE: ans aliases x; entries are overwritten in place below
            ans = x
            xData = self.source[:, 0:-1]
            yData = self.source[:, -1]
            for i in range(len(x)):
                numeratorSum = 0
                denominatorSum = 0
                # Inverse-distance weighting (power 3); an exact data hit
                # short-circuits to that sample's value
                for o in range(len(yData)):
                    sub = xData[o] - x[i]
                    distance = (sub.dot(sub)) ** (0.5)
                    # print(xData[o], x[i], distance)
                    if distance == 0:
                        numeratorSum = yData[o]
                        denominatorSum = 1
                        break
                    else:
                        weight = distance ** (-3)
                        numeratorSum = numeratorSum + yData[o] * weight
                        denominatorSum = denominatorSum + weight
                ans[i] = numeratorSum / denominatorSum
            return ans if len(ans) > 1 else ans[0]
        # Returns value for polynomial interpolation function type
        elif self.__interpolation__ == "polynomial":
            if isinstance(args[0], (int, float)):
                args = [list(args)]
            x = np.array(args[0])
            xData = self.source[:, 0]
            yData = self.source[:, 1]
            xmin, xmax = xData[0], xData[-1]
            coeffs = self.__polynomialCoefficients__
            # Build the Vandermonde matrix and evaluate all points at once
            A = np.zeros((len(args[0]), coeffs.shape[0]))
            for i in range(coeffs.shape[0]):
                A[:, i] = x ** i
            ans = A.dot(coeffs).tolist()
            # Overwrite out-of-range answers according to the extrapolation
            # scheme ('natural' keeps the polynomial values computed above)
            for i in range(len(x)):
                if not (xmin <= x[i] <= xmax):
                    if self.__extrapolation__ == "constant":
                        ans[i] = yData[0] if x[i] < xmin else yData[-1]
                    elif self.__extrapolation__ == "zero":
                        ans[i] = 0
            return ans if len(ans) > 1 else ans[0]
        # Returns value for spline, akima or linear interpolation function type
        elif self.__interpolation__ in ["spline", "akima", "linear"]:
            if isinstance(args[0], (int, float, complex)):
                args = [list(args)]
            # NOTE: x doubles as the output buffer; values are replaced in place
            x = [arg for arg in args[0]]
            xData = self.source[:, 0]
            yData = self.source[:, 1]
            xIntervals = np.searchsorted(xData, x)
            xmin, xmax = xData[0], xData[-1]
            if self.__interpolation__ == "spline":
                coeffs = self.__splineCoefficients__
                for i in range(len(x)):
                    if x[i] == xmin or x[i] == xmax:
                        x[i] = yData[xIntervals[i]]
                    elif xmin < x[i] < xmax or (self.__extrapolation__ == "natural"):
                        if not xmin < x[i] < xmax:
                            # Natural extrapolation: reuse the edge cubic
                            a = coeffs[:, 0] if x[i] < xmin else coeffs[:, -1]
                            x[i] = x[i] - xData[0] if x[i] < xmin else x[i] - xData[-2]
                        else:
                            a = coeffs[:, xIntervals[i] - 1]
                            x[i] = x[i] - xData[xIntervals[i] - 1]
                        x[i] = a[3] * x[i] ** 3 + a[2] * x[i] ** 2 + a[1] * x[i] + a[0]
                    else:
                        # Extrapolate
                        if self.__extrapolation__ == "zero":
                            x[i] = 0
                        else:  # Extrapolation is set to constant
                            x[i] = yData[0] if x[i] < xmin else yData[-1]
            elif self.__interpolation__ == "linear":
                for i in range(len(x)):
                    # Interval found... interpolate... or extrapolate
                    inter = xIntervals[i]
                    if xmin <= x[i] <= xmax:
                        # Interpolate
                        dx = float(xData[inter] - xData[inter - 1])
                        dy = float(yData[inter] - yData[inter - 1])
                        x[i] = (x[i] - xData[inter - 1]) * (dy / dx) + yData[inter - 1]
                    else:
                        # Extrapolate
                        if self.__extrapolation__ == "zero":  # Extrapolation == zero
                            x[i] = 0
                        elif (
                            self.__extrapolation__ == "natural"
                        ):  # Extrapolation == natural
                            # Extend the slope of the first/last segment
                            inter = 1 if x[i] < xmin else -1
                            dx = float(xData[inter] - xData[inter - 1])
                            dy = float(yData[inter] - yData[inter - 1])
                            x[i] = (x[i] - xData[inter - 1]) * (dy / dx) + yData[
                                inter - 1
                            ]
                        else:  # Extrapolation is set to constant
                            x[i] = yData[0] if x[i] < xmin else yData[-1]
            else:
                coeffs = self.__akimaCoefficients__
                for i in range(len(x)):
                    if x[i] == xmin or x[i] == xmax:
                        x[i] = yData[xIntervals[i]]
                    elif xmin < x[i] < xmax or (self.__extrapolation__ == "natural"):
                        if not (xmin < x[i] < xmax):
                            a = coeffs[:4] if x[i] < xmin else coeffs[-4:]
                        else:
                            # Akima coefficients are flat, four per interval
                            a = coeffs[4 * xIntervals[i] - 4 : 4 * xIntervals[i]]
                        x[i] = a[3] * x[i] ** 3 + a[2] * x[i] ** 2 + a[1] * x[i] + a[0]
                    else:
                        # Extrapolate
                        if self.__extrapolation__ == "zero":
                            x[i] = 0
                        else:  # Extrapolation is set to constant
                            x[i] = yData[0] if x[i] < xmin else yData[-1]
            if isinstance(args[0], np.ndarray):
                return np.array(x)
            else:
                return x if len(x) > 1 else x[0]
    def getValueOpt_deprecated(self, *args):
        """THE CODE BELOW IS HERE FOR DOCUMENTATION PURPOSES ONLY. IT WAS
        REPLACED FOR ALL INSTANCES BY THE FUNCTION.SETGETVALUEOPT METHOD.

        This method returns the value of the Function at the specified
        point in a limited but optimized manner. See Function.getValue for an
        implementation which allows more kinds of inputs.
        This method optimizes the Function.getValue method by only
        implementing function evaluations of single inputs, i.e., it is not
        vectorized. Furthermore, it actually implements a different method
        for each interpolation type, eliminating some if statements.
        Currently supports callables and spline, linear, akima, polynomial and
        shepard interpolated Function objects.

        Parameters
        ----------
        args : scalar
            Value where the Function is to be evaluated. If the Function is
            1-D, only one argument is expected, which may be an int or a float
            If the function is N-D, N arguments must be given, each one being
            an int or a float.

        Returns
        -------
        x : scalar
        """
        # Callables
        if callable(self.source):
            return self.source(*args)
        # Interpolated Function
        # Retrieve general info
        xData = self.source[:, 0]
        yData = self.source[:, 1]
        xmin, xmax = xData[0], xData[-1]
        # Encode the extrapolation scheme as an int to avoid repeated
        # string comparisons below
        if self.__extrapolation__ == "zero":
            extrapolation = 0  # Extrapolation is zero
        elif self.__extrapolation__ == "natural":
            extrapolation = 1  # Extrapolation is natural
        else:
            extrapolation = 2  # Extrapolation is constant
        # Interpolate this info for each interpolation type
        # Spline
        if self.__interpolation__ == "spline":
            x = args[0]
            coeffs = self.__splineCoefficients__
            xInterval = np.searchsorted(xData, x)
            # Interval found... interpolate... or extrapolate
            if xmin <= x <= xmax:
                # Interpolate
                xInterval = xInterval if xInterval != 0 else 1
                a = coeffs[:, xInterval - 1]
                x = x - xData[xInterval - 1]
                y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
            else:
                # Extrapolate
                if extrapolation == 0:  # Extrapolation == zero
                    y = 0
                elif extrapolation == 1:  # Extrapolation == natural
                    a = coeffs[:, 0] if x < xmin else coeffs[:, -1]
                    x = x - xData[0] if x < xmin else x - xData[-2]
                    y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                else:  # Extrapolation is set to constant
                    y = yData[0] if x < xmin else yData[-1]
            return y
        # Linear
        elif self.__interpolation__ == "linear":
            x = args[0]
            xInterval = np.searchsorted(xData, x)
            # Interval found... interpolate... or extrapolate
            if xmin <= x <= xmax:
                # Interpolate
                dx = float(xData[xInterval] - xData[xInterval - 1])
                dy = float(yData[xInterval] - yData[xInterval - 1])
                y = (x - xData[xInterval - 1]) * (dy / dx) + yData[xInterval - 1]
            else:
                # Extrapolate
                if extrapolation == 0:  # Extrapolation == zero
                    y = 0
                elif extrapolation == 1:  # Extrapolation == natural
                    xInterval = 1 if x < xmin else -1
                    dx = float(xData[xInterval] - xData[xInterval - 1])
                    dy = float(yData[xInterval] - yData[xInterval - 1])
                    y = (x - xData[xInterval - 1]) * (dy / dx) + yData[xInterval - 1]
                else:  # Extrapolation is set to constant
                    y = yData[0] if x < xmin else yData[-1]
            return y
        # Akima
        elif self.__interpolation__ == "akima":
            x = args[0]
            coeffs = np.array(self.__akimaCoefficients__)
            xInterval = np.searchsorted(xData, x)
            # Interval found... interpolate... or extrapolate
            if xmin <= x <= xmax:
                # Interpolate (four flat coefficients per interval)
                xInterval = xInterval if xInterval != 0 else 1
                a = coeffs[4 * xInterval - 4 : 4 * xInterval]
                y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
            else:
                # Extrapolate
                if extrapolation == 0:  # Extrapolation == zero
                    y = 0
                elif extrapolation == 1:  # Extrapolation == natural
                    a = coeffs[:4] if x < xmin else coeffs[-4:]
                    y = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                else:  # Extrapolation is set to constant
                    y = yData[0] if x < xmin else yData[-1]
            return y
        # Polynominal
        elif self.__interpolation__ == "polynomial":
            x = args[0]
            coeffs = self.__polynomialCoefficients__
            # Interpolate... or extrapolate
            if xmin <= x <= xmax:
                # Interpolate
                y = 0
                for i in range(len(coeffs)):
                    y += coeffs[i] * (x ** i)
            else:
                # Extrapolate
                if extrapolation == 0:  # Extrapolation == zero
                    y = 0
                elif extrapolation == 1:  # Extrapolation == natural
                    y = 0
                    for i in range(len(coeffs)):
                        y += coeffs[i] * (x ** i)
                else:  # Extrapolation is set to constant
                    y = yData[0] if x < xmin else yData[-1]
            return y
        # Shepard
        elif self.__interpolation__ == "shepard":
            # Inverse-distance weighting with power 3; exact hits
            # short-circuit to that sample's value
            xData = self.source[:, 0:-1]  # Support for N-Dimensions
            len_yData = len(yData)  # A little speed up
            x = np.array([[float(x) for x in list(args)]])
            numeratorSum = 0
            denominatorSum = 0
            for i in range(len_yData):
                sub = xData[i] - x
                distance = np.linalg.norm(sub)
                if distance == 0:
                    numeratorSum = yData[i]
                    denominatorSum = 1
                    break
                else:
                    weight = distance ** (-3)
                    numeratorSum = numeratorSum + yData[i] * weight
                    denominatorSum = denominatorSum + weight
            return numeratorSum / denominatorSum
    def getValueOpt2(self, *args):
        """DEPRECATED!! - See Function.getValueOpt for new version.
        This method returns the value of the Function at the specified
        point in a limited but optimized manner. See Function.getValue for an
        implementation which allows more kinds of inputs.
        This method optimizes the Function.getValue method by only
        implementing function evaluations of single inputs, i.e., it is not
        vectorized. Furthermore, it actually implements a different method
        for each interpolation type, eliminating some if statements.
        Finally, it uses Numba to compile the methods, which further optimizes
        the implementation.
        The code below is here for documentation purposes only. It is
        overwritten for all instances by the Function.setGetValuteOpt2 method.

        Parameters
        ----------
        args : scalar
            Value where the Function is to be evaluated. If the Function is
            1-D, only one argument is expected, which may be an int or a float
            If the function is N-D, N arguments must be given, each one being
            an int or a float.

        Returns
        -------
        x : scalar
        """
        # Returns value for function function type
        if callable(self.source):
            return self.source(*args)
        # Returns value for spline, akima or linear interpolation function type
        elif self.__interpolation__ in ["spline", "akima", "linear"]:
            x = args[0]
            xData = self.source[:, 0]
            yData = self.source[:, 1]
            # Hunt in intervals near the last interval which was used.
            xInterval = self.last_interval
            if xData[xInterval - 1] <= x <= xData[xInterval]:
                pass
            else:
                xInterval = np.searchsorted(xData, x)
                self.last_interval = xInterval if xInterval < len(xData) else 0
            # Interval found... keep going
            xmin, xmax = xData[0], xData[-1]
            if self.__interpolation__ == "spline":
                coeffs = self.__splineCoefficients__
                if x == xmin or x == xmax:
                    x = yData[xInterval]
                elif xmin < x < xmax or (self.__extrapolation__ == "natural"):
                    if not xmin < x < xmax:
                        # Natural extrapolation: reuse the edge cubic
                        a = coeffs[:, 0] if x < xmin else coeffs[:, -1]
                        x = x - xData[0] if x < xmin else x - xData[-2]
                    else:
                        a = coeffs[:, xInterval - 1]
                        x = x - xData[xInterval - 1]
                    x = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                else:
                    # Extrapolate
                    if self.__extrapolation__ == "zero":
                        x = 0
                    else:  # Extrapolation is set to constant
                        x = yData[0] if x < xmin else yData[-1]
            elif self.__interpolation__ == "linear":
                if x == xmin or x == xmax:
                    x = yData[xInterval]
                elif xmin < x < xmax or (self.__extrapolation__ == "natural"):
                    dx = float(xData[xInterval] - xData[xInterval - 1])
                    dy = float(yData[xInterval] - yData[xInterval - 1])
                    x = (x - xData[xInterval - 1]) * (dy / dx) + yData[xInterval - 1]
                elif self.__extrapolation__ == "natural":
                    # NOTE(review): dead branch -- the previous elif already
                    # matches when __extrapolation__ == "natural".
                    y0 = yData[0] if x < xmin else yData[-1]
                    xInterval = 1 if x < xmin else -1
                    dx = float(xData[xInterval] - xData[xInterval - 1])
                    dy = float(yData[xInterval] - yData[xInterval - 1])
                    x = (x - xData[xInterval - 1]) * (dy / dx) + y0
                else:
                    # Extrapolate
                    if self.__extrapolation__ == "zero":
                        x = 0
                    else:  # Extrapolation is set to constant
                        x = yData[0] if x < xmin else yData[-1]
            else:
                if self.__interpolation__ == "akima":
                    coeffs = self.__akimaCoefficients__
                if x == xmin or x == xmax:
                    x = yData[xInterval]
                elif xmin < x < xmax:
                    # Akima coefficients are flat, four per interval
                    a = coeffs[4 * xInterval - 4 : 4 * xInterval]
                    x = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                elif self.__extrapolation__ == "natural":
                    a = coeffs[:4] if x < xmin else coeffs[-4:]
                    x = a[3] * x ** 3 + a[2] * x ** 2 + a[1] * x + a[0]
                else:
                    # Extrapolate
                    if self.__extrapolation__ == "zero":
                        x = 0
                    else:  # Extrapolation is set to constant
                        x = yData[0] if x < xmin else yData[-1]
            return x
def __getitem__(self, args):
"""Returns item of the Function source. If the source is not an array,
an error will result.
Parameters
----------
args : int, float
Index of the item to be retrieved.
Returns
-------
self.source[args] : float, array
Item specified from Function.source.
"""
return self.source[args]
def __len__(self):
"""Returns length of the Function source. If the source is not an
array, an error will result.
Returns
-------
len(self.source) : int
Length of Function.source.
"""
return len(self.source)
# Define all presentation methods
def __call__(self, *args):
"""Plot the Function if no argument is given. If an
argument is given, return the value of the function at the desired
point.
Parameters
----------
args : scalar, list, optional
Value where the Function is to be evaluated. If the Function is
1-D, only one argument is expected, which may be an int, a float
or a list of ints or floats, in which case the Function will be
evaluated at all points in the list and a list of floats will be
returned. If the function is N-D, N arguments must be given, each
one being an scalar or list.
Returns
-------
ans : None, scalar, list
"""
if len(args) == 0:
return self.plot()
else:
return self.getValue(*args)
def __str__(self):
"Return a string representation of the Function"
return (
"Function from R"
+ str(self.__domDim__)
+ " to R"
+ str(self.__imgDim__)
+ " : ("
+ ", ".join(self.__inputs__)
+ ") → ("
+ ", ".join(self.__outputs__)
+ ")"
)
def __repr__(self):
"Return a string representation of the Function"
return (
"Function from R"
+ str(self.__domDim__)
+ " to R"
+ str(self.__imgDim__)
+ " : ("
+ ", ".join(self.__inputs__)
+ ") → ("
+ ", ".join(self.__outputs__)
+ ")"
)
def plot(self, *args, **kwargs):
"""Call Function.plot1D if Function is 1-Dimensional or call
Function.plot2D if Function is 2-Dimensional and forward arguments
and key-word arguments."""
if isinstance(self, list):
# Compare multiple plots
Function.comparePlots(self)
else:
if self.__domDim__ == 1:
self.plot1D(*args, **kwargs)
elif self.__domDim__ == 2:
self.plot2D(*args, **kwargs)
else:
print("Error: Only functions with 1D or 2D domains are plottable!")
def plot1D(
self,
lower=None,
upper=None,
samples=1000,
forceData=False,
forcePoints=False,
returnObject=False,
):
"""Plot 1-Dimensional Function, from a lower limit to an upper limit,
by sampling the Function several times in the interval. The title of
the graph is given by the name of the axes, which are taken from
the Function`s input and output names.
Parameters
----------
lower : scalar, optional
The lower limit of the interval in which the function is to be
ploted. The default value for function type Functions is 0. By
contrast, if the Function is given by a dataset, the default
value is the start of the dataset.
upper : scalar, optional
The upper limit of the interval in which the function is to be
ploted. The default value for function type Functions is 10. By
contrast, if the Function is given by a dataset, the default
value is the end of the dataset.
samples : int, optional
The number of samples in which the function will be evaluated for
plotting it, which draws lines between each evaluated point.
The default value is 1000.
forceData : Boolean, optional
If Function is given by an interpolated dataset, setting forceData
to True will plot all points, as a scatter, in the dataset.
Default value is False.
forcePoints : Boolean, optional
Setting forcePoints to True will plot all points, as a scatter, in
which the Function was evaluated in the dataset. Default value is
False.
Returns
-------
None
"""
# Define a mesh and y values at mesh nodes for plotting
fig = plt.figure()
ax = fig.axes
if callable(self.source):
# Determine boundaries
lower = 0 if lower is None else lower
upper = 10 if upper is None else upper
else:
# Determine boundaries
xData = self.source[:, 0]
xmin, xmax = xData[0], xData[-1]
lower = xmin if lower is None else lower
upper = xmax if upper is None else upper
# Plot data points if forceData = True
tooLow = True if xmin >= lower else False
tooHigh = True if xmax <= upper else False
loInd = 0 if tooLow else np.where(xData >= lower)[0][0]
upInd = len(xData) - 1 if tooHigh else np.where(xData <= upper)[0][0]
points = self.source[loInd : (upInd + 1), :].T.tolist()
if forceData:
plt.scatter(points[0], points[1], marker="o")
# Calculate function at mesh nodes
x = np.linspace(lower, upper, samples)
y = self.getValue(x.tolist())
# Plots function
if forcePoints:
plt.scatter(x, y, marker="o")
plt.plot(x, y)
# Turn on grid and set title and axis
plt.grid(True)
plt.title(self.__outputs__[0].title() + " x " + self.__inputs__[0].title())
plt.xlabel(self.__inputs__[0].title())
plt.ylabel(self.__outputs__[0].title())
plt.show()
if returnObject:
return fig, ax
def plot2D(
self,
lower=None,
upper=None,
samples=[30, 30],
forceData=True,
dispType="surface",
):
"""Plot 2-Dimensional Function, from a lower limit to an upper limit,
by sampling the Function several times in the interval. The title of
the graph is given by the name of the axis, which are taken from
the Function`s inputs and output names.
Parameters
----------
lower : scalar, array of int or float, optional
The lower limits of the interval in which the function is to be
ploted, which can be an int or float, which is repeated for both
axis, or an array specifying the limit for each axis. The default
value for function type Functions is 0. By contrast, if the
Function is given by a dataset, the default value is the start of
the dataset for each axis.
upper : scalar, array of int or float, optional
The upper limits of the interval in which the function is to be
ploted, which can be an int or float, which is repeated for both
axis, or an array specifying the limit for each axis. The default
value for function type Functions is 0. By contrast, if the
Function is given by a dataset, the default value is the end of
the dataset for each axis.
samples : int, array of int, optional
The number of samples in which the function will be evaluated for
plotting it, which draws lines between each evaluated point.
The default value is 30 for each axis.
forceData : Boolean, optional
If Function is given by an interpolated dataset, setting forceData
to True will plot all points, as a scatter, in the dataset.
Default value is False.
dispType : string, optional
Display type of plotted graph, which can be surface, wireframe,
contour, or contourf. Default value is surface.
Returns
-------
None
"""
# Prepare plot
figure = plt.figure()
axes = figure.gca(projection="3d")
# Define a mesh and f values at mesh nodes for plotting
if callable(self.source):
# Determine boundaries
lower = [0, 0] if lower is None else lower
lower = 2 * [lower] if isinstance(lower, (int, float)) else lower
upper = [10, 10] if upper is None else upper
upper = 2 * [upper] if isinstance(upper, (int, float)) else upper
else:
# Determine boundaries
xData = self.source[:, 0]
yData = self.source[:, 1]
xMin, xMax = xData.min(), xData.max()
yMin, yMax = yData.min(), yData.max()
lower = [xMin, yMin] if lower is None else lower
lower = 2 * [lower] if isinstance(lower, (int, float)) else lower
upper = [xMax, yMax] if upper is None else upper
upper = 2 * [upper] if isinstance(upper, (int, float)) else upper
# Plot data points if forceData = True
if forceData:
axes.scatter(xData, yData, self.source[:, -1])
# Create nodes to evaluate function
x = np.linspace(lower[0], upper[0], samples[0])
y = np.linspace(lower[1], upper[1], samples[1])
meshX, meshY = np.meshgrid(x, y)
meshXFlat, meshYFlat = meshX.flatten(), meshY.flatten()
mesh = [[meshXFlat[i], meshYFlat[i]] for i in range(len(meshXFlat))]
# Evaluate function at all mesh nodes and convert it to matrix
z = np.array(self.getValue(mesh)).reshape(meshX.shape)
# Plot function
if dispType == "surface":
surf = axes.plot_surface(
meshX,
meshY,
z,
rstride=1,
cstride=1,
# cmap=cm.coolwarm,
linewidth=0,
alpha=0.6,
)
figure.colorbar(surf)
elif dispType == "wireframe":
axes.plot_wireframe(meshX, meshY, z, rstride=1, cstride=1)
elif dispType == "contour":
figure.clf()
CS = plt.contour(meshX, meshY, z)
plt.clabel(CS, inline=1, fontsize=10)
elif dispType == "contourf":
figure.clf()
CS = plt.contour(meshX, meshY, z)
plt.contourf(meshX, meshY, z)
plt.clabel(CS, inline=1, fontsize=10)
# axes.contourf(meshX, meshY, z, zdir='x', offset=xMin, cmap=cm.coolwarm)
# axes.contourf(meshX, meshY, z, zdir='y', offset=yMax, cmap=cm.coolwarm)
plt.title(
self.__outputs__[0].title()
+ " x "
+ self.__inputs__[0].title()
+ " x "
+ self.__inputs__[1].title()
)
axes.set_xlabel(self.__inputs__[0].title())
axes.set_ylabel(self.__inputs__[1].title())
axes.set_zlabel(self.__outputs__[0].title())
plt.show()
@staticmethod
def comparePlots(
plot_list,
lower=None,
upper=None,
samples=1000,
title="",
xlabel="",
ylabel="",
forceData=False,
forcePoints=False,
returnObject=False,
):
"""Plots N 1-Dimensional Functions in the same plot, from a lower
limit to an upper limit, by sampling the Functions several times in
the interval.
Parameters
----------
plot_list : list
List of Functions or list of tuples in the format (Function,
label), where label is a string which will be displayed in the
legend.
lower : scalar, optional
The lower limit of the interval in which the Functions are to be
ploted. The default value for function type Functions is 0. By
contrast, if the Functions given are defined by a dataset, the
default value is the lowest value of the datasets.
upper : scalar, optional
The upper limit of the interval in which the Functions are to be
ploted. The default value for function type Functions is 10. By
contrast, if the Functions given are defined by a dataset, the
default value is the highest value of the datasets.
samples : int, optional
The number of samples in which the functions will be evaluated for
plotting it, which draws lines between each evaluated point.
The default value is 1000.
title : string, optional
Title of the plot. Default value is an empty string.
xlabel : string, optional
X-axis label. Default value is an empty string.
ylabel : string, optional
Y-axis label. Default value is an empty string.
forceData : Boolean, optional
If Function is given by an interpolated dataset, setting forceData
to True will plot all points, as a scatter, in the dataset.
Default value is False.
forcePoints : Boolean, optional
Setting forcePoints to True will plot all points, as a scatter, in
which the Function was evaluated to plot it. Default value is
False.
Returns
-------
None
"""
noRangeSpecified = True if lower is None and upper is None else False
# Convert to list of tuples if list of Function was given
plots = []
for plot in plot_list:
if isinstance(plot, (tuple, list)):
plots.append(plot)
else:
plots.append((plot, ""))
# plots = []
# if isinstance(plot_list[0], (tuple, list)) == False:
# for plot in plot_list:
# plots.append((plot, " "))
# else:
# plots = plot_list
# Create plot figure
fig, ax = plt.subplots()
# Define a mesh and y values at mesh nodes for plotting
if lower is None:
lower = 0
for plot in plots:
if not callable(plot[0].source):
# Determine boundaries
xmin = plot[0].source[0, 0]
lower = xmin if xmin < lower else lower
if upper is None:
upper = 10
for plot in plots:
if not callable(plot[0].source):
# Determine boundaries
xmax = plot[0].source[-1, 0]
upper = xmax if xmax > upper else upper
x = np.linspace(lower, upper, samples)
# Iterate to plot all plots
for plot in plots:
# Deal with discrete data sets when no range is given
if noRangeSpecified and not callable(plot[0].source):
ax.plot(plot[0][:, 0], plot[0][:, 1], label=plot[1])
if forcePoints:
ax.scatter(plot[0][:, 0], plot[0][:, 1], marker="o")
else:
# Calculate function at mesh nodes
y = plot[0].getValue(x.tolist())
# Plots function
ax.plot(x, y, label=plot[1])
if forcePoints:
ax.scatter(x, y, marker="o")
# Plot data points if specified
if forceData:
for plot in plots:
if not callable(plot[0].source):
xData = plot[0].source[:, 0]
xmin, xmax = xData[0], xData[-1]
tooLow = True if xmin >= lower else False
tooHigh = True if xmax <= upper else False
loInd = 0 if tooLow else np.where(xData >= lower)[0][0]
upInd = (
len(xData) - 1 if tooHigh else np.where(xData <= upper)[0][0]
)
points = plot[0].source[loInd : (upInd + 1), :].T.tolist()
ax.scatter(points[0], points[1], marker="o")
# Setup legend
ax.legend(loc="best", shadow=True)
# Turn on grid and set title and axis
plt.grid(True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# Show plot
plt.show()
if returnObject:
return fig, ax
# Define all interpolation methods
def __interpolatePolynomial__(self):
"""Calculate polynomail coefficients that fit the data exactly."""
# Find the degree of the polynomial interpolation
degree = self.source.shape[0] - 1
# Get x and y values for all supplied points.
x = self.source[:, 0]
y = self.source[:, 1]
# Check if interpolation requires large numbers
if np.amax(x) ** degree > 1e308:
print(
"Polynomial interpolation of too many points can't be done."
" Once the degree is too high, numbers get too large."
" The process becomes inefficient. Using spline instead."
)
return self.setInterpolation("spline")
# Create coefficient matrix1
A = np.zeros((degree + 1, degree + 1))
for i in range(degree + 1):
A[:, i] = x ** i
# Solve the system and store the resultant coefficients
self.__polynomialCoefficients__ = np.linalg.solve(A, y)
def __interpolateSpline__(self):
"""Calculate natural spline coefficients that fit the data exactly."""
# Get x and y values for all supplied points
x = self.source[:, 0]
y = self.source[:, 1]
mdim = len(x)
h = [x[i + 1] - x[i] for i in range(0, mdim - 1)]
# Initialize the matrix
Ab = np.zeros((3, mdim))
# Construct the Ab banded matrix and B vector
Ab[1, 0] = 1 # A[0, 0] = 1
B = [0]
for i in range(1, mdim - 1):
Ab[2, i - 1] = h[i - 1] # A[i, i - 1] = h[i - 1]
Ab[1, i] = 2 * (h[i] + h[i - 1]) # A[i, i] = 2*(h[i] + h[i - 1])
Ab[0, i + 1] = h[i] # A[i, i + 1] = h[i]
B.append(3 * ((y[i + 1] - y[i]) / (h[i]) - (y[i] - y[i - 1]) / (h[i - 1])))
Ab[1, mdim - 1] = 1 # A[-1, -1] = 1
B.append(0)
# Solve the system for c coefficients
c = linalg.solve_banded((1, 1), Ab, B, True, True)
# Calculate other coefficients
b = [
((y[i + 1] - y[i]) / h[i] - h[i] * (2 * c[i] + c[i + 1]) / 3)
for i in range(0, mdim - 1)
]
d = [(c[i + 1] - c[i]) / (3 * h[i]) for i in range(0, mdim - 1)]
# Store coefficients
self.__splineCoefficients__ = np.array([y[0:-1], b, c[0:-1], d])
def __interpolateAkima__(self):
"""Calculate akima spline coefficients that fit the data exactly"""
# Get x and y values for all supplied points
x = self.source[:, 0]
y = self.source[:, 1]
# Estimate derivatives at each point
d = [0] * len(x)
d[0] = (y[1] - y[0]) / (x[1] - x[0])
d[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
for i in range(1, len(x) - 1):
w1, w2 = (x[i] - x[i - 1]), (x[i + 1] - x[i])
d1, d2 = ((y[i] - y[i - 1]) / w1), ((y[i + 1] - y[i]) / w2)
d[i] = (w1 * d2 + w2 * d1) / (w1 + w2)
# Calculate coefficients for each interval with system already solved
coeffs = [0] * 4 * (len(x) - 1)
for i in range(len(x) - 1):
xl, xr = x[i], x[i + 1]
yl, yr = y[i], y[i + 1]
dl, dr = d[i], d[i + 1]
A = np.array(
[
[1, xl, xl ** 2, xl ** 3],
[1, xr, xr ** 2, xr ** 3],
[0, 1, 2 * xl, 3 * xl ** 2],
[0, 1, 2 * xr, 3 * xr ** 2],
]
)
Y = np.array([yl, yr, dl, dr]).T
coeffs[4 * i : 4 * i + 4] = np.linalg.solve(A, Y)
"""For some reason this doesn't always work!
coeffs[4*i] = (dr*xl**2*xr*(-xl + xr) + dl*xl*xr**2*(-xl + xr) +
3*xl*xr**2*yl - xr**3*yl + xl**3*yr -
3*xl**2*xr*yr)/(xl-xr)**3
coeffs[4*i+1] = (dr*xl*(xl**2 + xl*xr - 2*xr**2) -
xr*(dl*(-2*xl**2 + xl*xr + xr**2) +
6*xl*(yl - yr)))/(xl-xr)**3
coeffs[4*i+2] = (-dl*(xl**2 + xl*xr - 2*xr**2) +
dr*(-2*xl**2 + xl*xr + xr**2) +
3*(xl + xr)*(yl - yr))/(xl-xr)**3
coeffs[4*i+3] = (dl*(xl - xr) + dr*(xl - xr) -
2*yl + 2*yr)/(xl-xr)**3"""
self.__akimaCoefficients__ = coeffs
# Define all possible algebraic operations
    def __truediv__(self, other):
        """Devides a Function object and returns a new Function object
        which gives the result of the division. Only implemented for 1D
        domains.

        Parameters
        ----------
        other : Function, int, float, callable
            What self will be divided by. If other and self are Function
            objects which are based on interpolation, have the exact same
            domain (are defined in the same grid points), have the same
            interpolation method and have the same input name, then a
            special implementation is used. This implementation is faster,
            however behavior between grid points is only interpolated,
            not calculated as it would be.

        Returns
        -------
        result : Function
            A Function object which gives the result of self(x)/other(x).
            NOTE(review): when 'other' is neither Function-like, numeric
            nor callable, this implicitly returns None instead of raising
            or returning NotImplemented — confirm no caller hits that path.
        """
        # EAFP: assume 'other' is a Function; any failure below falls to the
        # except branch which handles numbers and plain callables.
        try:
            # Check if Function objects source is array or callable
            # Check if Function objects have same interpolation and domain
            if (
                isinstance(other.source, np.ndarray)
                and isinstance(self.source, np.ndarray)
                and self.__interpolation__ == other.__interpolation__
                and self.__inputs__ == other.__inputs__
                # Grids identical: the x-column difference is all zeros
                and np.any(self.source[:, 0] - other.source[:, 0]) == False
            ):
                # Operate on grid values
                Ys = self.source[:, 1] / other.source[:, 1]
                Xs = self.source[:, 0]
                source = np.concatenate(([Xs], [Ys])).transpose()
                # Retrieve inputs, outputs and interpolation
                inputs = self.__inputs__[:]
                outputs = self.__outputs__[0] + "/" + other.__outputs__[0]
                outputs = "(" + outputs + ")"
                interpolation = self.__interpolation__
                # Create new Function object
                return Function(source, inputs, outputs, interpolation)
            else:
                # Different grids/interpolation: wrap a lazy pointwise division
                return Function(lambda x: (self.getValueOpt2(x) / other(x)))
        # NOTE(review): the bare except is load-bearing. It catches both
        # AttributeError (other has no .source, i.e. a number or callable)
        # and ValueError (mismatched grid lengths in the comparison above);
        # narrowing it would change behavior.
        except:
            if isinstance(other, (float, int, complex)):
                # Check if Function object source is array or callable
                if isinstance(self.source, np.ndarray):
                    # Operate on grid values
                    Ys = self.source[:, 1] / other
                    Xs = self.source[:, 0]
                    source = np.concatenate(([Xs], [Ys])).transpose()
                    # Retrieve inputs, outputs and interpolation
                    inputs = self.__inputs__[:]
                    outputs = self.__outputs__[0] + "/" + str(other)
                    outputs = "(" + outputs + ")"
                    interpolation = self.__interpolation__
                    # Create new Function object
                    return Function(source, inputs, outputs, interpolation)
                else:
                    return Function(lambda x: (self.getValueOpt2(x) / other))
            # Or if it is just a callable
            elif callable(other):
                return Function(lambda x: (self.getValueOpt2(x) / other(x)))
def __rtruediv__(self, other):
"""Devides 'other' by a Function object and returns a new Function
object which gives the result of the division. Only implemented for
1D domains.
Parameters
----------
other : int, float, callable
What self will divide.
Returns
-------
result : Function
A Function object which gives the result of other(x)/self(x).
"""
# Check if Function object source is array and other is float
if isinstance(other, (float, int, complex)):
if isinstance(self.source, np.ndarray):
# Operate on grid values
Ys = other / self.source[:, 1]
Xs = self.source[:, 0]
source = np.concatenate(([Xs], [Ys])).transpose()
# Retrieve inputs, outputs and interpolation
inputs = self.__inputs__[:]
outputs = str(other) + "/" + self.__outputs__[0]
outputs = "(" + outputs + ")"
interpolation = self.__interpolation__
# Create new Function object
return Function(source, inputs, outputs, interpolation)
else:
return Function(lambda x: (other / self.getValueOpt2(x)))
# Or if it is just a callable
elif callable(other):
return Function(lambda x: (other(x) / self.getValueOpt2(x)))
    def __pow__(self, other):
        """Raises a Function object to the power of 'other' and
        returns a new Function object which gives the result. Only
        implemented for 1D domains.

        Parameters
        ----------
        other : Function, int, float, callable
            What self will be raised to. If other and self are Function
            objects which are based on interpolation, have the exact same
            domain (are defined in the same grid points), have the same
            interpolation method and have the same input name, then a
            special implementation is used. This implementation is faster,
            however behavior between grid points is only interpolated,
            not calculated as it would be.

        Returns
        -------
        result : Function
            A Function object which gives the result of self(x)**other(x).
            NOTE(review): when 'other' is neither Function-like, numeric
            nor callable, this implicitly returns None — confirm no caller
            hits that path.
        """
        # EAFP: assume 'other' is a Function; any failure below falls to the
        # except branch which handles numbers and plain callables.
        try:
            # Check if Function objects source is array or callable
            # Check if Function objects have same interpolation and domain
            if (
                isinstance(other.source, np.ndarray)
                and isinstance(self.source, np.ndarray)
                and self.__interpolation__ == other.__interpolation__
                and self.__inputs__ == other.__inputs__
                # Grids identical: the x-column difference is all zeros
                and np.any(self.source[:, 0] - other.source[:, 0]) == False
            ):
                # Operate on grid values
                Ys = self.source[:, 1] ** other.source[:, 1]
                Xs = self.source[:, 0]
                source = np.concatenate(([Xs], [Ys])).transpose()
                # Retrieve inputs, outputs and interpolation
                inputs = self.__inputs__[:]
                outputs = self.__outputs__[0] + "**" + other.__outputs__[0]
                outputs = "(" + outputs + ")"
                interpolation = self.__interpolation__
                # Create new Function object
                return Function(source, inputs, outputs, interpolation)
            else:
                # NOTE(review): this branch uses getValueOpt2 while the
                # fallbacks below use getValue — presumably equivalent
                # evaluation paths; confirm against their definitions.
                return Function(lambda x: (self.getValueOpt2(x) ** other(x)))
        # NOTE(review): the bare except is load-bearing. It catches both
        # AttributeError (other has no .source, i.e. a number or callable)
        # and ValueError (mismatched grid lengths in the comparison above);
        # narrowing it would change behavior.
        except:
            if isinstance(other, (float, int, complex)):
                # Check if Function object source is array or callable
                if isinstance(self.source, np.ndarray):
                    # Operate on grid values
                    Ys = self.source[:, 1] ** other
                    Xs = self.source[:, 0]
                    source = np.concatenate(([Xs], [Ys])).transpose()
                    # Retrieve inputs, outputs and interpolation
                    inputs = self.__inputs__[:]
                    outputs = self.__outputs__[0] + "**" + str(other)
                    outputs = "(" + outputs + ")"
                    interpolation = self.__interpolation__
                    # Create new Function object
                    return Function(source, inputs, outputs, interpolation)
                else:
                    return Function(lambda x: (self.getValue(x) ** other))
            # Or if it is just a callable
            elif callable(other):
                return Function(lambda x: (self.getValue(x) ** other(x)))
def __rpow__(self, other):
"""Raises 'other' to the power of a Function object and returns
a new Function object which gives the result. Only implemented
for 1D domains.
Parameters
----------
other : int, float, callable
What self will exponentiate.
Returns
-------
result : Function
A Function object which gives the result of other(x)**self(x).
"""
# Check if Function object source is array and other is float
if isinstance(other, (float, int, complex)):
if isinstance(self.source, np.ndarray):
# Operate on grid values
Ys = other ** self.source[:, 1]
Xs = self.source[:, 0]
source = np.concatenate(([Xs], [Ys])).transpose()
# Retrieve inputs, outputs and interpolation
inputs = self.__inputs__[:]
outputs = str(other) + "**" + self.__outputs__[0]
outputs = "(" + outputs + ")"
interpolation = self.__interpolation__
# Create new Function object
return Function(source, inputs, outputs, interpolation)
else:
return Function(lambda x: (other ** self.getValue(x)))
# Or if it is just a callable
elif callable(other):
return Function(lambda x: (other(x) ** self.getValue(x)))
    def __mul__(self, other):
        """Multiplies a Function object and returns a new Function object
        which gives the result of the multiplication. Only implemented for 1D
        domains.

        Parameters
        ----------
        other : Function, int, float, callable
            What self will be multiplied by. If other and self are Function
            objects which are based on interpolation, have the exact same
            domain (are defined in the same grid points), have the same
            interpolation method and have the same input name, then a
            special implementation is used. This implementation is faster,
            however behavior between grid points is only interpolated,
            not calculated as it would be.

        Returns
        -------
        result : Function
            A Function object which gives the result of self(x)*other(x).
            NOTE(review): when 'other' is neither Function-like, numeric
            nor callable, this implicitly returns None — confirm no caller
            hits that path.
        """
        # EAFP: assume 'other' is a Function; any failure below falls to the
        # except branch which handles numbers and plain callables.
        try:
            # Check if Function objects source is array or callable
            # Check if Function objects have same interpolation and domain
            if (
                isinstance(other.source, np.ndarray)
                and isinstance(self.source, np.ndarray)
                and self.__interpolation__ == other.__interpolation__
                and self.__inputs__ == other.__inputs__
                # Grids identical: the x-column difference is all zeros
                and np.any(self.source[:, 0] - other.source[:, 0]) == False
            ):
                # Operate on grid values
                Ys = self.source[:, 1] * other.source[:, 1]
                Xs = self.source[:, 0]
                source = np.concatenate(([Xs], [Ys])).transpose()
                # Retrieve inputs, outputs and interpolation
                inputs = self.__inputs__[:]
                outputs = self.__outputs__[0] + "*" + other.__outputs__[0]
                outputs = "(" + outputs + ")"
                interpolation = self.__interpolation__
                # Create new Function object
                return Function(source, inputs, outputs, interpolation)
            else:
                # Different grids/interpolation: lazy pointwise multiplication
                return Function(lambda x: (self.getValue(x) * other(x)))
        # NOTE(review): the bare except is load-bearing. It catches both
        # AttributeError (other has no .source, i.e. a number or callable)
        # and ValueError (mismatched grid lengths in the comparison above);
        # narrowing it would change behavior.
        except:
            if isinstance(other, (float, int, complex)):
                # Check if Function object source is array or callable
                if isinstance(self.source, np.ndarray):
                    # Operate on grid values
                    Ys = self.source[:, 1] * other
                    Xs = self.source[:, 0]
                    source = np.concatenate(([Xs], [Ys])).transpose()
                    # Retrieve inputs, outputs and interpolation
                    inputs = self.__inputs__[:]
                    outputs = self.__outputs__[0] + "*" + str(other)
                    outputs = "(" + outputs + ")"
                    interpolation = self.__interpolation__
                    # Create new Function object
                    return Function(source, inputs, outputs, interpolation)
                else:
                    return Function(lambda x: (self.getValue(x) * other))
            # Or if it is just a callable
            elif callable(other):
                return Function(lambda x: (self.getValue(x) * other(x)))
def __rmul__(self, other):
"""Multiplies 'other' by a Function object and returns a new Function
object which gives the result of the multiplication. Only implemented for
1D domains.
Parameters
----------
other : int, float, callable
What self will be multiplied by.
Returns
-------
result : Function
A Function object which gives the result of other(x)*self(x).
"""
# Check if Function object source is array and other is float
if isinstance(other, (float, int, complex)):
if isinstance(self.source, np.ndarray):
# Operate on grid values
Ys = other * self.source[:, 1]
Xs = self.source[:, 0]
source = np.concatenate(([Xs], [Ys])).transpose()
# Retrieve inputs, outputs and interpolation
inputs = self.__inputs__[:]
outputs = str(other) + "*" + self.__outputs__[0]
outputs = "(" + outputs + ")"
interpolation = self.__interpolation__
# Create new Function object
return Function(source, inputs, outputs, interpolation)
else:
return Function(lambda x: (other * self.getValue(x)))
# Or if it is just a callable
elif callable(other):
return Function(lambda x: (other(x) * self.getValue(x)))
    def __add__(self, other):
        """Sums a Function object and 'other', returns a new Function
        object which gives the result of the sum. Only implemented for
        1D domains.

        Parameters
        ----------
        other : Function, int, float, callable
            What self will be added to. If other and self are Function
            objects which are based on interpolation, have the exact same
            domain (are defined in the same grid points), have the same
            interpolation method and have the same input name, then a
            special implementation is used. This implementation is faster,
            however behavior between grid points is only interpolated,
            not calculated as it would be.

        Returns
        -------
        result : Function
            A Function object which gives the result of self(x)+other(x).
            NOTE(review): when 'other' is neither Function-like, numeric
            nor callable, this implicitly returns None — confirm no caller
            hits that path.
        """
        # EAFP: assume 'other' is a Function; any failure below falls to the
        # except branch which handles numbers and plain callables.
        try:
            # Check if Function objects source is array or callable
            # Check if Function objects have same interpolation and domain
            if (
                isinstance(other.source, np.ndarray)
                and isinstance(self.source, np.ndarray)
                and self.__interpolation__ == other.__interpolation__
                and self.__inputs__ == other.__inputs__
                # Grids identical: the x-column difference is all zeros
                and np.any(self.source[:, 0] - other.source[:, 0]) == False
            ):
                # Operate on grid values
                Ys = self.source[:, 1] + other.source[:, 1]
                Xs = self.source[:, 0]
                source = np.concatenate(([Xs], [Ys])).transpose()
                # Retrieve inputs, outputs and interpolation
                inputs = self.__inputs__[:]
                outputs = self.__outputs__[0] + " + " + other.__outputs__[0]
                outputs = "(" + outputs + ")"
                interpolation = self.__interpolation__
                # Create new Function object
                return Function(source, inputs, outputs, interpolation)
            else:
                # Different grids/interpolation: lazy pointwise addition
                return Function(lambda x: (self.getValue(x) + other(x)))
        # NOTE(review): the bare except is load-bearing. It catches both
        # AttributeError (other has no .source, i.e. a number or callable)
        # and ValueError (mismatched grid lengths in the comparison above);
        # narrowing it would change behavior.
        except:
            if isinstance(other, (float, int, complex)):
                # Check if Function object source is array or callable
                if isinstance(self.source, np.ndarray):
                    # Operate on grid values
                    Ys = self.source[:, 1] + other
                    Xs = self.source[:, 0]
                    source = np.concatenate(([Xs], [Ys])).transpose()
                    # Retrieve inputs, outputs and interpolation
                    inputs = self.__inputs__[:]
                    outputs = self.__outputs__[0] + " + " + str(other)
                    outputs = "(" + outputs + ")"
                    interpolation = self.__interpolation__
                    # Create new Function object
                    return Function(source, inputs, outputs, interpolation)
                else:
                    return Function(lambda x: (self.getValue(x) + other))
            # Or if it is just a callable
            elif callable(other):
                return Function(lambda x: (self.getValue(x) + other(x)))
def __radd__(self, other):
"""Sums 'other' and a Function object and returns a new Function
object which gives the result of the sum. Only implemented for
1D domains.
Parameters
----------
other : int, float, callable
What self will be added to.
Returns
-------
result : Function
A Function object which gives the result of other(x)/+self(x).
"""
# Check if Function object source is array and other is float
if isinstance(other, (float, int, complex)):
if isinstance(self.source, np.ndarray):
# Operate on grid values
Ys = other + self.source[:, 1]
Xs = self.source[:, 0]
source = np.concatenate(([Xs], [Ys])).transpose()
# Retrieve inputs, outputs and interpolation
inputs = self.__inputs__[:]
outputs = str(other) + " + " + self.__outputs__[0]
outputs = "(" + outputs + ")"
interpolation = self.__interpolation__
# Create new Function object
return Function(source, inputs, outputs, interpolation)
else:
return Function(lambda x: (other + self.getValue(x)))
# Or if it is just a callable
elif callable(other):
return Function(lambda x: (other(x) + self.getValue(x)))
def __sub__(self, other):
"""Subtracts from a Function object and returns a new Function object
which gives the result of the subtraction. Only implemented for 1D
domains.
Parameters
----------
other : Function, int, float, callable
What self will be subtracted by. If other and self are Function
objects which are based on interpolation, have the exact same
domain (are defined in the same grid points), have the same
interpolation method and have the same input name, then a
special implementation is used. This implementation is faster,
however behavior between grid points is only interpolated,
not calculated as it would be.
Returns
-------
result : Function
A Function object which gives the result of self(x)-other(x).
"""
# If other is Function try...
try:
# Check if Function objects source is array or callable
# Check if Function objects have same interpolation and domain
if (
isinstance(other.source, np.ndarray)
and isinstance(self.source, np.ndarray)
and self.__interpolation__ == other.__interpolation__
and self.__inputs__ == other.__inputs__
and np.any(self.source[:, 0] - other.source[:, 0]) == False
):
# Operate on grid values
Ys = self.source[:, 1] - other.source[:, 1]
Xs = self.source[:, 0]
source = np.concatenate(([Xs], [Ys])).transpose()
# Retrieve inputs, outputs and interpolation
inputs = self.__inputs__[:]
outputs = self.__outputs__[0] + " - " + other.__outputs__[0]
outputs = "(" + outputs + ")"
interpolation = self.__interpolation__
# Create new Function object
return Function(source, inputs, outputs, interpolation)
else:
return Function(lambda x: (self.getValue(x) * other(x)))
# If other is Float except...
except:
if isinstance(other, (float, int, complex)):
# Check if Function object source is array or callable
if isinstance(self.source, np.ndarray):
# Operate on grid values
Ys = self.source[:, 1] - other
Xs = self.source[:, 0]
source = np.concatenate(([Xs], [Ys])).transpose()
# Retrieve inputs, outputs and interpolation
inputs = self.__inputs__[:]
outputs = self.__outputs__[0] + " - " + str(other)
outputs = "(" + outputs + ")"
interpolation = self.__interpolation__
# Create new Function object
return Function(source, inputs, outputs, interpolation)
else:
return Function(lambda x: (self.getValue(x) - other))
# Or if it is just a callable
elif callable(other):
return Function(lambda x: (self.getValue(x) - other(x)))
def __rsub__(self, other):
"""Subtracts a Function object from 'other' and returns a new Function
object which gives the result of the subtraction. Only implemented for
1D domains.
Parameters
----------
other : int, float, callable
What self will subtract from.
Returns
-------
result : Function
A Function object which gives the result of other(x)-self(x).
"""
# Check if Function object source is array and other is float
if isinstance(other, (float, int, complex)):
if isinstance(self.source, np.ndarray):
# Operate on grid values
Ys = other - self.source[:, 1]
Xs = self.source[:, 0]
source = np.concatenate(([Xs], [Ys])).transpose()
# Retrieve inputs, outputs and interpolation
inputs = self.__inputs__[:]
outputs = str(other) + " - " + self.__outputs__[0]
outputs = "(" + outputs + ")"
interpolation = self.__interpolation__
# Create new Function object
return Function(source, inputs, outputs, interpolation)
else:
return Function(lambda x: (other - self.getValue(x)))
# Or if it is just a callable
elif callable(other):
return Function(lambda x: (other(x) - self.getValue(x)))
def integral(self, a, b, numerical=False):
"""Evaluate a definite integral of a 1-D Function in the interval
from a to b.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
numerical : bool
If True, forces the definite integral to be evaluated numerically.
The current numerical method used is scipy.integrate.quad.
If False, try to calculate using interpolation information.
Currently, only available for spline and linear interpolation. If
unavailable, calculate numerically anyways.
Returns
-------
ans : float
Evaluated integral.
"""
if self.__interpolation__ == "spline" and numerical is False:
# Integrate using spline coefficients
xData = self.source[:, 0]
yData = self.source[:, 1]
coeffs = self.__splineCoefficients__
ans = 0
# Check to see if interval starts before point data
if a < xData[0]:
if self.__extrapolation__ == "constant":
ans += yData[0] * (xData[0] - a)
elif self.__extrapolation__ == "natural":
c = coeffs[:, 0]
subB = a - xData[0] # subA = 0
ans -= (
(c[3] * subB ** 4) / 4
+ (c[2] * subB ** 3 / 3)
+ (c[1] * subB ** 2 / 2)
+ c[0] * subB
)
else:
# self.__extrapolation__ = 'zero'
pass
# Integrate in subintervals between Xs of given data up to b
i = 0
while i < len(xData) - 1 and xData[i] < b:
if b < xData[i + 1]:
subB = b - xData[i] # subA = 0
else:
subB = xData[i + 1] - xData[i] # subA = 0
c = coeffs[:, i]
subB = xData[i + 1] - xData[i] # subA = 0
ans += (
(c[3] * subB ** 4) / 4
+ (c[2] * subB ** 3 / 3)
+ (c[1] * subB ** 2 / 2)
+ c[0] * subB
)
i += 1
# Check to see if interval ends after point data
if b > xData[-1]:
if self.__extrapolation__ == "constant":
ans += yData[-1] * (b - xData[-1])
elif self.__extrapolation__ == "natural":
c = coeffs[:, -1]
subA = xData[-1] - xData[-2]
subB = b - xData[-2]
ans -= (
(c[3] * subA ** 4) / 4
+ (c[2] * subA ** 3 / 3)
+ (c[1] * subA ** 2 / 2)
+ c[0] * subA
)
ans += (
(c[3] * subB ** 4) / 4
+ (c[2] * subB ** 3 / 3)
+ (c[1] * subB ** 2 / 2)
+ c[0] * subB
)
else:
# self.__extrapolation__ = 'zero'
pass
elif self.__interpolation__ == "linear" and numerical is False:
return np.trapz(self.source[:, 1], x=self.source[:, 0])
else:
# Integrate numerically
ans, _ = integrate.quad(self, a, b, epsabs=0.1, limit=10000)
return ans
# Not implemented
def differentiate(self, x, dx=1e-6):
return (self.getValue(x + dx) - self.getValue(x - dx)) / (2 * dx)
# h = (10)**-300
# z = x + h*1j
# return self(z).imag/h
| 43.204412 | 87 | 0.516298 |
acdebd65fca0bd638b171b62bcc2d06a29de9c84 | 6,660 | py | Python | aloe/rpc/full_node_rpc_client.py | Aloe-Network/aloe-blockchain | 72b1f64f177e144a81b9d38f194427ea39e16edb | [
"Apache-2.0"
] | 3 | 2021-06-03T09:09:28.000Z | 2021-07-24T16:22:40.000Z | aloe/rpc/full_node_rpc_client.py | Aloe-Network/aloe-blockchain | 72b1f64f177e144a81b9d38f194427ea39e16edb | [
"Apache-2.0"
] | null | null | null | aloe/rpc/full_node_rpc_client.py | Aloe-Network/aloe-blockchain | 72b1f64f177e144a81b9d38f194427ea39e16edb | [
"Apache-2.0"
] | 1 | 2021-07-14T04:15:26.000Z | 2021-07-14T04:15:26.000Z | from typing import Dict, List, Optional, Tuple
from aloe.consensus.block_record import BlockRecord
from aloe.rpc.rpc_client import RpcClient
from aloe.types.blockchain_format.sized_bytes import bytes32
from aloe.types.coin_record import CoinRecord
from aloe.types.full_block import FullBlock
from aloe.types.spend_bundle import SpendBundle
from aloe.types.unfinished_header_block import UnfinishedHeaderBlock
from aloe.util.byte_types import hexstr_to_bytes
from aloe.util.ints import uint32, uint64
class FullNodeRpcClient(RpcClient):
    """
    Client to Aloe RPC, connects to a local full node. Uses HTTP/JSON, and converts back from
    JSON into native python objects before returning. All api calls use POST requests.
    Note that this is not the same as the peer protocol, or wallet protocol (which run Aloe's
    protocol on top of TCP), it's a separate protocol on top of HTTP that provides easy access
    to the full node.

    Most lookup methods deliberately swallow any fetch/parse error and return
    None (or an empty collection) instead of raising.
    """
    async def get_blockchain_state(self) -> Dict:
        """Return the node's blockchain state dict; 'peak' is parsed into a BlockRecord."""
        response = await self.fetch("get_blockchain_state", {})
        if response["blockchain_state"]["peak"] is not None:
            response["blockchain_state"]["peak"] = BlockRecord.from_json_dict(response["blockchain_state"]["peak"])
        return response["blockchain_state"]
    async def get_block(self, header_hash) -> Optional[FullBlock]:
        """Fetch the full block with the given header hash; None if the fetch fails."""
        try:
            response = await self.fetch("get_block", {"header_hash": header_hash.hex()})
        except Exception:
            return None
        return FullBlock.from_json_dict(response["block"])
    async def get_block_record_by_height(self, height) -> Optional[BlockRecord]:
        """Fetch the block record at the given height; None if the fetch fails."""
        try:
            response = await self.fetch("get_block_record_by_height", {"height": height})
        except Exception:
            return None
        return BlockRecord.from_json_dict(response["block_record"])
    async def get_block_record(self, header_hash) -> Optional[BlockRecord]:
        """Fetch a block record by header hash; None on failure or when the node has no record."""
        try:
            response = await self.fetch("get_block_record", {"header_hash": header_hash.hex()})
            if response["block_record"] is None:
                return None
        except Exception:
            return None
        return BlockRecord.from_json_dict(response["block_record"])
    async def get_unfinished_block_headers(self) -> List[UnfinishedHeaderBlock]:
        """Return header blocks the node reports as unfinished."""
        response = await self.fetch("get_unfinished_block_headers", {})
        return [UnfinishedHeaderBlock.from_json_dict(r) for r in response["headers"]]
    async def get_all_block(self, start: uint32, end: uint32) -> List[FullBlock]:
        """Fetch the full blocks for heights from start to end via the
        'get_blocks' endpoint (presumably end-exclusive — confirm against the
        node's RPC API). Raises on fetch failure."""
        response = await self.fetch("get_blocks", {"start": start, "end": end, "exclude_header_hash": True})
        return [FullBlock.from_json_dict(r) for r in response["blocks"]]
    async def get_network_space(
        self, newer_block_header_hash: bytes32, older_block_header_hash: bytes32
    ) -> Optional[uint64]:
        """Estimate the network space between two blocks; None if the fetch fails."""
        try:
            network_space_bytes_estimate = await self.fetch(
                "get_network_space",
                {
                    "newer_block_header_hash": newer_block_header_hash.hex(),
                    "older_block_header_hash": older_block_header_hash.hex(),
                },
            )
        except Exception:
            return None
        return network_space_bytes_estimate["space"]
    async def get_coin_records_by_puzzle_hash(
        self,
        puzzle_hash: bytes32,
        include_spent_coins: bool = True,
        start_height: Optional[int] = None,
        end_height: Optional[int] = None,
    ) -> List:
        """Return CoinRecords for one puzzle hash, optionally bounded by height.

        Height bounds are only sent when provided; raises on fetch failure.
        """
        d = {"puzzle_hash": puzzle_hash.hex(), "include_spent_coins": include_spent_coins}
        if start_height is not None:
            d["start_height"] = start_height
        if end_height is not None:
            d["end_height"] = end_height
        return [
            CoinRecord.from_json_dict(coin)
            for coin in (await self.fetch("get_coin_records_by_puzzle_hash", d))["coin_records"]
        ]
    async def get_coin_records_by_puzzle_hashes(
        self,
        puzzle_hashes: List[bytes32],
        include_spent_coins: bool = True,
        start_height: Optional[int] = None,
        end_height: Optional[int] = None,
    ) -> List:
        """Batch variant of get_coin_records_by_puzzle_hash for several puzzle hashes."""
        puzzle_hashes_hex = [ph.hex() for ph in puzzle_hashes]
        d = {"puzzle_hashes": puzzle_hashes_hex, "include_spent_coins": include_spent_coins}
        if start_height is not None:
            d["start_height"] = start_height
        if end_height is not None:
            d["end_height"] = end_height
        return [
            CoinRecord.from_json_dict(coin)
            for coin in (await self.fetch("get_coin_records_by_puzzle_hashes", d))["coin_records"]
        ]
    async def get_additions_and_removals(self, header_hash: bytes32) -> Tuple[List[CoinRecord], List[CoinRecord]]:
        """Return (additions, removals) CoinRecords for a block; ([], []) if the fetch fails."""
        try:
            response = await self.fetch("get_additions_and_removals", {"header_hash": header_hash.hex()})
        except Exception:
            return [], []
        removals = []
        additions = []
        for coin_record in response["removals"]:
            removals.append(CoinRecord.from_json_dict(coin_record))
        for coin_record in response["additions"]:
            additions.append(CoinRecord.from_json_dict(coin_record))
        return additions, removals
    async def get_block_records(self, start: int, end: int) -> List:
        """Return raw block-record dicts for a height range; [] on failure.

        Unlike the other lookups these are not yet parsed into BlockRecord
        objects (see TODO below).
        """
        try:
            response = await self.fetch("get_block_records", {"start": start, "end": end})
            if response["block_records"] is None:
                return []
        except Exception:
            return []
        # TODO: return block records
        return response["block_records"]
    async def push_tx(self, spend_bundle: SpendBundle):
        """Submit a spend bundle to the node's mempool; returns the raw response dict."""
        return await self.fetch("push_tx", {"spend_bundle": spend_bundle.to_json_dict()})
    async def get_all_mempool_tx_ids(self) -> List[bytes32]:
        """Return the transaction ids currently in the mempool."""
        response = await self.fetch("get_all_mempool_tx_ids", {})
        return [bytes32(hexstr_to_bytes(tx_id_hex)) for tx_id_hex in response["tx_ids"]]
    async def get_all_mempool_items(self) -> Dict[bytes32, Dict]:
        """Return all mempool items keyed by transaction id (keys converted to bytes32)."""
        response: Dict = await self.fetch("get_all_mempool_items", {})
        converted: Dict[bytes32, Dict] = {}
        for tx_id_hex, item in response["mempool_items"].items():
            converted[bytes32(hexstr_to_bytes(tx_id_hex))] = item
        return converted
    async def get_mempool_item_by_tx_id(self, tx_id: bytes32) -> Optional[Dict]:
        """Return one mempool item by transaction id; None if absent or on failure."""
        try:
            response = await self.fetch("get_mempool_item_by_tx_id", {"tx_id": tx_id.hex()})
            return response["mempool_item"]
        except Exception:
            return None
| 43.529412 | 115 | 0.65991 |
acdebde692e7fb18c653e43e492049b60fcef200 | 2,002 | py | Python | test/dlc_tests/sanity/test_git_secrets.py | arjkesh/deep-learning-containers-1 | 9bf65197dee8a6f59b3d4ee240dcc11824240854 | [
"Apache-2.0"
] | null | null | null | test/dlc_tests/sanity/test_git_secrets.py | arjkesh/deep-learning-containers-1 | 9bf65197dee8a6f59b3d4ee240dcc11824240854 | [
"Apache-2.0"
] | null | null | null | test/dlc_tests/sanity/test_git_secrets.py | arjkesh/deep-learning-containers-1 | 9bf65197dee8a6f59b3d4ee240dcc11824240854 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import sys
import pytest
from invoke.context import Context
from test.test_utils import is_pr_context, PR_ONLY_REASON
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(logging.StreamHandler(sys.stderr))
def _recursive_find_repo_path():
pwd = os.getcwd()
repository_path = pwd
while os.path.basename(repository_path) != "deep-learning-containers":
repository_path = os.path.dirname(repository_path)
if repository_path == "/":
raise EnvironmentError(f"Repository path could not be found from {pwd}")
return repository_path
@pytest.mark.skipif(not is_pr_context(), reason=PR_ONLY_REASON)
def test_git_secrets():
    """Install git-secrets into the repository and fail the build if its
    AWS-credential scan finds anything."""
    ctx = Context()
    repository_path = os.getenv("CODEBUILD_SRC_DIR")
    if not repository_path:
        repository_path = _recursive_find_repo_path()
    LOGGER.info(f"repository_path = {repository_path}")
    # Replace the regex pattern below with a matching string to run test that makes scan fail:
    SOME_FAKE_CREDENTIALS = "ASIA[A-Z0-9]{16}"
    WHITELISTED_CREDENTIALS = "AKIAIOSFODNN7EXAMPLE"
    # End of Test Section
    with ctx.cd(repository_path):
        ctx.run("git clone https://github.com/awslabs/git-secrets.git")
        with ctx.cd("git-secrets"):
            ctx.run("make install")
        ctx.run("git secrets --install")
        ctx.run("git secrets --register-aws")
        output = ctx.run("git secrets --list")
        _log_run_result(output)
        scan_results = ctx.run("git secrets --scan", hide=True, warn=True)
        _log_run_result(scan_results)
        assert scan_results.ok, scan_results.stderr


def _log_run_result(result):
    """Log an invoke run result's command/stdout/stderr in one uniform layout.

    Deduplicates the two hand-written log blocks of the original; the second
    one inconsistently omitted the newline before the closing separator.
    """
    LOGGER.info(f"\n--COMMAND--\n{result.command}\n"
                f"--STDOUT--\n{result.stdout}\n"
                f"--STDERR--\n{result.stderr}\n"
                f"----------")
| 35.75 | 94 | 0.649351 |
acdebe4bbd8da74ca1e279b125355efdd48007ce | 4,905 | py | Python | rynda/users/tests/xUnit/test_models.py | sarutobi/ritmserdtsa | 49ca83e19cad0cd92deecc4baf86d653ac0188fa | [
"MIT"
] | null | null | null | rynda/users/tests/xUnit/test_models.py | sarutobi/ritmserdtsa | 49ca83e19cad0cd92deecc4baf86d653ac0188fa | [
"MIT"
] | null | null | null | rynda/users/tests/xUnit/test_models.py | sarutobi/ritmserdtsa | 49ca83e19cad0cd92deecc4baf86d653ac0188fa | [
"MIT"
] | null | null | null | # coding: utf-8
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from django.test import TestCase
from post_office.models import Email, EmailTemplate
from rynda.test.factories import UserFactory
from rynda.users.models import (
UserAuthCode, create_new_user, activate_user, list_public_users)
class UserAuthCodeTest(TestCase):
    """Exercise UserAuthCode generation and validation for a pending user."""
    def setUp(self):
        self.encoder = UserAuthCode('secret')
        self.user = UserFactory(is_active=False)
    def test_user(self):
        """A freshly built inactive user joined no earlier than its last login."""
        self.assertIsNotNone(self.user.date_joined)
        self.assertGreaterEqual(self.user.date_joined, self.user.last_login)
    def test_salt(self):
        """Generated salts are always eight characters long."""
        self.assertEqual(len(self.encoder.salt()), 8)
    def test_auth_code(self):
        """An auth code can be produced for a pending user."""
        self.assertIsNotNone(self.encoder.auth_code(self.user))
    def test_complete_activation(self):
        """A freshly issued code validates for its own user."""
        token = self.encoder.auth_code(self.user)
        self.assertTrue(self.encoder.is_valid(self.user, token))
    def test_wrong_key(self):
        """An arbitrary string is rejected."""
        self.assertFalse(self.encoder.is_valid(self.user, 'aaa'))
    def test_already_activated(self):
        """A code stops validating once the user has logged in."""
        token = self.encoder.auth_code(self.user)
        self.user.last_login = timezone.now()
        self.user.save()
        self.assertFalse(self.encoder.is_valid(self.user, token))
class UserTest(TestCase):
    """User-specific tests against an unsaved, factory-built user."""
    def setUp(self):
        self.user = UserFactory.build(first_name='Boy', last_name='Factory')
    def test_user(self):
        """The factory actually produces an object."""
        self.assertIsNotNone(self.user)
    def test_user_first_name(self):
        self.assertEqual(self.user.first_name, 'Boy')
    def test_user_last_name(self):
        self.assertEqual(self.user.last_name, 'Factory')
    def test_user_email(self):
        """The factory derives the email address from first/last name."""
        self.assertEqual(self.user.email, 'boy_factory@mail.ru')
class TestUserCreation(TestCase):
    """Behaviour of create_new_user: persisted user, password, flags,
    confirmation email and profile."""
    def setUp(self):
        # Unsaved factory user only supplies realistic field values.
        user = UserFactory.build()
        # The confirmation email template must exist before user creation.
        confirm = EmailTemplate(
            name='registration confirmation',
            subject='Account activation',
        )
        confirm.save()
        # Count BEFORE creation so test_create_new_user can assert the delta.
        self.before = User.objects.count()
        self.user = create_new_user(
            first_name=user.first_name,
            last_name=user.last_name,
            email=user.email,
            password='123'
        )
    def test_create_new_user(self):
        # Exactly one user row was added.
        self.assertEqual(self.before + 1, User.objects.all().count())
    def test_user_password(self):
        # The raw password was hashed and stored correctly.
        u = User.objects.get(email=self.user.email)
        self.assertTrue(u.check_password('123'))
    def test_user_staff(self):
        u = User.objects.get(email=self.user.email)
        self.assertFalse(u.is_staff)
    def test_user_active(self):
        # New accounts start inactive until the activation code is used.
        u = User.objects.get(email=self.user.email)
        self.assertFalse(u.is_active)
    def test_send_email(self):
        # Exactly one confirmation email was queued.
        emails_count = Email.objects.count()
        self.assertEqual(1, emails_count)
    def test_email_subject(self):
        mail = Email.objects.get()
        self.assertEqual(
            mail.subject,
            u'Account activation',
            mail.subject
        )
    def test_profile_creation(self):
        """ User must have profile """
        self.assertIsNotNone(self.user.profile)
class TestUserActivation(TestCase):
    """Activation of a pending user with a server-generated auth code."""
    def setUp(self):
        encoder = UserAuthCode(settings.SECRET_KEY)
        self.user = UserFactory(is_active=False)
        self.code = encoder.auth_code(self.user)
        # Templates rendered by the activation emails must exist beforehand.
        EmailTemplate.objects.create(
            name='registration confirmation',
            subject='Account activation',
        )
        EmailTemplate.objects.create(name='registration complete')
    def test_user_activation(self):
        """A valid code activates the user."""
        self.assertTrue(activate_user(self.user, self.code), self.user.email)
        self.assertTrue(self.user.is_active)
    def test_wrong_code(self):
        """A bogus code leaves the user inactive."""
        self.assertFalse(activate_user(self.user, 'self.code'))
        self.assertFalse(self.user.is_active)
class TestPublicUsers(TestCase):
    """Tests for selecting only active, public users."""
    def setUp(self):
        # Ten active, public users form the baseline result set.
        # Fix: ``xrange`` is Python-2-only; ``range`` behaves identically here.
        for _ in range(10):
            u = UserFactory(is_active=True)
            u.profile.is_public = True
            u.profile.save()
    def test_active_public(self):
        """ Select only active and public """
        self.assertEquals(10, len(list_public_users()))
    def test_not_public(self):
        """ Do not show active users without public flag """
        u = UserFactory(is_active=True)
        u.profile.is_public = False
        u.profile.save()
        u.save()
        self.assertEquals(10, len(list_public_users()))
    def test_not_active(self):
        """ Do not show inactive users even when public """
        u = UserFactory(is_active=False)
        u.profile.is_public = True
        u.profile.save()
        self.assertEquals(10, len(list_public_users()))
| 29.548193 | 77 | 0.643833 |
acdebf5ad70ca756f2dccf984214b2f908a1d988 | 1,654 | py | Python | A/A_Counterexample.py | panch-1/Codeforces_Solutions | d4d18cb0e613135b78ed4e2325b0afbe8aae005d | [
"MIT"
] | 1 | 2021-08-09T19:47:26.000Z | 2021-08-09T19:47:26.000Z | A/A_Counterexample.py | panch-1/Codeforces_Solutions | d4d18cb0e613135b78ed4e2325b0afbe8aae005d | [
"MIT"
] | null | null | null | A/A_Counterexample.py | panch-1/Codeforces_Solutions | d4d18cb0e613135b78ed4e2325b0afbe8aae005d | [
"MIT"
] | null | null | null | import sys,math,io,os,time,itertools,collections
mod=10**9+7
sys.setrecursionlimit(10000)
i=sys.stdin.readline
p=sys.stdout.write
#use sys.stdout.write() (remember to convert to str b4 and concatenate "\n")
global start,end
#binary search
def isin(l, x):
    """Binary search in the sorted list l.

    Returns an index of x when present; otherwise the index of the greatest
    element smaller than x, or -1 when x precedes every element. Assumes l
    is non-empty and sorted ascending.
    """
    if x < l[0]:
        return -1
    lo, hi = 0, len(l) - 1
    floor_idx = -1
    while lo <= hi:
        mid = (lo + hi) // 2
        if l[mid] == x:
            return mid
        if l[mid] < x:
            # l[mid] is a candidate floor; look further right.
            floor_idx = mid
            lo = mid + 1
        else:
            hi = mid - 1
    return floor_idx
#is palindrome or not
def ispal(l):
    """Return True when the sequence l is a palindrome.

    Bug fix: the original iterated range(n//2 + 1), which indexed l[0] on an
    empty sequence and raised IndexError. Comparing only the first n//2
    mirrored pairs gives identical results for all non-empty inputs and
    makes the empty sequence (vacuously) a palindrome.
    """
    n = len(l)
    return all(l[k] == l[n - k - 1] for k in range(n // 2))
#coordinate compression
def ccarray(l):
    """Return the original positions of l's values in ascending value order
    (an argsort for distinct values; a duplicate value keeps only its last
    position, so the result is shorter than l when duplicates exist)."""
    index_of = {value: position for position, value in enumerate(l)}
    return [index_of[value] for value in sorted(index_of)]
#checks if prime or not
def is_prime(n):
    """Deterministic 6k±1 trial-division primality test."""
    if n < 2:
        return False
    if n < 4:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 is of the form 6k ± 1; test only those divisors.
    for d in range(5, math.isqrt(n) + 1, 6):
        if n % d == 0 or n % (d + 2) == 0:
            return False
    return True
#sieve of eratosthenes
def sieve(n):
    """Sieve of Eratosthenes up to and including n.

    Returns a boolean list ``prime`` where ``prime[k]`` is True iff k is
    prime. Bug fix: the original computed the table and then discarded it
    (no return statement); returning it is backward compatible since the old
    return value was None and necessarily unused. Entries 0 and 1 are now
    correctly marked non-prime.
    """
    prime = [True] * (n + 1)
    if n >= 0:
        prime[0] = False
    if n >= 1:
        prime[1] = False
    candidate = 2
    while candidate * candidate <= n:
        if prime[candidate]:
            for multiple in range(candidate * candidate, n + 1, candidate):
                prime[multiple] = False
        candidate += 1
    return prime
def main():
    # Counterexample task: find a < b < c in [l, r] with gcd(a, b) = 1,
    # gcd(b, c) = 1 but gcd(a, c) > 1. Three consecutive integers starting
    # at an even number (even, odd, even) always qualify.
    lo, hi = [int(tok) for tok in i().split()]
    first = lo if lo & 1 == 0 else lo + 1  # smallest even number >= lo
    if first + 2 > hi:
        p("-1\n")
    else:
        p(" ".join(str(v) for v in (first, first + 1, first + 2)) + "\n")
# Single test case by default; for multi-test problems read t from stdin.
t=1
#t=int(i())
# Wall-clock the solve; the elapsed time print below stays commented out
# for submission.
start=time.perf_counter()
for _ in range(t):
    main()
end=time.perf_counter()
#print(end-start)
acdebfd86dadfad8867a3d7ab875b9f9811b4652 | 1,334 | py | Python | classes/args.py | MattTheCoder-W/yt-music-cracker | 55fe77ff34ad3e7af38a8ca4d08db5b9749bc658 | [
"Apache-2.0"
] | null | null | null | classes/args.py | MattTheCoder-W/yt-music-cracker | 55fe77ff34ad3e7af38a8ca4d08db5b9749bc658 | [
"Apache-2.0"
] | null | null | null | classes/args.py | MattTheCoder-W/yt-music-cracker | 55fe77ff34ad3e7af38a8ca4d08db5b9749bc658 | [
"Apache-2.0"
] | null | null | null | import argparse
from classes.interactive import Interactive
from classes.colors import message
class Arguments:
    """Parse command-line options for the downloader, falling back to the
    interactive prompt when -i/--interactive is given. The resulting option
    dict is exposed through :meth:`getargs`."""
    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-i', '--interactive', action="store_true", help="Interactive mode of this script")
        parser.add_argument('-p', '--playlist-link', type=str, help='Link for playlist on youtube')
        parser.add_argument('-f', '--format', type=str, help='File format (flac/mp3)')
        parser.add_argument('-a', '--artist', type=str, help='Artist of album')
        parser.add_argument('-t', '--album-title', type=str, help='Title of the album')
        parser.add_argument('-c', '--cover', type=str, help='Cover image file or link to online cover')
        parser.add_argument('-s', '--skip', type=str, help='Phrase to skip in titles of songs, if you want to skip more than one, use " && " separator between them.')
        parsed = parser.parse_args()
        self.interactive = parsed.interactive
        if self.interactive:
            # Interactive mode replaces the CLI values entirely.
            self.args = Interactive().getargs()
            return
        self.args = vars(parsed)
        if self.args['format'] not in ['flac', 'mp3']:
            message(f"Format {self.args['format']} not found!", "red")
            exit()
    def getargs(self):
        """Return the parsed (or interactively gathered) option dict."""
        return self.args
| 46 | 163 | 0.608696 |
acdebfe2b4c03c7b87e516b75d8e3b50cd7e9092 | 5,917 | py | Python | Blender.Client/batchapps_blender/props/props_assets.py | cenit/azure-batch-apps-blender | e17e43cbb36f6720bce22b18d9fcf01b1b7cd3a4 | [
"MIT"
] | 29 | 2015-01-28T14:46:05.000Z | 2021-12-11T20:13:07.000Z | Blender.Client/batchapps_blender/props/props_assets.py | cenit/azure-batch-apps-blender | e17e43cbb36f6720bce22b18d9fcf01b1b7cd3a4 | [
"MIT"
] | 16 | 2015-02-07T17:35:43.000Z | 2021-02-24T04:09:25.000Z | Blender.Client/batchapps_blender/props/props_assets.py | cenit/azure-batch-apps-blender | e17e43cbb36f6720bce22b18d9fcf01b1b7cd3a4 | [
"MIT"
] | 14 | 2015-01-28T11:13:41.000Z | 2022-01-09T21:20:18.000Z | #-------------------------------------------------------------------------
#
# Batch Apps Blender Addon
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
import bpy
@bpy.app.handlers.persistent
def on_load(*args):
    """Blend-file load handler.

    Clears the stored job file path and, when the asset page is active,
    triggers an asset-list refresh so state never leaks between .blend files.
    """
    scene = bpy.context.scene
    scene.batchapps_assets.path = ""
    if scene.batchapps_session.page == "ASSETS":
        bpy.ops.batchapps_assets.refresh()
def format_date(asset):
    """
    Format an asset's last modified date for the UI.

    :Args:
        - asset (:class:`batchapps.files.UserFile`): Asset whose
          ``get_last_modified()`` returns an ISO-8601-like string,
          e.g. ``"2020-01-02T03:04:05.123Z"``.

    :Returns:
        - The last modified date as ``"YYYY-MM-DD HH:MM:SS"``. If
          formatting fails, an empty string.
    """
    try:
        datelist = asset.get_last_modified().split('T')
        datelist[1] = datelist[1].split('.')[0]
        return ' '.join(datelist)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while formatting.
        bpy.context.scene.batchapps_session.log.debug(
            "Couldn't format date {0}.".format(asset.get_last_modified()))
        return ""
class AssetDisplayProps(bpy.types.PropertyGroup):
    """
    A display object representing an asset.
    Displayed by :class:`.ui_assets.AssetListUI`.
    """
    # File name shown in the UI list.
    name = bpy.props.StringProperty(
        description="Asset filename")
    # Absolute path of the asset on disk.
    fullpath = bpy.props.StringProperty(
        description="Asset full path")
    # UI checkbox state: user wants this asset uploaded.
    upload_checkbox = bpy.props.BoolProperty(
        description = "Check to upload asset",
        default = False)
    # Internal flag: asset has been (or is selected to be) uploaded.
    upload_check = bpy.props.BoolProperty(
        description="Selected for upload",
        default=False)
    # Pre-formatted last-modified timestamp (see format_date).
    timestamp = bpy.props.StringProperty(
        description="Asset last modified timestamp",
        default="")
class AssetProps(bpy.types.PropertyGroup):
    """
    Asset Properties,
    Once instantiated, this class is set to both the Blender context, and
    assigned to assets.BatchAppsAssets.props.

    Keeps two parallel lists in sync: ``collection`` holds the real asset
    objects, ``assets`` holds their UI display entries.
    """
    # NOTE(review): class-level mutable list, shared by every instance of
    # this group; in practice one instance lives on the scene — confirm
    # before reusing this PropertyGroup elsewhere.
    collection = []
    # Blend file path submitted with the job.
    path = bpy.props.StringProperty(
        description="Blend file path to be rendered")
    temp = bpy.props.BoolProperty(
        description="Whether we're using a temp blend file",
        default=False)
    # UI display list, parallel to ``collection``.
    assets = bpy.props.CollectionProperty(
        type=AssetDisplayProps,
        description="Asset display list")
    index = bpy.props.IntProperty(
        description="Selected asset index")
    def add_asset(self, asset):
        """
        Add an asset to both the display and object lists.
        """
        log = bpy.context.scene.batchapps_session.log
        log.debug("Adding asset to ui list {0}.".format(asset.name))
        uploaded = asset.is_uploaded()
        self.collection.append(asset)
        self.assets.add()
        # The freshly added display entry is last; fill in its fields.
        entry = self.assets[-1]
        entry.name = asset.name
        entry.timestamp = format_date(asset)
        entry.fullpath = asset.path
        # is_uploaded() returning None means "not uploaded yet".
        entry.upload_check = False if uploaded is None else True
        log.debug("Total assets now {0}.".format(len(self.assets)))
    def remove_selected(self):
        """
        Remove selected asset from both display and object lists.
        """
        bpy.context.scene.batchapps_session.log.debug(
            "Removing index {0}.".format(self.index))
        self.collection.pop(self.index)
        self.assets.remove(self.index)
        # Clamp the selection so it never goes negative.
        self.index = max(self.index - 1, 0)
    def get_jobfile(self):
        """
        Get the asset object whose path is the job file path.

        :Raises: ValueError when no asset in the collection matches ``path``.
        """
        log = bpy.context.scene.batchapps_session.log
        # for/else: the else branch runs only when no asset matched.
        for asset in self.collection:
            if asset.path == self.path:
                log.debug("Found job asset at {0}".format(self.path))
                return asset
        else:
            log.debug("Found no job asset, using {0}".format(self.path))
            raise ValueError("Job Asset not in collection")
    def reset(self):
        """
        Clear both asset display and object lists.
        """
        self.collection.clear()
        self.assets.clear()
        self.index = 0
        bpy.context.scene.batchapps_session.log.debug("Reset asset lists.")
    def set_uploaded(self):
        """
        Mark all assets as having been uploaded.
        """
        for asset in self.assets:
            asset.upload_check = True
def register_props():
    """Register the asset property group on the Blender scene.

    Attaches an :class:`AssetProps` pointer to ``bpy.types.Scene`` as
    ``batchapps_assets``, hooks :func:`on_load` to run after every blend
    file load, and returns the scene-level property instance.

    :Returns:
        - A :class:`.AssetProps` object
    """
    bpy.types.Scene.batchapps_assets = bpy.props.PointerProperty(type=AssetProps)
    bpy.app.handlers.load_post.append(on_load)
    return bpy.context.scene.batchapps_assets
return bpy.context.scene.batchapps_assets | 30.5 | 81 | 0.636809 |
acdec0ea3cf54c7f1d1a923758c36108f53a423d | 4,163 | py | Python | ribosome/compute/ribosome_api.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | ribosome/compute/ribosome_api.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | ribosome/compute/ribosome_api.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | import abc
from typing import TypeVar, Callable, Type
from amino.lenses.lens import lens
from amino import do, Do, _, Either
from ribosome.nvim.io.state import NS
from ribosome.compute.ribosome import Ribosome
from ribosome.config.setting import Setting
from ribosome.nvim.io.compute import NvimIO, NRParams
from ribosome.compute.prog import Prog, ProgExec
from ribosome.compute.output import ProgOutputResult
from ribosome.compute.tpe_data import StateProg, trivial_state_prog, ribo_state_prog
from ribosome.compute.wrap import prog_wrappers
from ribosome.data.plugin_state import PS
from ribosome.nvim.api.command import doautocmd
# Type variables used throughout: A = result value, D = main plugin data,
# CC = component config, C = component state, M/R = state-prog parameters.
A = TypeVar('A')
D = TypeVar('D')
M = TypeVar('M')
CC = TypeVar('CC')
C = TypeVar('C')
R = TypeVar('R')
# Lens focusing the main data object inside a Ribosome (.state.data).
main_lens = lens.state.data
class RMeta(abc.ABCMeta):
    # NOTE(review): currently adds nothing over ABCMeta — presumably a hook
    # point reserved for future class-construction customization; confirm.
    pass
class Ribo(metaclass=RMeta):
    """Namespace of classmethod combinators for plugin programs.

    Groups helpers that read settings, focus the main data or the current
    component of a ``Ribosome`` via lenses, and lift ``NS``/``NvimIO``
    actions into ``Prog``.

    NOTE(review): the classmethods name their first parameter ``self`` even
    though it receives the class; kept as-is to avoid touching signatures.
    """
    # --- settings -----------------------------------------------------
    @classmethod
    def setting_e(self, setting: Setting[A]) -> NS[Ribosome[D, CC, C], Either[str, A]]:
        # Setting value or its default, as an Either, lifted into NS.
        return NS.lift(setting.value_or_default_e())
    @classmethod
    def setting(self, setting: Setting[A]) -> NS[Ribosome[D, CC, C], A]:
        # Setting value or its default, failing inside NS on error.
        return NS.lift(setting.value_or_default())
    @classmethod
    def setting_raw(self, setting: Setting[A]) -> NS[Ribosome[D, CC, C], Either[str, A]]:
        # Raw setting value without default substitution.
        return NS.lift(setting.value)
    @classmethod
    def setting_prog_e(self, setting: Setting[A]) -> Prog[Either[str, A]]:
        return Ribo.lift(Ribo.setting_e(setting), None)
    @classmethod
    def setting_prog(self, setting: Setting[A]) -> Prog[A]:
        return Ribo.lift(Ribo.setting(setting), None)
    # --- component state access --------------------------------------
    @classmethod
    def comp(self) -> NS[Ribosome[D, CC, C], C]:
        # Read the current component state through the ribosome's comp lens.
        return NS.inspect(lambda a: a.comp_lens.get()(a))
    @classmethod
    def inspect_comp(self, f: Callable[[C], A]) -> NS[Ribosome[D, CC, C], A]:
        return NS.inspect(lambda a: f(a.comp_lens.get()(a)))
    @classmethod
    def inspect_comp_e(self, f: Callable[[C], A]) -> NS[Ribosome[D, CC, C], A]:
        # Variant whose inspector returns an Either.
        return NS.inspect_either(lambda a: f(a.comp_lens.get()(a)))
    @classmethod
    def modify_comp(self, f: Callable[[C], C]) -> NS[Ribosome[D, CC, C], None]:
        return NS.modify(lambda a: a.comp_lens.modify(f)(a))
    # --- main data access --------------------------------------------
    @classmethod
    def main(self) -> NS[Ribosome[D, CC, C], C]:
        return NS.inspect(lambda a: main_lens.get()(a))
    @classmethod
    def inspect_main(self, f: Callable[[D], A]) -> NS[Ribosome[D, CC, C], A]:
        return NS.inspect(lambda a: f(main_lens.get()(a)))
    @classmethod
    def modify_main(self, f: Callable[[D], D]) -> NS[Ribosome[D, CC, C], None]:
        return NS.modify(lambda a: main_lens.modify(f)(a))
    # --- zooming: run a narrower-state NS inside the ribosome state ---
    @classmethod
    @do(NS[Ribosome[D, CC, C], A])
    def zoom_main(self, fa: NS[D, A]) -> Do:
        yield fa.zoom(main_lens)
    @classmethod
    @do(NS[Ribosome[D, CC, C], A])
    def zoom_comp(self, fa: NS[C, A]) -> Do:
        lens = yield NS.inspect(_.comp_lens)
        yield fa.zoom(lens)
    # --- lifting NS / NvimIO into Prog -------------------------------
    @classmethod
    @do(Prog[A])
    def lift_state_prog(self, fa: NS[Ribosome[D, CC, C], A], state_type: StateProg[M, C, R, A]) -> Do:
        # Resolve the state wrappers for this state type, then execute.
        wrappers = yield Prog.from_either(prog_wrappers.match(state_type))
        yield ProgExec('lift', fa, wrappers, ProgOutputResult())
    @classmethod
    @do(Prog[A])
    def lift(self, fa: NS[Ribosome[D, CC, C], A], comp: Type[C]) -> Do:
        state_type: StateProg[PS[D, CC], C, Ribosome[D, CC, C]] = ribo_state_prog(comp)
        yield Ribo.lift_state_prog(fa, state_type)
    @classmethod
    def lift_comp(self, fa: NS[C, A], comp: Type[C]) -> Prog[A]:
        return Ribo.lift(Ribo.zoom_comp(fa), comp)
    @classmethod
    def trivial(self, fa: NS[D, A]) -> Prog[A]:
        # Lift without any component focus.
        return Ribo.lift_state_prog(fa, trivial_state_prog)
    @classmethod
    def lift_nvimio(self, fa: NvimIO[A]) -> Prog[A]:
        return Ribo.trivial(NS.lift(fa))
    # --- autocmds -----------------------------------------------------
    @classmethod
    def autocmd(self, name: str, verbose: bool=False) -> NS[Ribosome[D, CC, C], None]:
        # Fire a `User` autocmd asynchronously (sync=False).
        return NS.lift(doautocmd('User', name, params=NRParams.cons(verbose=verbose, sync=False)))
    @classmethod
    def autocmd_prog(self, name: str) -> Prog[None]:
        return Ribo.lift(Ribo.autocmd(name), None)
__all__ = ('Ribosome', 'Ribo')
| 33.039683 | 102 | 0.652174 |
acdec197e5dde7f9e66677a989f2eb47ce4402c8 | 2,289 | py | Python | realtime_voice_conversion/config.py | karanokuri/realtime-yukarin | a8cd36b180c93d7251327b3ca4a027d2a9f0c868 | [
"MIT"
] | 296 | 2019-06-07T22:33:21.000Z | 2022-03-30T13:41:37.000Z | realtime_voice_conversion/config.py | Hiroshiba/realtime-voice-conversion | a8cd36b180c93d7251327b3ca4a027d2a9f0c868 | [
"MIT"
] | 6 | 2019-10-21T10:32:20.000Z | 2021-07-01T05:33:35.000Z | realtime_voice_conversion/config.py | karanokuri/realtime-yukarin | a8cd36b180c93d7251327b3ca4a027d2a9f0c868 | [
"MIT"
] | 38 | 2019-06-13T00:23:04.000Z | 2022-03-29T02:17:49.000Z | from enum import Enum
from pathlib import Path
from typing import NamedTuple, Dict, Any
import yaml
class VocodeMode(Enum):
    # F0-extraction backend selector used by Config.extract_f0_mode.
    WORLD = 'world'
    CREPE = 'crepe'
class Config(NamedTuple):
    """Immutable runtime configuration for the realtime voice-conversion
    pipeline, normally loaded from a YAML file via :meth:`from_yaml`."""
    input_device_name: str
    output_device_name: str
    input_rate: int
    output_rate: int
    frame_period: float
    buffer_time: float
    extract_f0_mode: VocodeMode
    vocoder_buffer_size: int
    input_scale: float
    output_scale: float
    input_silent_threshold: float
    output_silent_threshold: float
    encode_extra_time: float
    convert_extra_time: float
    decode_extra_time: float
    input_statistics_path: Path
    target_statistics_path: Path
    stage1_model_path: Path
    stage1_config_path: Path
    stage2_model_path: Path
    stage2_config_path: Path
    @property
    def in_audio_chunk(self):
        # Number of input samples per processing buffer.
        return round(self.input_rate * self.buffer_time)
    @property
    def out_audio_chunk(self):
        # Number of output samples per processing buffer.
        return round(self.output_rate * self.buffer_time)
    @staticmethod
    def from_yaml(path: Path):
        """Load a Config from a YAML file.

        Bug fix: the original passed ``path.open()`` straight to
        ``yaml.safe_load`` and never closed the handle; the file is now
        closed deterministically via a context manager.
        """
        with path.open() as f:
            d: Dict[str, Any] = yaml.safe_load(f)
        return Config(
            input_device_name=d['input_device_name'],
            output_device_name=d['output_device_name'],
            input_rate=d['input_rate'],
            output_rate=d['output_rate'],
            frame_period=d['frame_period'],
            buffer_time=d['buffer_time'],
            extract_f0_mode=VocodeMode(d['extract_f0_mode']),
            vocoder_buffer_size=d['vocoder_buffer_size'],
            input_scale=d['input_scale'],
            output_scale=d['output_scale'],
            input_silent_threshold=d['input_silent_threshold'],
            output_silent_threshold=d['output_silent_threshold'],
            encode_extra_time=d['encode_extra_time'],
            convert_extra_time=d['convert_extra_time'],
            decode_extra_time=d['decode_extra_time'],
            input_statistics_path=Path(d['input_statistics_path']),
            target_statistics_path=Path(d['target_statistics_path']),
            stage1_model_path=Path(d['stage1_model_path']),
            stage1_config_path=Path(d['stage1_config_path']),
            stage2_model_path=Path(d['stage2_model_path']),
            stage2_config_path=Path(d['stage2_config_path']),
        )
| 31.791667 | 69 | 0.674967 |
acdec27c40b5b8d4fbef796624972d0baf3cedf2 | 10,341 | py | Python | tencentcloud/tms/v20201229/models.py | qin5506/tencentcloud-sdk-python | e9c59d80beabf75fb96456bb8d7a53400346fe9a | [
"Apache-2.0"
] | null | null | null | tencentcloud/tms/v20201229/models.py | qin5506/tencentcloud-sdk-python | e9c59d80beabf75fb96456bb8d7a53400346fe9a | [
"Apache-2.0"
] | null | null | null | tencentcloud/tms/v20201229/models.py | qin5506/tencentcloud-sdk-python | e9c59d80beabf75fb96456bb8d7a53400346fe9a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class DetailResults(AbstractModel):
    """Per-label detail entry of a text-moderation verdict.

    Fix: the unknown-parameter warning said "fileds"; it now says "fields".
    """

    # Keys copied verbatim from the API payload by _deserialize.
    _FIELDS = ("Label", "Suggestion", "Keywords", "Score",
               "LibType", "LibId", "LibName")

    def __init__(self):
        """
        :param Label: Verdict label: Normal, Porn, Abuse, Ad, or Custom
            (user-defined keyword library), plus other unsafe content types.
        :type Label: str
        :param Suggestion: Recommended action: Block, Review, or Pass.
            May be null.
        :type Suggestion: str
        :param Keywords: Keywords matched under this label. May be null.
        :type Keywords: list of str
        :param Score: Model score for this label. May be null.
        :type Score: int
        :param LibType: Custom-library type (1: black/white list, 2: custom
            library); only meaningful when Label is Custom. May be null.
        :type LibType: int
        :param LibId: Custom-library id; only meaningful when Label is
            Custom. May be null.
        :type LibId: str
        :param LibName: Custom-library name; only meaningful when Label is
            Custom. May be null.
        :type LibName: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        """Populate attributes from *params* and warn about unknown keys."""
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class Device(AbstractModel):
    """Device information attached to a moderation request.

    Fix: the unknown-parameter warning said "fileds"; it now says "fields".
    """

    # Keys copied verbatim from the API payload by _deserialize.
    _FIELDS = ("IP", "Mac", "TokenId", "DeviceId", "IMEI", "IDFA", "IDFV")

    def __init__(self):
        """
        :param IP: User IP address.
        :type IP: str
        :param Mac: MAC address.
        :type Mac: str
        :param TokenId: Device-fingerprint token.
        :type TokenId: str
        :param DeviceId: Device-fingerprint id.
        :type DeviceId: str
        :param IMEI: Device serial number.
        :type IMEI: str
        :param IDFA: iOS Identifier For Advertising.
        :type IDFA: str
        :param IDFV: iOS Identifier For Vendor.
        :type IDFV: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        """Populate attributes from *params* and warn about unknown keys."""
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class RiskDetails(AbstractModel):
    """Account-risk detection result.

    Fix: the unknown-parameter warning said "fileds"; it now says "fields".
    """

    # Keys copied verbatim from the API payload by _deserialize.
    _FIELDS = ("Label", "Level")

    def __init__(self):
        """
        :param Label: Risk category: RiskAccount, RiskIP, or RiskIMEI.
        :type Label: str
        :param Level: Risk level (1: suspicious, 2: malicious).
        :type Level: int
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        """Populate attributes from *params* and warn about unknown keys."""
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class TextModerationRequest(AbstractModel):
    """TextModeration request payload.

    Fix: the unknown-parameter warning said "fileds"; it now says "fields".
    """

    # Scalar keys copied verbatim; User and Device are nested models.
    _SCALAR_FIELDS = ("Content", "BizType", "DataId")

    def __init__(self):
        """
        :param Content: Base64-encoded text. The original text must be under
            15000 bytes (about 5000 Chinese characters).
        :type Content: str
        :param BizType: Business-scenario id configured in the console;
            defaults to the backend default policy when omitted.
        :type BizType: str
        :param DataId: Data id: letters, digits, underscores and dashes,
            at most 64 characters.
        :type DataId: str
        :param User: Account information used to spot risky accounts.
        :type User: :class:`tencentcloud.tms.v20201229.models.User`
        :param Device: Device information used to spot risky devices.
        :type Device: :class:`tencentcloud.tms.v20201229.models.Device`
        """
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)
        self.User = None
        self.Device = None

    def _deserialize(self, params):
        """Populate attributes from *params* and warn about unknown keys."""
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
        if params.get("User") is not None:
            self.User = User()
            self.User._deserialize(params.get("User"))
        if params.get("Device") is not None:
            self.Device = Device()
            self.Device._deserialize(params.get("Device"))
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class TextModerationResponse(AbstractModel):
    """TextModeration response payload.

    Fix: the unknown-parameter warning said "fileds"; it now says "fields".
    """

    # Scalar keys copied verbatim; DetailResults and RiskDetails are lists
    # of nested models.
    _SCALAR_FIELDS = ("BizType", "Label", "Suggestion", "Keywords", "Score",
                      "Extra", "DataId", "RequestId")

    def __init__(self):
        """
        :param BizType: Echo of the request's BizType.
        :type BizType: str
        :param Label: Verdict label: Normal, Porn, Abuse, Ad, or Custom,
            plus other unsafe content types.
        :type Label: str
        :param Suggestion: Recommended action: Block, Review, or Pass.
        :type Suggestion: str
        :param Keywords: Keywords that triggered the verdict; empty when the
            result came from the semantic model alone. May be null.
        :type Keywords: list of str
        :param Score: Confidence for the verdict, 0.00-100.00; higher means
            more likely to belong to the returned label.
        :type Score: int
        :param DetailResults: Detailed per-label results. May be null.
        :type DetailResults: list of DetailResults
        :param RiskDetails: Account-risk detection results. May be null.
        :type RiskDetails: list of RiskDetails
        :param Extra: Extension field; contents vary per customer/BizType.
            May be null.
        :type Extra: str
        :param DataId: Echo of the request's DataId. May be null.
        :type DataId: str
        :param RequestId: Unique request id, returned on every call.
        :type RequestId: str
        """
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)
        self.DetailResults = None
        self.RiskDetails = None

    def _deserialize(self, params):
        """Populate attributes from *params* and warn about unknown keys."""
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
        if params.get("DetailResults") is not None:
            self.DetailResults = []
            for item in params.get("DetailResults"):
                entry = DetailResults()
                entry._deserialize(item)
                self.DetailResults.append(entry)
        if params.get("RiskDetails") is not None:
            self.RiskDetails = []
            for item in params.get("RiskDetails"):
                entry = RiskDetails()
                entry._deserialize(item)
                self.RiskDetails.append(entry)
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class User(AbstractModel):
    """User/account information attached to a moderation request.

    Fix: the unknown-parameter warning said "fileds"; it now says "fields".
    """

    # Keys copied verbatim from the API payload by _deserialize.
    _FIELDS = ("UserId", "Nickname", "AccountType", "Gender",
               "Age", "Level", "Phone")

    def __init__(self):
        """
        :param UserId: Account id; with AccountType it uniquely identifies an
            account (WeChat uin, QQ number, openid, arbitrary string, ...)
            and lets the service factor account history into the verdict.
        :type UserId: str
        :param Nickname: User nickname.
        :type Nickname: str
        :param AccountType: Account category: 1 WeChat uin, 2 QQ number,
            3 WeChat group uin, 4 QQ group number, 5 WeChat openid,
            6 QQ openid, 7 other string.
        :type AccountType: int
        :param Gender: Gender: 0 unknown (default), 1 male, 2 female.
        :type Gender: int
        :param Age: Age; 0 (default) means unknown.
        :type Age: int
        :param Level: User level: 0 unknown (default), 1 low, 2 mid, 3 high.
        :type Level: int
        :param Phone: Phone number.
        :type Phone: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        """Populate attributes from *params* and warn about unknown keys."""
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
| 32.214953 | 128 | 0.601876 |
acdec33a934b725be8af0f40384e79f7c4fc319e | 6,985 | py | Python | app/recipe/tests/test_tag_api.py | amirtds/recipe-app-api | 247efacfe2c6469c78812aaceae180a68ddb081a | [
"MIT"
] | null | null | null | app/recipe/tests/test_tag_api.py | amirtds/recipe-app-api | 247efacfe2c6469c78812aaceae180a68ddb081a | [
"MIT"
] | null | null | null | app/recipe/tests/test_tag_api.py | amirtds/recipe-app-api | 247efacfe2c6469c78812aaceae180a68ddb081a | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Ingredient
from recipe.serializers import TagSerializer, IngredientSerializer
TAGS_URL = reverse('recipe:tag-list')
INGREDIENTS_URL = reverse("recipe:ingredient-list")
# 1. test that api is not public
class PublicTagApiTests(TestCase):
    """Tag endpoints must reject unauthenticated access."""

    def setUp(self):
        # Anonymous client — no authentication on purpose.
        self.client = APIClient()

    def test_api_fail_not_authenticated(self):
        """Listing tags anonymously returns 401."""
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagApiTests(TestCase):
    """Tag endpoints as seen by an authenticated user."""

    def setUp(self):
        # Authenticated client shared by every test in this class.
        self.user = get_user_model().objects.create_user(
            email="test@example.com",
            password="amir@123",
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_api_list_success_authenticated(self):
        """Listing returns every tag, ordered by name descending."""
        Tag.objects.create(name="test1", user=self.user)
        Tag.objects.create(name="test2", user=self.user)

        res = self.client.get(TAGS_URL)

        expected = TagSerializer(Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, expected.data)

    def test_api_list_limited_to_user(self):
        """Only the requesting user's tags are returned."""
        other_user = get_user_model().objects.create_user(
            email="test2@example.com",
            password="test@123",
        )
        foreign_tag = Tag.objects.create(user=other_user, name="tag1")
        own_tag = Tag.objects.create(user=self.user, name="tag2")

        res = self.client.get(TAGS_URL)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], own_tag.name)
        self.assertFalse(res.data[0]['name'] == foreign_tag.name)

    def test_create_tag_successful(self):
        """POSTing a valid payload creates the tag for the caller."""
        payload = {'name': "test tag"}

        res = self.client.post(TAGS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertTrue(
            Tag.objects.filter(name=payload['name'], user=self.user).exists()
        )

    def test_tag_api_fail_missed_field(self):
        """An empty name is rejected with 400."""
        res = self.client.post(TAGS_URL, {"name": ""})
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
# 5. Test ingredient API - Public
class TestIngredientApiPiblic(TestCase):
    """Ingredient endpoints must reject unauthenticated access.

    NOTE(review): keeps the original "Piblic" typo in the class name;
    renaming a discovered test class is deferred to a dedicated cleanup.
    """

    def setUp(self):
        # Anonymous client — no authentication on purpose.
        self.client = APIClient()

    def test_api_fail_not_authenticated(self):
        """Listing ingredients anonymously returns 401."""
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
# 6. Test ingredient API - Private
class TestIngredientApiPrivate(TestCase):
    """Ingredient endpoints as seen by an authenticated user."""

    def setUp(self):
        # Authenticated client shared by every test in this class.
        self.user = get_user_model().objects.create_user(
            email="test@example.com",
            password="test@123",
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_api_list_success_authenticated(self):
        """Listing returns the ingredients, ordered by name descending."""
        Ingredient.objects.create(user=self.user, name="test ingredient")

        res = self.client.get(INGREDIENTS_URL)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        expected = IngredientSerializer(
            Ingredient.objects.all().order_by('-name'), many=True
        )
        self.assertEqual(res.data, expected.data)

    def test_api_list_limited_to_user(self):
        """Only the requesting user's ingredients are returned."""
        other_user = get_user_model().objects.create_user(
            email="test2@example.com",
            password="test@123",
        )
        own = Ingredient.objects.create(user=self.user, name="ingredient1")
        foreign = Ingredient.objects.create(user=other_user, name="ingredient2")

        res = self.client.get(INGREDIENTS_URL)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], own.name)
        self.assertFalse(res.data[0]['name'] == foreign.name)

    def test_create_ingredient_successful(self):
        """POSTing a valid payload creates the ingredient for the caller."""
        payload = {'name': 'ingredient1'}

        res = self.client.post(INGREDIENTS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertTrue(
            Ingredient.objects.filter(
                user=self.user, name=payload['name']
            ).exists()
        )

    def test_ingredient_creation_fails_missing_fields(self):
        """An empty name is rejected with 400."""
        res = self.client.post(INGREDIENTS_URL, {"name": ""})
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
acdec3a7e3754d4cd8ad5f87fc981e3815dc5373 | 2,847 | py | Python | tests/test_call_api.py | nonebot/nonebug | 40fcd4f3eff8f4b2118e95938fabc3d77ff6819c | [
"MIT"
] | 9 | 2021-10-09T05:19:13.000Z | 2022-03-18T15:18:00.000Z | tests/test_call_api.py | AkiraXie/nonebug | 5556f94f3e85a26602fc015013e9fbdda07f8c71 | [
"MIT"
] | 2 | 2021-11-23T06:29:20.000Z | 2022-03-18T15:51:51.000Z | tests/test_call_api.py | AkiraXie/nonebug | 5556f94f3e85a26602fc015013e9fbdda07f8c71 | [
"MIT"
] | 1 | 2022-02-19T08:57:50.000Z | 2022-02-19T08:57:50.000Z | import pytest
from utils import make_fake_event
from nonebug.fixture import *
from nonebug import ProcessorApp
@pytest.mark.asyncio
async def test_should_call_api(processor_app: "ProcessorApp"):
    """should_call_api enqueues a fully populated expectation model."""
    async with processor_app.test_api() as ctx:
        expected = ctx.should_call_api("test", {"data": "data"}, "result")
        pending = ctx.wait_list
        assert not pending.empty()
        assert expected == pending.get()
        assert expected.name == "test"
        assert expected.data == {"data": "data"}
        assert expected.result == "result"
@pytest.mark.asyncio
async def test_should_call_send(processor_app: "ProcessorApp"):
    """should_call_send enqueues a model carrying event/message/result."""
    from nonebot.adapters import Event, Message

    class FakeEvent(Event):
        """Minimal concrete Event; message access is unsupported."""

        def get_type(self) -> str:
            return "test"

        def get_event_name(self) -> str:
            return "test"

        def get_event_description(self) -> str:
            return "test"

        def get_user_id(self) -> str:
            return "test"

        def get_session_id(self) -> str:
            return "test"

        def get_message(self) -> Message:
            raise NotImplementedError

        def is_tome(self) -> bool:
            return True

        class Config:
            extra = "forbid"

    event = FakeEvent()
    async with processor_app.test_api() as ctx:
        expected = ctx.should_call_send(event, "test message", "result")
        pending = ctx.wait_list
        assert not pending.empty()
        assert expected == pending.get()
        assert expected.event is event
        assert expected.message == "test message"
        assert expected.result == "result"
@pytest.mark.asyncio
async def test_got_call(processor_app: "ProcessorApp"):
    """A matching call_api/send consumes the expectation and yields the stub result."""
    async with processor_app.test_api() as ctx:
        bot = ctx.create_bot()
        ctx.should_call_api("test", {"key": "value"}, "result")
        outcome = await bot.call_api("test", key="value")
        assert ctx.wait_list.empty()
        assert outcome == "result"

    async with processor_app.test_api() as ctx:
        bot = ctx.create_bot()
        fake_event = make_fake_event()()
        ctx.should_call_send(fake_event, "test", "result", key="value")
        outcome = await bot.send(fake_event, "test", key="value")
        assert ctx.wait_list.empty()
        assert outcome == "result"
@pytest.mark.asyncio
async def test_fake(processor_app: "ProcessorApp"):
    # create_adapter/create_bot should instantiate the caller-supplied base
    # classes instead of the default fakes.
    from nonebot.adapters import Bot, Adapter

    class FakeAdapter(Adapter):
        ...

    class FakeBot(Bot):
        ...

    async with processor_app.test_api() as ctx:
        adapter = ctx.create_adapter(base=FakeAdapter)
        assert isinstance(adapter, FakeAdapter)
        # NOTE(review): the "fake"/"test" defaults presumably come from the
        # nonebug context helpers — confirm against the fixture implementation.
        assert adapter.get_name() == "fake"
        bot = ctx.create_bot(base=FakeBot, adapter=adapter)
        assert isinstance(bot, FakeBot)
        assert bot.self_id == "test"
acdec53cad4b8d638131137b73b89204a6005e70 | 12,180 | py | Python | webapi/servidorapi/views.py | aecheverria40/proyectomoviles | 5eff2b1c935324e07da3fb82a43b00e528d8c7c4 | [
"MIT"
] | null | null | null | webapi/servidorapi/views.py | aecheverria40/proyectomoviles | 5eff2b1c935324e07da3fb82a43b00e528d8c7c4 | [
"MIT"
] | null | null | null | webapi/servidorapi/views.py | aecheverria40/proyectomoviles | 5eff2b1c935324e07da3fb82a43b00e528d8c7c4 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.contrib.auth.models import User, Group
#Importar clase de rest_framework
from rest_framework import viewsets
#Importamos las cases de serializers.py
from servidorapi.serializers import UserSerializer, GroupSerializer, UserApiSerializer
#Cosas a importar
from rest_framework import status, permissions
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from .models import (Alumno, Boleta, Clase, Coordinador, Docente, Escuela,
Parcial)
#Importar Serializers
from .serializers import (AlumnoSerializer, BoletaSerializer,
    ClaseSerializer, CoordinadorSerializer, DocenteSerializer,
    EscuelaSerializer, ParcialSerializer)
from rest_framework.generics import CreateAPIView
from django.contrib.auth import get_user_model
User = get_user_model()
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
    '''
    API endpoint that allows users to be viewed or edited.
    '''
    # Newest accounts first.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    '''
    API endpoint that allows groups to be viewed or edited.
    '''
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class CoornidadorViewSet(viewsets.ModelViewSet):
    # NOTE(review): class name keeps the original "Coornidador" typo because
    # the router/URLconf may reference it; renaming is an interface change.
    queryset = Coordinador.objects.all()
    # NOTE(review): Coordinador rows are exposed through UserApiSerializer —
    # presumably it serializes the related User; confirm the intent.
    serializer_class = UserApiSerializer
    # Read/update only: POST and DELETE are deliberately excluded.
    http_method_names = ['get', 'put', 'patch', 'head']
class UserCreateAPI(CreateAPIView):
    # POST-only endpoint that registers a new User via UserApiSerializer.
    serializer_class = UserApiSerializer
    queryset = User.objects.all()
#De aqui para abajo los avances para la diapositiva
#De prueba POST y GET de Usar
@api_view(['GET','POST'])
def user_view(request, format=None):
    """List every User (GET) or register a new one (POST).

    Cleanup: removed a large block of commented-out experimentation code
    that obscured the actual logic.
    """
    if request.method == 'GET':
        serializer = UserApiSerializer(User.objects.all(), many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = UserApiSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'POST'])
def alumno_list(request, format=None):
    """List every Alumno (GET) or create a new one (POST)."""
    if request.method == 'POST':
        serializer = AlumnoSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: serialize the full queryset.
    serializer = AlumnoSerializer(Alumno.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def alumno_detail(request, pk, format=None):
    """Retrieve, update, or delete one Alumno by primary key."""
    try:
        instance = Alumno.objects.get(pk=pk)
    except Alumno.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'DELETE':
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = AlumnoSerializer(instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    # GET
    return Response(AlumnoSerializer(instance).data)
'''
Coordinador
'''
@api_view(['GET','POST'])
def coordinador_list(request):
    """List every Coordinador (GET) or create a new one (POST)."""
    if request.method == 'POST':
        serializer = CoordinadorSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: serialize the full queryset.
    serializer = CoordinadorSerializer(Coordinador.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def coordinador_detail(request, pk):
    """Retrieve, update, or delete one Coordinador by primary key."""
    try:
        instance = Coordinador.objects.get(pk=pk)
    except Coordinador.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'DELETE':
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = CoordinadorSerializer(instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    # GET
    return Response(CoordinadorSerializer(instance).data)
'''Vistas Brenda'''
'''
Escuela
'''
@api_view(['GET','POST'])
def escuela_list(request):
    """List every Escuela (GET) or create a new one (POST)."""
    if request.method == 'POST':
        serializer = EscuelaSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: serialize the full queryset.
    serializer = EscuelaSerializer(Escuela.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def escuela_detail(request, pk):
    """Retrieve, update, or delete one Escuela by primary key."""
    try:
        instance = Escuela.objects.get(pk=pk)
    except Escuela.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'DELETE':
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = EscuelaSerializer(instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    # GET
    return Response(EscuelaSerializer(instance).data)
'''
Docente
'''
@api_view(['GET','POST'])
def docente_list(request):
    """List every Docente (GET) or create a new one (POST)."""
    if request.method == 'POST':
        serializer = DocenteSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: serialize the full queryset.
    serializer = DocenteSerializer(Docente.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def docente_detail(request, pk):
    """Retrieve, update, or delete one Docente by primary key."""
    try:
        instance = Docente.objects.get(pk=pk)
    except Docente.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'DELETE':
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = DocenteSerializer(instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    # GET
    return Response(DocenteSerializer(instance).data)
'''
Boleta
'''
@api_view(['GET','POST'])
def boleta_list(request):
    """List every Boleta (GET) or create a new one (POST)."""
    if request.method == 'POST':
        serializer = BoletaSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: serialize the full queryset.
    serializer = BoletaSerializer(Boleta.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def boleta_detail(request, pk):
    """Retrieve, update, or delete one Boleta by primary key."""
    try:
        instance = Boleta.objects.get(pk=pk)
    except Boleta.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'DELETE':
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    if request.method == 'PUT':
        serializer = BoletaSerializer(instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    # GET
    return Response(BoletaSerializer(instance).data)
'''
Clase
'''
@api_view(['GET','POST'])
def clase_list(request):
    """List every Clase (GET) or create a new one (POST)."""
    if request.method == 'POST':
        serializer = ClaseSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    # GET: serialize the full queryset.
    serializer = ClaseSerializer(Clase.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def clase_detail(request, pk):
    """Retrieve, update, or delete one Clase by primary key.

    Fix: the GET branch serialized the Clase *model class*
    (``ClaseSerializer(Clase)``) instead of the fetched instance.
    """
    try:
        clase = Clase.objects.get(pk=pk)
    except Clase.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = ClaseSerializer(clase)  # fixed: was ClaseSerializer(Clase)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = ClaseSerializer(clase, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        clase.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
'''
Parcial
'''
@api_view(['GET','POST'])
def parcial_list(request):
    """List every Parcial (GET) or create a new one (POST).

    Fix: this view uses ParcialSerializer, which the module never imported
    (NameError at request time); it must be imported from .serializers.
    """
    if request.method == 'GET':
        parciales = Parcial.objects.all()
        serializer = ParcialSerializer(parciales, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = ParcialSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def parcial_detail(request, pk):
    """Retrieve, update, or delete one Parcial by primary key.

    Fixes: the GET branch serialized the Parcial *model class*
    (``ParcialSerializer(Parcial)``) instead of the fetched instance, and
    ParcialSerializer was never imported by the module.
    """
    try:
        parcial = Parcial.objects.get(pk=pk)
    except Parcial.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = ParcialSerializer(parcial)  # fixed: was ParcialSerializer(Parcial)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = ParcialSerializer(parcial, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        parcial.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
acdec5706a94f410d194cb872ff07027e390c27d | 6,360 | py | Python | notebooks/shared/nbconvert/filters/strings.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | 728 | 2018-09-21T03:51:04.000Z | 2022-03-28T09:35:04.000Z | notebooks/shared/nbconvert/filters/strings.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | 103 | 2018-09-02T12:26:32.000Z | 2022-02-09T07:19:08.000Z | notebooks/shared/nbconvert/filters/strings.py | leonbett/debuggingbook | ae1fa940c306160429232fbc93a7a7f14b44efb7 | [
"MIT"
] | 157 | 2018-09-02T08:00:50.000Z | 2022-03-27T22:04:50.000Z | # coding: utf-8
"""String filters.
Contains a collection of useful string manipulation filters for use in Jinja
templates.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import re
import textwrap
import warnings
try:
from urllib.parse import quote # Py 3
except ImportError:
from urllib2 import quote # Py 2
from xml.etree import ElementTree
from ipython_genutils import py3compat
__all__ = [
'wrap_text',
'html2text',
'add_anchor',
'strip_dollars',
'strip_files_prefix',
'comment_lines',
'get_lines',
'ipython2python',
'posix_path',
'path2url',
'add_prompts',
'ascii_only',
'prevent_list_blocks',
]
def wrap_text(text, width=100):
    """
    Intelligently wrap text.

    Each line of *text* is wrapped independently at *width* characters,
    breaking on whitespace so words stay intact where possible.

    Parameters
    ----------
    text : str
        Text to wrap.
    width : int, optional
        Number of characters to wrap to, default 100.
    """
    rewrapped = ('\n'.join(textwrap.wrap(line, width))
                 for line in text.split('\n'))
    return '\n'.join(rewrapped)
def html2text(element):
    """Extract the inner text of an html fragment.

    Analog of jQuery's $(element).text(). Accepts either a string or an
    Element; string input that fails to parse is returned unmodified.
    """
    if isinstance(element, py3compat.string_types):
        try:
            element = ElementTree.fromstring(element)
        except Exception:
            # failed to parse, just return it unmodified
            return element

    parts = [element.text or ""]
    parts.extend(html2text(child) for child in element)
    parts.append(element.tail or "")
    return "".join(parts)
def _convert_header_id(header_contents):
"""Convert header contents to valid id value. Takes string as input, returns string.
Note: this may be subject to change in the case of changes to how we wish to generate ids.
For use on markdown headings.
"""
return header_contents.replace(' ', '-')
def add_anchor(html, anchor_link_text=u'¶'):
    """Give an html header an ``id`` and append an anchor-link to it.

    For use on markdown headings. Unparseable markup is returned untouched.
    """
    try:
        heading = ElementTree.fromstring(py3compat.cast_bytes_py2(html, encoding='utf-8'))
    except Exception:
        # failed to parse, just return it unmodified
        return html
    link = _convert_header_id(html2text(heading))
    heading.set('id', link)
    anchor = ElementTree.Element("a", {"class" : "anchor-link", "href" : "#" + link})
    anchor.text = anchor_link_text
    heading.append(anchor)
    # Known issue of Python3.x: ElementTree.tostring() returns bytes instead
    # of text (see http://bugs.python.org/issue10942), so decode before
    # returning.
    return py3compat.decode(ElementTree.tostring(heading), 'utf-8')
def add_prompts(code, first='>>> ', cont='... '):
    """Prefix interpreter prompts onto a code snippet.

    The first line gets *first* (">>> ") and every following line gets
    *cont* ("... ").
    """
    lines = code.split('\n')
    prompted = [first + lines[0]]
    prompted.extend(cont + line for line in lines[1:])
    return '\n'.join(prompted)
def strip_dollars(text):
    """
    Remove leading and trailing dollar symbols from text.

    Note: only dollars at the start/end are stripped (str.strip semantics);
    dollar signs in the middle of the text are left alone.

    Parameters
    ----------
    text : str
        Text to remove dollars from
    """
    return text.strip('$')
# src/href attributes and markdown links that point at the fake "files/"
# prefix (optionally preceded by a slash).
files_url_pattern = re.compile(r'(src|href)\=([\'"]?)/?files/')
markdown_url_pattern = re.compile(r'(!?)\[(?P<caption>.*?)\]\(/?files/(?P<location>.*?)\)')

def strip_files_prefix(text):
    """Fix all fake URLs that start with ``files/`` by dropping the prefix.

    Applies both to html attributes (src=/href=) and to markdown link /
    image paths.

    Parameters
    ----------
    text : str
        Text in which to replace 'src="files/real...' with 'src="real...'
    """
    without_attr_prefix = files_url_pattern.sub(r"\1=\2", text)
    return markdown_url_pattern.sub(r'\1[\2](\3)', without_attr_prefix)
def comment_lines(text, prefix='# '):
    """Comment out every line of *text*.

    Parameters
    ----------
    text : str
        Text to comment out.
    prefix : str
        Character(s) to prepend to the start of each line.
    """
    # Prepend the prefix to each line individually, including the first.
    return '\n'.join(prefix + line for line in text.split('\n'))
def get_lines(text, start=None, end=None):
    """Return the slice of lines ``[start:end]`` of *text* re-joined.

    Parameters
    ----------
    text : str
        Text to parse lines from.
    start : int, optional
        First line to grab from.
    end : int, optional
        Last line to grab from.
    """
    selected = text.split("\n")[start:end]
    return "\n".join(selected)
def ipython2python(code):
    """Transform IPython-flavoured source into pure Python syntax.

    Falls back to returning *code* unchanged (after emitting a warning)
    when IPython is not importable.

    Parameters
    ----------
    code : str
        IPython code, to be transformed to pure Python
    """
    try:
        from IPython.core.inputsplitter import IPythonInputSplitter
    except ImportError:
        warnings.warn(
            "IPython is needed to transform IPython syntax to pure Python."
            " Install ipython if you need this functionality."
        )
        return code
    splitter = IPythonInputSplitter(line_input_checker=False)
    return splitter.transform_cell(code)
def posix_path(path):
    """Turn a native path into posix-style path/to/etc.

    Mainly for use in latex on Windows, where native Windows paths are
    not allowed; a no-op on platforms whose separator is already '/'.
    """
    sep = os.path.sep
    if sep == '/':
        return path
    return path.replace(sep, '/')
def path2url(path):
    """Turn a file path into a percent-encoded URL path."""
    quoted_parts = [quote(part) for part in path.split(os.path.sep)]
    return '/'.join(quoted_parts)
def ascii_only(s):
    """Coerce *s* to text with every non-ascii character replaced ('?')."""
    return py3compat.cast_unicode(s).encode('ascii', 'replace').decode('ascii')
def prevent_list_blocks(s):
    """
    Prevent presence of enumerate or itemize blocks in latex headings cells.

    Backslash-escapes a leading ``1.``, ``-``, ``+`` or ``*`` (after optional
    whitespace) so the heading is not rendered as a list item.

    Fix: the original used non-raw string literals ('\\s', '\\d', '\\.'
    written without the r prefix), which are invalid escape sequences and
    raise DeprecationWarning/SyntaxWarning on modern Python. Raw strings
    produce the identical patterns/templates without the warnings. Note the
    replacement ``\\.`` etc. is intentional: re's template keeps a backslash
    before non-letter characters, so the output contains the escape.
    """
    out = re.sub(r'(^\s*\d*)\.', r'\1\.', s)
    out = re.sub(r'(^\s*)\-', r'\1\-', out)
    out = re.sub(r'(^\s*)\+', r'\1\+', out)
    out = re.sub(r'(^\s*)\*', r'\1\*', out)
    return out
| 26.390041 | 94 | 0.620283 |
acdec579d598081ae7994ebc799c989c21af42a1 | 289 | py | Python | .history/section4/pp_01_20200730233019.py | KustomApe/python_engineer | cb931f5c685be87ea518c7a0a6dd89154dce03a8 | [
"MIT"
] | null | null | null | .history/section4/pp_01_20200730233019.py | KustomApe/python_engineer | cb931f5c685be87ea518c7a0a6dd89154dce03a8 | [
"MIT"
] | null | null | null | .history/section4/pp_01_20200730233019.py | KustomApe/python_engineer | cb931f5c685be87ea518c7a0a6dd89154dce03a8 | [
"MIT"
] | null | null | null | s = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# Demo of list indexing, slice assignment, and removal methods.
# ``s`` is the list of letters defined above.
print(s[0])
s[0] = 'X'
print(s[2:5])
# Slice assignment replaces the selected range in place.
s[2:5] = ['C', 'D', 'E']
print(s)
# Assigning an empty list to a slice deletes those elements.
s[2:5] = []
print(s)
# s[:] = [] empties the list in place (same object, no rebinding).
s[:] = []
print(s)
n = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
n.append(100)
n.insert(0, 200)
print(n)
# pop() with no argument removes and returns the last element.
n.pop()
print(n)
# pop(0) removes the first element (O(n) shift).
n.pop(0)
print(n)
# del removes by index without returning the value.
del n[0]
print(n)
| 9.633333 | 39 | 0.432526 |
acdec5aebb365bea2f95c699b7157197ac07768f | 3,745 | py | Python | src/matchmaker/settings/local.py | anuragb26/matchmaker | 92c55d72bd7896718aebd88fe7037f5dc763cfdd | [
"MIT"
] | null | null | null | src/matchmaker/settings/local.py | anuragb26/matchmaker | 92c55d72bd7896718aebd88fe7037f5dc763cfdd | [
"MIT"
] | 4 | 2020-06-05T17:50:59.000Z | 2021-06-10T20:00:15.000Z | src/matchmaker/settings/local.py | anuragb26/matchmaker | 92c55d72bd7896718aebd88fe7037f5dc763cfdd | [
"MIT"
] | null | null | null | """
Django settings for matchmaker project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: three levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#root of project
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'csqwlmc8s55o($rt6ozh7u+ui9zb-et00w$d90j8$^!nvj41_r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# NOTE(review): SMTP credentials are hard-coded placeholders here; real
# values should come from the environment, never be committed.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'yourgmail@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
If using gmail, you will need to
unlock Captcha to enable Django
to send for you:
https://accounts.google.com/displayunlockcaptcha
'''
# Application definition
INSTALLED_APPS = (
    #django app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #third party apps
    'crispy_forms',
    'registration',
    #my apps
    'matches',
    'newsletter',
    'questions',
    'profiles'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'matchmaker.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'matchmaker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "static_root")
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static_in_pro", "our_static"),
    #os.path.join(BASE_DIR, "static_in_env"),
    #'/var/www/static/',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "media_root")
#Crispy FORM TAGs SETTINGS
CRISPY_TEMPLATE_PACK = 'bootstrap3'
#DJANGO REGISTRATION REDUX SETTINGS
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
| 24.477124 | 87 | 0.707076 |
acdec5cf2b15936bbad91cfb0b8db45012267766 | 579 | py | Python | setup.py | ysong126/lstm_reshape | 4b75bc28ee45d857003d659be271a49202c3941c | [
"MIT"
] | null | null | null | setup.py | ysong126/lstm_reshape | 4b75bc28ee45d857003d659be271a49202c3941c | [
"MIT"
] | null | null | null | setup.py | ysong126/lstm_reshape | 4b75bc28ee45d857003d659be271a49202c3941c | [
"MIT"
] | null | null | null | lsimport setuptools
setuptools.setup(
name="lstm_preprocess",
version="0.1.0",
author="song111",
author_email="clarksoyoung@sina.com",
description="This package provides fundamental operation on reshaping dataframes for Keras LSTM input",
url="https://github.com/ysong126/lstm_reshaper",
project_urls={
"Bug Tracker": "https://github.com/ysong126/lstm_reshaper",
},
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 30.473684 | 107 | 0.661485 |
acdec5d2f2207859148e7afd78a404bc9954b24f | 351 | py | Python | explorer_backend/explorer_backend/urls.py | newearthmartin/hacktravel | 9acfa0a805b2285fc15a6edcd98858085779da4a | [
"Apache-2.0"
] | null | null | null | explorer_backend/explorer_backend/urls.py | newearthmartin/hacktravel | 9acfa0a805b2285fc15a6edcd98858085779da4a | [
"Apache-2.0"
] | null | null | null | explorer_backend/explorer_backend/urls.py | newearthmartin/hacktravel | 9acfa0a805b2285fc15a6edcd98858085779da4a | [
"Apache-2.0"
] | 1 | 2019-07-04T15:19:07.000Z | 2019-07-04T15:19:07.000Z | from django.conf import settings
from django.contrib import admin
from django.urls import path
from django.conf.urls.static import static
from scanner.views import home, orgs_view
# URL routes: admin site, the organisations listing, and the home page.
# Static files are only served this way in development (static() is a
# no-op when DEBUG is False).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('orgs', orgs_view),
    path('', home),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 29.25 | 67 | 0.754986 |
acdec6e175f1a2a3cb51a7af97e1cca35997b78f | 4,260 | py | Python | plugins/flytekit-kf-mpi/flytekitplugins/kfmpi/task.py | ggydush-fn/flytekit | 6530601c2538a5d804127a97f63291730b1ba1d8 | [
"Apache-2.0"
] | 1 | 2021-11-11T10:10:10.000Z | 2021-11-11T10:10:10.000Z | plugins/flytekit-kf-mpi/flytekitplugins/kfmpi/task.py | ggydush-fn/flytekit | 6530601c2538a5d804127a97f63291730b1ba1d8 | [
"Apache-2.0"
] | null | null | null | plugins/flytekit-kf-mpi/flytekitplugins/kfmpi/task.py | ggydush-fn/flytekit | 6530601c2538a5d804127a97f63291730b1ba1d8 | [
"Apache-2.0"
] | null | null | null | """
This Plugin adds the capability of running distributed MPI training to Flyte using backend plugins, natively on
Kubernetes. It leverages `MPI Job <https://github.com/kubeflow/mpi-operator>`_ Plugin from kubeflow.
"""
from dataclasses import dataclass
from typing import Any, Callable, Dict, List
from flyteidl.plugins import mpi_pb2 as _mpi_task
from google.protobuf.json_format import MessageToDict
from flytekit import PythonFunctionTask
from flytekit.extend import SerializationSettings, TaskPlugins
from flytekit.models import common as _common
class MPIJobModel(_common.FlyteIdlEntity):
    """Model definition for the MPI plugin.

    Args:
        num_workers: integer determining the number of worker replicas spawned in the cluster for this job
            (in addition to 1 master).
        num_launcher_replicas: Number of launcher server replicas to use
        slots: Number of slots per worker used in hostfile
    .. note::
        Please use resources=Resources(cpu="1"...) to specify per worker resource
    """
    def __init__(self, num_workers, num_launcher_replicas, slots):
        self._num_workers = num_workers
        self._num_launcher_replicas = num_launcher_replicas
        self._slots = slots
    @property
    def num_workers(self):
        return self._num_workers
    @property
    def num_launcher_replicas(self):
        return self._num_launcher_replicas
    @property
    def slots(self):
        return self._slots
    def to_flyte_idl(self):
        """Serialize to the flyteidl protobuf message."""
        return _mpi_task.DistributedMPITrainingTask(
            num_workers=self.num_workers, num_launcher_replicas=self.num_launcher_replicas, slots=self.slots
        )
    @classmethod
    def from_flyte_idl(cls, pb2_object):
        """Build a model instance from the flyteidl protobuf message."""
        return cls(
            num_workers=pb2_object.num_workers,
            num_launcher_replicas=pb2_object.num_launcher_replicas,
            slots=pb2_object.slots,
        )
@dataclass
class MPIJob(object):
    """
    Configuration for an executable `MPI Job <https://github.com/kubeflow/mpi-operator>`_. Use this
    to run distributed training on k8s with MPI

    Args:
        num_workers: integer determining the number of worker replicas spawned in the cluster for this job
            (in addition to 1 master).
        num_launcher_replicas: Number of launcher server replicas to use
        slots: Number of slots per worker used in hostfile
    """
    # slots has no default and therefore must precede the defaulted fields.
    slots: int
    num_launcher_replicas: int = 1
    num_workers: int = 1
class MPIFunctionTask(PythonFunctionTask[MPIJob]):
    """
    Plugin that submits a MPIJob (see https://github.com/kubeflow/mpi-operator)
    defined by the code within the _task_function to k8s cluster.
    """
    # Task type identifier picked up by the backend MPI plugin.
    _MPI_JOB_TASK_TYPE = "mpi"
    # Fixed mpirun prefix; "-np" and the python entrypoint are appended per
    # task in get_command().
    _MPI_BASE_COMMAND = [
        "mpirun",
        "--allow-run-as-root",
        "-bind-to",
        "none",
        "-map-by",
        "slot",
        "-x",
        "LD_LIBRARY_PATH",
        "-x",
        "PATH",
        "-x",
        "NCCL_DEBUG=INFO",
        "-mca",
        "pml",
        "ob1",
        "-mca",
        "btl",
        "^openib",
    ]
    def __init__(self, task_config: MPIJob, task_function: Callable, **kwargs):
        """Register the wrapped function as an "mpi"-typed task."""
        super().__init__(
            task_config=task_config,
            task_function=task_function,
            task_type=self._MPI_JOB_TASK_TYPE,
            **kwargs,
        )
    def get_command(self, settings: SerializationSettings) -> List[str]:
        """Build the full mpirun command line for this task.

        Total process count is workers * slots-per-worker.
        """
        cmd = super().get_command(settings)
        num_procs = self.task_config.num_workers * self.task_config.slots
        mpi_cmd = self._MPI_BASE_COMMAND + ["-np", f"{num_procs}"] + ["python", settings.entrypoint_settings.path] + cmd
        # the hostfile is set automatically by MPIOperator using env variable OMPI_MCA_orte_default_hostfile
        return mpi_cmd
    def get_custom(self, settings: SerializationSettings) -> Dict[str, Any]:
        """Serialize the MPIJob config into the task's custom protobuf dict."""
        job = MPIJobModel(
            num_workers=self.task_config.num_workers,
            num_launcher_replicas=self.task_config.num_launcher_replicas,
            slots=self.task_config.slots,
        )
        return MessageToDict(job.to_flyte_idl())
# Register the MPI plugin into the flytekit core plugin system so that tasks
# declared with task_config=MPIJob(...) are serialized via MPIFunctionTask.
TaskPlugins.register_pythontask_plugin(MPIJob, MPIFunctionTask)
| 31.094891 | 120 | 0.673005 |
acdec76ddf534c200292edf0ae4404f443848d73 | 6,139 | py | Python | datasets/librispeech_asr/librispeech_asr.py | zidingz/datasets | 02edd9ebc79f715adb1c718d1439fda83dc356f1 | [
"Apache-2.0"
] | 2 | 2021-11-14T09:11:43.000Z | 2021-11-14T10:07:49.000Z | datasets/librispeech_asr/librispeech_asr.py | zidingz/datasets | 02edd9ebc79f715adb1c718d1439fda83dc356f1 | [
"Apache-2.0"
] | null | null | null | datasets/librispeech_asr/librispeech_asr.py | zidingz/datasets | 02edd9ebc79f715adb1c718d1439fda83dc356f1 | [
"Apache-2.0"
] | 2 | 2021-12-01T16:25:34.000Z | 2021-12-01T16:25:42.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Librispeech automatic speech recognition dataset."""
import glob
import os
import datasets
from datasets.tasks import AutomaticSpeechRecognition
_CITATION = """\
@inproceedings{panayotov2015librispeech,
title={Librispeech: an ASR corpus based on public domain audio books},
author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
pages={5206--5210},
year={2015},
organization={IEEE}
}
"""
_DESCRIPTION = """\
LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,
prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
audiobooks from the LibriVox project, and has been carefully segmented and aligned.87
Note that in order to limit the required storage for preparing this dataset, the audio
is stored in the .flac format and is not converted to a float32 array. To convert, the audio
file to a float32 array, please make use of the `.map()` function as follows:
```python
import soundfile as sf
def map_to_array(batch):
speech_array, _ = sf.read(batch["file"])
batch["speech"] = speech_array
return batch
dataset = dataset.map(map_to_array, remove_columns=["file"])
```
"""
# Dataset homepage and download mirror; per-config archive URLs keyed first
# by config name ("clean"/"other"), then by split name.
_URL = "http://www.openslr.org/12"
_DL_URL = "http://www.openslr.org/resources/12/"
_DL_URLS = {
    "clean": {
        "dev": _DL_URL + "dev-clean.tar.gz",
        "test": _DL_URL + "test-clean.tar.gz",
        "train.100": _DL_URL + "train-clean-100.tar.gz",
        "train.360": _DL_URL + "train-clean-360.tar.gz",
    },
    "other": {
        "test": _DL_URL + "test-other.tar.gz",
        "dev": _DL_URL + "dev-other.tar.gz",
        "train.500": _DL_URL + "train-other-500.tar.gz",
    },
}
class LibrispeechASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriSpeechASR."""
    def __init__(self, **kwargs):
        """
        All arguments are forwarded to BuilderConfig via **kwargs; commonly
        used ones include:

        Args:
            data_dir: `string`, the path to the folder containing the files in the
                downloaded .tar
            citation: `string`, citation for the data set
            url: `string`, url for information about the data set
            **kwargs: keyword arguments forwarded to super.
        """
        # Version is pinned here so every config shares the same dataset version.
        super(LibrispeechASRConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)
class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """Librispeech dataset."""
    BUILDER_CONFIGS = [
        LibrispeechASRConfig(name="clean", description="'Clean' speech."),
        LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
    ]
    def _info(self):
        """Describe the dataset schema: 16 kHz audio plus transcript metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="text")],
        )
    def _split_generators(self, dl_manager):
        """Download the archives for the active config and declare splits.

        "clean" contributes train.100/train.360; "other" contributes
        train.500; both add dev and test.
        """
        archive_path = dl_manager.download_and_extract(_DL_URLS[self.config.name])
        if self.config.name == "clean":
            train_splits = [
                datasets.SplitGenerator(name="train.100", gen_kwargs={"archive_path": archive_path["train.100"]}),
                datasets.SplitGenerator(name="train.360", gen_kwargs={"archive_path": archive_path["train.360"]}),
            ]
        elif self.config.name == "other":
            train_splits = [
                datasets.SplitGenerator(name="train.500", gen_kwargs={"archive_path": archive_path["train.500"]}),
            ]
        # NOTE(review): an unknown config name would leave train_splits
        # unbound (NameError); the two configs above are the only ones
        # declared, so this is unreachable today.
        return train_splits + [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": archive_path["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path["test"]}),
        ]
    def _generate_examples(self, archive_path):
        """Generate examples from a LibriSpeech archive_path.

        Each *.txt transcript file holds "<utterance-id> <transcript>" lines;
        the matching audio is the sibling <utterance-id>.flac file.
        """
        transcripts_glob = os.path.join(archive_path, "LibriSpeech", "*/*/*/*.txt")
        key = 0
        for transcript_path in sorted(glob.glob(transcripts_glob)):
            transcript_dir_path = os.path.dirname(transcript_path)
            with open(transcript_path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    id_, transcript = line.split(" ", 1)
                    audio_file = f"{id_}.flac"
                    # Utterance ids look like "<speaker>-<chapter>-<seq>".
                    speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
                    yield key, {
                        "id": id_,
                        "speaker_id": speaker_id,
                        "chapter_id": chapter_id,
                        "file": os.path.join(transcript_dir_path, audio_file),
                        "audio": os.path.join(transcript_dir_path, audio_file),
                        "text": transcript,
                    }
                    key += 1
| 38.610063 | 118 | 0.628278 |
acdec842184e4530d12edf7829dee309c24c22b7 | 3,202 | bzl | Python | scala/scala_cross_version.bzl | andyscott/bazelbuild_rules_scala | 18889f641ed62dd09aeb71d1505f3834ccc33be3 | [
"Apache-2.0"
] | null | null | null | scala/scala_cross_version.bzl | andyscott/bazelbuild_rules_scala | 18889f641ed62dd09aeb71d1505f3834ccc33be3 | [
"Apache-2.0"
] | 1 | 2018-04-13T15:29:33.000Z | 2018-04-13T15:29:33.000Z | scala/scala_cross_version.bzl | andyscott/bazelbuild_rules_scala | 18889f641ed62dd09aeb71d1505f3834ccc33be3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"@io_bazel_rules_scala//scala:scala_maven_import_external.bzl",
_scala_maven_import_external = "scala_maven_import_external",
)
"""Helper functions for Scala cross-version support. Encapsulates the logic
of abstracting over Scala major version (2.11, 2.12, etc) for dependency
resolution."""
def default_scala_version():
    """return the scala version for use in maven coordinates"""
    return "2.11.12"
def default_scala_version_jar_shas():
    """SHA-256 checksums for the jars matching default_scala_version()."""
    return {
        "scala_compiler": "3e892546b72ab547cb77de4d840bcfd05c853e73390fed7370a8f19acb0735a0",
        "scala_library": "0b3d6fd42958ee98715ba2ec5fe221f4ca1e694d7c981b0ae0cd68e97baf6dce",
        "scala_reflect": "6ba385b450a6311a15c918cf8688b9af9327c6104f0ecbd35933cfcd3095fe04",
    }
def extract_major_version(scala_version):
    """Return major Scala version given a full version, e.g. "2.11.11" -> "2.11" """
    # The second dot cannot appear before index 2 ("2.x..."), so start the
    # search there and cut just before it.
    second_dot = scala_version.find(".", 2)
    return scala_version[:second_dot]

def extract_major_version_underscore(scala_version):
    """Return major Scala version with underscore given a full version,
    e.g. "2.11.11" -> "2_11" """
    major = extract_major_version(scala_version)
    return major.replace(".", "_")
def default_scala_major_version():
    """Major version (e.g. "2.11") derived from the default full version."""
    return extract_major_version(default_scala_version())

def scala_mvn_artifact(
        artifact,
        major_scala_version = default_scala_major_version()):
    """Add scala version to maven artifact"""
    parts = artifact.split(":")
    # group:artifact:version -> group:artifact_<major>:version
    return "%s:%s_%s:%s" % (parts[0], parts[1], major_scala_version, parts[2])
def new_scala_default_repository(
        scala_version,
        scala_version_jar_shas,
        maven_servers):
    """Declare the three core Scala jar repositories (library/compiler/reflect).

    Args:
        scala_version: full version string used in the maven coordinates.
        scala_version_jar_shas: dict with "scala_library"/"scala_compiler"/
            "scala_reflect" SHA-256 entries for download verification.
        maven_servers: list of mirror URLs to fetch from.
    """
    _scala_maven_import_external(
        name = "io_bazel_rules_scala_scala_library",
        artifact = "org.scala-lang:scala-library:{}".format(scala_version),
        jar_sha256 = scala_version_jar_shas["scala_library"],
        licenses = ["notice"],
        server_urls = maven_servers,
    )
    _scala_maven_import_external(
        name = "io_bazel_rules_scala_scala_compiler",
        artifact = "org.scala-lang:scala-compiler:{}".format(scala_version),
        jar_sha256 = scala_version_jar_shas["scala_compiler"],
        licenses = ["notice"],
        server_urls = maven_servers,
    )
    _scala_maven_import_external(
        name = "io_bazel_rules_scala_scala_reflect",
        artifact = "org.scala-lang:scala-reflect:{}".format(scala_version),
        jar_sha256 = scala_version_jar_shas["scala_reflect"],
        licenses = ["notice"],
        server_urls = maven_servers,
    )
| 39.530864 | 93 | 0.721424 |
acdec88628534d838ce98a893cf13a95c102555e | 1,298 | py | Python | JarvisAI/JarvisAI/setup.py | MaroonAngel/Jarvis_AI | 7757be9672c7eb4119aa8a2d3d8c8769ef9b6101 | [
"MIT"
] | null | null | null | JarvisAI/JarvisAI/setup.py | MaroonAngel/Jarvis_AI | 7757be9672c7eb4119aa8a2d3d8c8769ef9b6101 | [
"MIT"
] | null | null | null | JarvisAI/JarvisAI/setup.py | MaroonAngel/Jarvis_AI | 7757be9672c7eb4119aa8a2d3d8c8769ef9b6101 | [
"MIT"
] | null | null | null | import setuptools
from setuptools import find_namespace_packages
# Long description for PyPI is the repository README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="JarvisAI",
    version="3.5",
    author="Dipesh",
    author_email="dipeshpal17@gmail.com",
    description="JarvisAI is AI python library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Dipeshpal/Jarvis_AI",
    include_package_data=True,
    packages=find_namespace_packages(include=['JarvisAI.*', 'JarvisAI']),
    install_requires=['numpy', 'gtts==2.2.1', 'playsound==1.2.2',
                      'SpeechRecognition==3.8.1', 'pipwin==0.5.0', 'lxml==4.6.1', 'pyjokes',
                      'beautifulsoup4==4.9.3', 'wikipedia==1.4.0', 'auto_face_recognition', 'transformers==4.3.2',
                      'lazyme==0.0.23', 'librosa==0.8.0', "torch==1.7.1", "requests", "opencv-contrib-python==4.5.2.52",
                      "opencv-python==4.5.2.52", "cvzone==1.1.1", "pyttsx3", "googlesearch-python", "spacy",
                      "mediapipe==0.8.8"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 39.333333 | 120 | 0.607858 |
acdec8c74d0e42c3480a4ad10ba5bf517107beca | 6,941 | py | Python | dulwich/file.py | upstream-janitor/dulwich | a92ab5e826872ddac1cbd72fa1b6a41fe08c6834 | [
"Apache-2.0"
] | 624 | 2018-01-25T02:40:53.000Z | 2022-02-02T12:38:55.000Z | dulwich/file.py | upstream-janitor/dulwich | a92ab5e826872ddac1cbd72fa1b6a41fe08c6834 | [
"Apache-2.0"
] | 358 | 2015-01-06T11:36:42.000Z | 2022-03-20T01:09:47.000Z | dulwich/file.py | upstream-janitor/dulwich | a92ab5e826872ddac1cbd72fa1b6a41fe08c6834 | [
"Apache-2.0"
] | 166 | 2015-01-09T21:10:40.000Z | 2022-03-15T08:20:35.000Z | # file.py -- Safe access to git files
# Copyright (C) 2010 Google, Inc.
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Safe access to git files."""
import io
import os
import sys
def ensure_dir_exists(dirname):
    """Ensure a directory exists, creating if necessary."""
    try:
        os.makedirs(dirname)
    except FileExistsError:
        # Already present; deliberately ignored. Note this also swallows the
        # case where a non-directory entry exists at *dirname*.
        pass
def _fancy_rename(oldname, newname):
    """Rename file with temporary backup file to rollback if rename fails"""
    # Fast path: destination absent, a plain rename is safe.
    if not os.path.exists(newname):
        try:
            os.rename(oldname, newname)
        except OSError:
            raise
        return
    # Defer the tempfile import since it pulls in a lot of other things.
    import tempfile
    # destination file exists
    # Probe that we can create (and delete) a temp file next to the target
    # before touching anything.
    try:
        (fd, tmpfile) = tempfile.mkstemp(".tmp", prefix=oldname, dir=".")
        os.close(fd)
        os.remove(tmpfile)
    except OSError:
        # either file could not be created (e.g. permission problem)
        # or could not be deleted (e.g. rude virus scanner)
        raise
    # Move the existing destination aside as a backup.
    try:
        os.rename(newname, tmpfile)
    except OSError:
        raise # no rename occurred
    # Attempt the real rename; on failure restore the backup before
    # propagating the error.
    try:
        os.rename(oldname, newname)
    except OSError:
        os.rename(tmpfile, newname)
        raise
    os.remove(tmpfile)
def GitFile(filename, mode="rb", bufsize=-1, mask=0o644):
    """Open *filename* following the git file locking protocol.

    Returns: a builtin file object (reads) or a _GitFile object (writes).

    Note: See _GitFile for a description of the file locking protocol.
        Only read-only and write-only (binary) modes are supported; r+, w+,
        and a are not. To read and write from the same file, you can take
        advantage of the fact that opening a file for write does not
        actually open the file you request.

    The default file mask makes any created files user-writable and
    world-readable.
    """
    rejected_flags = (
        ("a", "append mode not supported for Git files"),
        ("+", "read/write mode not supported for Git files"),
    )
    for flag, message in rejected_flags:
        if flag in mode:
            raise IOError(message)
    if "b" not in mode:
        raise IOError("text mode not supported for Git files")
    if "w" in mode:
        # Writes go through the lockfile protocol.
        return _GitFile(filename, mode, bufsize, mask)
    # Reads use an ordinary buffered file object.
    return io.open(filename, mode, bufsize)
class FileLocked(Exception):
    """Raised when the lockfile for a path already exists."""

    def __init__(self, filename, lockfilename):
        super().__init__(filename, lockfilename)
        self.filename = filename
        self.lockfilename = lockfilename
class _GitFile(object):
    """File that follows the git locking protocol for writes.
    All writes to a file foo will be written into foo.lock in the same
    directory, and the lockfile will be renamed to overwrite the original file
    on close.
    Note: You *must* call close() or abort() on a _GitFile for the lock to be
        released. Typically this will happen in a finally block.
    """
    # Attribute names forwarded to the underlying file via __getattr__.
    PROXY_PROPERTIES = set(
        [
            "closed",
            "encoding",
            "errors",
            "mode",
            "name",
            "newlines",
            "softspace",
        ]
    )
    # Method names bound directly onto the instance in __init__.
    PROXY_METHODS = (
        "__iter__",
        "flush",
        "fileno",
        "isatty",
        "read",
        "readline",
        "readlines",
        "seek",
        "tell",
        "truncate",
        "write",
        "writelines",
    )
    def __init__(self, filename, mode, bufsize, mask):
        """Atomically create <filename>.lock and open it as the backing file.

        Raises FileLocked if the lockfile already exists (someone else holds
        the lock).
        """
        self._filename = filename
        # Keep the lockfile name the same type (bytes/str) as the filename.
        if isinstance(self._filename, bytes):
            self._lockfilename = self._filename + b".lock"
        else:
            self._lockfilename = self._filename + ".lock"
        try:
            # O_CREAT|O_EXCL makes creation atomic: it fails if the lockfile
            # already exists. O_BINARY only exists on Windows.
            fd = os.open(
                self._lockfilename,
                os.O_RDWR | os.O_CREAT | os.O_EXCL | getattr(os, "O_BINARY", 0),
                mask,
            )
        except FileExistsError:
            raise FileLocked(filename, self._lockfilename)
        self._file = os.fdopen(fd, mode, bufsize)
        self._closed = False
        # Bind proxied methods directly so calls bypass __getattr__.
        for method in self.PROXY_METHODS:
            setattr(self, method, getattr(self._file, method))
    def abort(self):
        """Close and discard the lockfile without overwriting the target.
        If the file is already closed, this is a no-op.
        """
        if self._closed:
            return
        self._file.close()
        try:
            os.remove(self._lockfilename)
            self._closed = True
        except FileNotFoundError:
            # The file may have been removed already, which is ok.
            self._closed = True
    def close(self):
        """Close this file, saving the lockfile over the original.
        Note: If this method fails, it will attempt to delete the lockfile.
            However, it is not guaranteed to do so (e.g. if a filesystem
            becomes suddenly read-only), which will prevent future writes to
            this file until the lockfile is removed manually.
        Raises:
          OSError: if the original file could not be overwritten. The
            lock file is still closed, so further attempts to write to the same
            file object will raise ValueError.
        """
        if self._closed:
            return
        # Make sure the data hits disk before the atomic rename publishes it.
        os.fsync(self._file.fileno())
        self._file.close()
        try:
            if getattr(os, "replace", None) is not None:
                os.replace(self._lockfilename, self._filename)
            else:
                if sys.platform != "win32":
                    os.rename(self._lockfilename, self._filename)
                else:
                    # Windows versions prior to Vista don't support atomic
                    # renames
                    _fancy_rename(self._lockfilename, self._filename)
        finally:
            # abort() is a no-op on success (self._closed set) but cleans up
            # the lockfile if the rename failed.
            self.abort()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def __getattr__(self, name):
        """Proxy property calls to the underlying file."""
        if name in self.PROXY_PROPERTIES:
            return getattr(self._file, name)
        raise AttributeError(name)
acdec904fc133234addd7e7ced6bd5f80b5425ae | 490 | py | Python | chat/apps.py | moustaphacheikh/pychat | 05a8b255efda03840a9ce8d39a9ee1d38b87ea67 | [
"MIT"
] | null | null | null | chat/apps.py | moustaphacheikh/pychat | 05a8b255efda03840a9ce8d39a9ee1d38b87ea67 | [
"MIT"
] | null | null | null | chat/apps.py | moustaphacheikh/pychat | 05a8b255efda03840a9ce8d39a9ee1d38b87ea67 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as etree
from django.apps import AppConfig
from django.conf import settings
class DefaultSettingsConfig(AppConfig):
    name = 'chat'
    verbose_name = 'pychat'
    # Populated once at startup from DefaultScheme.xml (tag -> color value).
    colors = {}

    def load_config(self):
        """Load the default color scheme for the HTML chat page."""
        scheme = etree.parse(settings.BASE_DIR + '/chat/DefaultScheme.xml')
        for node in scheme.getroot().find('colors'):
            self.colors[node.tag] = node.text

    def ready(self):
        # Django calls ready() once the app registry is fully loaded.
        self.load_config()
| 21.304348 | 67 | 0.72449 |
acdec909bc7e701724e0bbdac2a38c26d51de4bf | 3,333 | py | Python | api/cases/libraries/get_flags.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/cases/libraries/get_flags.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/cases/libraries/get_flags.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z | from django.db.models import QuerySet, When, Case as DB_Case, IntegerField, BinaryField
from api.cases.enums import CaseTypeSubTypeEnum
from api.cases.models import Case
from api.flags.enums import FlagLevels
from api.flags.models import Flag
from api.flags.serializers import CaseListFlagSerializer
from api.teams.models import Team
def get_goods_flags(case, case_type):
    """Return the flags attached to the goods of the given case.

    The relation used depends on the case sub-type; unknown sub-types
    yield an empty queryset.
    """
    linked_via_good_on_application = (
        CaseTypeSubTypeEnum.STANDARD,
        CaseTypeSubTypeEnum.EUA,
        CaseTypeSubTypeEnum.EXHIBITION,
        CaseTypeSubTypeEnum.GIFTING,
        CaseTypeSubTypeEnum.F680,
    )
    if case_type in linked_via_good_on_application:
        return Flag.objects.filter(goods__goods_on_application__application_id=case.id)
    if case_type in (CaseTypeSubTypeEnum.OPEN, CaseTypeSubTypeEnum.HMRC):
        return Flag.objects.filter(goods_type__application_id=case.id)
    if case_type == CaseTypeSubTypeEnum.GOODS:
        return Flag.objects.filter(goods__good__id=case.id)
    return Flag.objects.none()
def get_destination_flags(case, case_type):
    """Return the flags attached to the destinations (parties/countries) of the case."""
    if case_type == CaseTypeSubTypeEnum.EUA:
        return Flag.objects.filter(parties__parties_on_application__application_id=case.id)
    if case_type == CaseTypeSubTypeEnum.OPEN:
        # Open cases collect flags from both country-on-application links
        # and the countries themselves.
        via_application = Flag.objects.filter(countries_on_applications__application_id=case.id)
        via_country = Flag.objects.filter(
            countries__countries_on_application__application_id=case.id
        )
        return via_application | via_country
    if case_type == CaseTypeSubTypeEnum.STANDARD:
        # Only parties that have not been soft-deleted from the application.
        return Flag.objects.filter(
            parties__parties_on_application__application_id=case.id,
            parties__parties_on_application__deleted_at__isnull=True,
        )
    return Flag.objects.none()
def get_flags(case: Case) -> QuerySet:
    """
    Get all case flags in no particular order
    """
    # case.case_type should be prefetched by the caller, otherwise an
    # additional query is made for every case.
    sub_type = case.case_type.sub_type
    return (
        get_goods_flags(case, sub_type)
        | get_destination_flags(case, sub_type)
        | case.flags.all()
        | Flag.objects.filter(organisations__cases__id=case.id)
    )
def get_ordered_flags(case: Case, team: Team, limit: int = None):
    """Serialize the flags of a case in display order.

    Flags belonging to the requesting user's team come first, then flags
    ordered by level (good, destination, case, then everything else) and
    by ascending priority.

    :param case: case object the flags relate to
    :param team: the team of the user making the request
    :param limit: when given, return at most this many flags
    :return: list of serialized flags
    """
    own_team_first = DB_Case(
        When(team_id=team.id, then=True), default=False, output_field=BinaryField()
    )
    level_rank = DB_Case(
        When(level=FlagLevels.GOOD, then=0),
        When(level=FlagLevels.DESTINATION, then=1),
        When(level=FlagLevels.CASE, then=2),
        default=3,
        output_field=IntegerField(),
    )
    flags = (
        get_flags(case)
        .annotate(my_team=own_team_first, order=level_rank)
        .order_by("-my_team", "order", "priority")
    )
    if limit:
        flags = flags[:limit]
    return CaseListFlagSerializer(flags, many=True).data
| 36.626374 | 116 | 0.717372 |
acdec9a5b1b0b85f719408bfc543cf9debb94021 | 13,978 | py | Python | prompt_toolkit/input/vt100_parser.py | andrii-dubytskyi/python-prompt-toolkit | b476a8c86787b16859f0819943494a7e5e49a2f9 | [
"BSD-3-Clause"
] | null | null | null | prompt_toolkit/input/vt100_parser.py | andrii-dubytskyi/python-prompt-toolkit | b476a8c86787b16859f0819943494a7e5e49a2f9 | [
"BSD-3-Clause"
] | null | null | null | prompt_toolkit/input/vt100_parser.py | andrii-dubytskyi/python-prompt-toolkit | b476a8c86787b16859f0819943494a7e5e49a2f9 | [
"BSD-3-Clause"
] | null | null | null | """
Parser for VT100 input stream.
"""
from __future__ import unicode_literals
import re
import six
from six.moves import range
from ..keys import Keys
from ..key_binding.key_processor import KeyPress
__all__ = [
    'Vt100Parser',
]

# Regex matching any CPR response
# (Note that we use '\Z' instead of '$', because '$' could include a trailing
# newline.)
_cpr_response_re = re.compile('^' + re.escape('\x1b[') + r'\d+;\d+R\Z')

# Mouse events:
# Typical: "Esc[MaB*" Urxvt: "Esc[96;14;13M" and for Xterm SGR: "Esc[<64;85;12M"
_mouse_event_re = re.compile('^' + re.escape('\x1b[') + r'(<?[\d;]+[mM]|M...)\Z')

# Regex matching any valid prefix of a CPR response.
# (Note that it doesn't contain the last character, the 'R'. The prefix has to
# be shorter.)
_cpr_response_prefix_re = re.compile('^' + re.escape('\x1b[') + r'[\d;]*\Z')

# Likewise, any valid prefix of a mouse event (see _mouse_event_re above).
_mouse_event_prefix_re = re.compile('^' + re.escape('\x1b[') + r'(<?[\d;]*|M.{0,2})\Z')
class _Flush(object):
    """Sentinel sent through the parser coroutine (instead of a character) to
    force processing of a still-ambiguous prefix, e.g. a lone Escape press."""
    pass
# Mapping of vt100 escape codes to Keys.
# A value may also be a tuple of keys, meaning one sequence produces several
# key presses (see the Meta+arrow entries below).
ANSI_SEQUENCES = {
    # Control characters (C0 range).
    '\x00': Keys.ControlAt,  # Control-At (Also for Ctrl-Space)
    '\x01': Keys.ControlA,  # Control-A (home)
    '\x02': Keys.ControlB,  # Control-B (emacs cursor left)
    '\x03': Keys.ControlC,  # Control-C (interrupt)
    '\x04': Keys.ControlD,  # Control-D (exit)
    '\x05': Keys.ControlE,  # Control-E (end)
    '\x06': Keys.ControlF,  # Control-F (cursor forward)
    '\x07': Keys.ControlG,  # Control-G
    '\x08': Keys.ControlH,  # Control-H (8) (Identical to '\b')
    '\x09': Keys.ControlI,  # Control-I (9) (Identical to '\t')
    '\x0a': Keys.ControlJ,  # Control-J (10) (Identical to '\n')
    '\x0b': Keys.ControlK,  # Control-K (delete until end of line; vertical tab)
    '\x0c': Keys.ControlL,  # Control-L (clear; form feed)
    '\x0d': Keys.ControlM,  # Control-M (13) (Identical to '\r')
    '\x0e': Keys.ControlN,  # Control-N (14) (history forward)
    '\x0f': Keys.ControlO,  # Control-O (15)
    '\x10': Keys.ControlP,  # Control-P (16) (history back)
    '\x11': Keys.ControlQ,  # Control-Q
    '\x12': Keys.ControlR,  # Control-R (18) (reverse search)
    '\x13': Keys.ControlS,  # Control-S (19) (forward search)
    '\x14': Keys.ControlT,  # Control-T
    '\x15': Keys.ControlU,  # Control-U
    '\x16': Keys.ControlV,  # Control-V
    '\x17': Keys.ControlW,  # Control-W
    '\x18': Keys.ControlX,  # Control-X
    '\x19': Keys.ControlY,  # Control-Y (25)
    '\x1a': Keys.ControlZ,  # Control-Z

    '\x1b': Keys.Escape,            # Also Control-[
    '\x1c': Keys.ControlBackslash,  # Both Control-\ (also Ctrl-| )
    '\x1d': Keys.ControlSquareClose,  # Control-]
    '\x1e': Keys.ControlCircumflex,  # Control-^
    '\x1f': Keys.ControlUnderscore,  # Control-underscore (Also for Ctrl-hyphen.)

    # ASCII Delete (0x7f)
    # Vt220 (and Linux terminal) send this when pressing backspace. We map this
    # to ControlH, because that will make it easier to create key bindings that
    # work everywhere, with the trade-off that it's no longer possible to
    # handle backspace and control-h individually for the few terminals that
    # support it. (Most terminals send ControlH when backspace is pressed.)
    # See: http://www.ibb.net/~anne/keyboard.html
    '\x7f': Keys.ControlH,

    # Cursor, home/end and editing keys.
    '\x1b[A': Keys.Up,
    '\x1b[B': Keys.Down,
    '\x1b[C': Keys.Right,
    '\x1b[D': Keys.Left,
    '\x1b[H': Keys.Home,
    '\x1bOH': Keys.Home,
    '\x1b[F': Keys.End,
    '\x1bOF': Keys.End,
    '\x1b[3~': Keys.Delete,
    '\x1b[3;2~': Keys.ShiftDelete,  # xterm, gnome-terminal.
    '\x1b[3;5~': Keys.ControlDelete,  # xterm, gnome-terminal.
    '\x1b[1~': Keys.Home,  # tmux
    '\x1b[4~': Keys.End,  # tmux
    '\x1b[5~': Keys.PageUp,
    '\x1b[6~': Keys.PageDown,
    '\x1b[7~': Keys.Home,  # xrvt
    '\x1b[8~': Keys.End,  # xrvt
    '\x1b[Z': Keys.BackTab,  # shift + tab
    '\x1b[2~': Keys.Insert,

    # Function keys.
    '\x1bOP': Keys.F1,
    '\x1bOQ': Keys.F2,
    '\x1bOR': Keys.F3,
    '\x1bOS': Keys.F4,
    '\x1b[[A': Keys.F1,  # Linux console.
    '\x1b[[B': Keys.F2,  # Linux console.
    '\x1b[[C': Keys.F3,  # Linux console.
    '\x1b[[D': Keys.F4,  # Linux console.
    '\x1b[[E': Keys.F5,  # Linux console.
    '\x1b[11~': Keys.F1,  # rxvt-unicode
    '\x1b[12~': Keys.F2,  # rxvt-unicode
    '\x1b[13~': Keys.F3,  # rxvt-unicode
    '\x1b[14~': Keys.F4,  # rxvt-unicode
    '\x1b[15~': Keys.F5,
    '\x1b[17~': Keys.F6,
    '\x1b[18~': Keys.F7,
    '\x1b[19~': Keys.F8,
    '\x1b[20~': Keys.F9,
    '\x1b[21~': Keys.F10,
    '\x1b[23~': Keys.F11,
    '\x1b[24~': Keys.F12,
    '\x1b[25~': Keys.F13,
    '\x1b[26~': Keys.F14,
    '\x1b[28~': Keys.F15,
    '\x1b[29~': Keys.F16,
    '\x1b[31~': Keys.F17,
    '\x1b[32~': Keys.F18,
    '\x1b[33~': Keys.F19,
    '\x1b[34~': Keys.F20,

    # Xterm
    '\x1b[1;2P': Keys.F13,
    '\x1b[1;2Q': Keys.F14,
    # '\x1b[1;2R': Keys.F15,  # Conflicts with CPR response.
    '\x1b[1;2S': Keys.F16,
    '\x1b[15;2~': Keys.F17,
    '\x1b[17;2~': Keys.F18,
    '\x1b[18;2~': Keys.F19,
    '\x1b[19;2~': Keys.F20,
    '\x1b[20;2~': Keys.F21,
    '\x1b[21;2~': Keys.F22,
    '\x1b[23;2~': Keys.F23,
    '\x1b[24;2~': Keys.F24,

    # Control/shift + arrows.
    '\x1b[1;5A': Keys.ControlUp,  # Cursor Mode
    '\x1b[1;5B': Keys.ControlDown,  # Cursor Mode
    '\x1b[1;5C': Keys.ControlRight,  # Cursor Mode
    '\x1b[1;5D': Keys.ControlLeft,  # Cursor Mode
    '\x1b[1;2A': Keys.ShiftUp,
    '\x1b[1;2B': Keys.ShiftDown,
    '\x1b[1;2C': Keys.ShiftRight,
    '\x1b[1;2D': Keys.ShiftLeft,

    # Tmux sends following keystrokes when control+arrow is pressed, but for
    # Emacs ansi-term sends the same sequences for normal arrow keys. Consider
    # it a normal arrow press, because that's more important.
    '\x1bOA': Keys.Up,
    '\x1bOB': Keys.Down,
    '\x1bOC': Keys.Right,
    '\x1bOD': Keys.Left,

    '\x1b[5A': Keys.ControlUp,
    '\x1b[5B': Keys.ControlDown,
    '\x1b[5C': Keys.ControlRight,
    '\x1b[5D': Keys.ControlLeft,

    '\x1bOc': Keys.ControlRight,  # rxvt
    '\x1bOd': Keys.ControlLeft,  # rxvt

    # Tmux (Win32 subsystem) sends the following scroll events.
    '\x1b[62~': Keys.ScrollUp,
    '\x1b[63~': Keys.ScrollDown,

    '\x1b[200~': Keys.BracketedPaste,  # Start of bracketed paste.

    # Meta + arrow keys. Several terminals handle this differently.
    # The following sequences are for xterm and gnome-terminal.
    # (Iterm sends ESC followed by the normal arrow_up/down/left/right
    # sequences, and the OSX Terminal sends ESCb and ESCf for "alt
    # arrow_left" and "alt arrow_right." We don't handle these
    # explicitly, in here, because would could not distinguish between
    # pressing ESC (to go to Vi navigation mode), followed by just the
    # 'b' or 'f' key. These combinations are handled in
    # the input processor.)
    '\x1b[1;3D': (Keys.Escape, Keys.Left),
    '\x1b[1;3C': (Keys.Escape, Keys.Right),
    '\x1b[1;3A': (Keys.Escape, Keys.Up),
    '\x1b[1;3B': (Keys.Escape, Keys.Down),

    # Sequences generated by numpad 5. Not sure what it means. (It doesn't
    # appear in 'infocmp'. Just ignore.
    '\x1b[E': Keys.Ignore,  # Xterm.
    '\x1b[G': Keys.Ignore,  # Linux console.
}
class _IsPrefixOfLongerMatchCache(dict):
    """
    Lazy cache mapping an input sequence to True when some longer known
    sequence (a CPR/mouse response or an ANSI_SEQUENCES key) starts with it.
    """
    def __missing__(self, prefix):
        # CPR and mouse responses contain variable integers, so they are
        # matched with dedicated prefix regexes rather than the lookup table.
        could_grow = bool(_cpr_response_prefix_re.match(prefix)
                          or _mouse_event_prefix_re.match(prefix))
        if not could_grow:
            # Otherwise, check whether any known sequence strictly extends it.
            could_grow = any(v for k, v in ANSI_SEQUENCES.items()
                             if k.startswith(prefix) and k != prefix)
        self[prefix] = could_grow
        return could_grow


_IS_PREFIX_OF_LONGER_MATCH_CACHE = _IsPrefixOfLongerMatchCache()
class Vt100Parser(object):
    """
    Parser for VT100 input stream.
    Data can be fed through the `feed` method and the given callback will be
    called with KeyPress objects.
    ::
        def callback(key):
            pass
        i = Vt100Parser(callback)
        i.feed('data\x01...')
    :attr feed_key_callback: Function that will be called when a key is parsed.
    """
    # Lookup table of ANSI escape sequences for a VT100 terminal
    # Hint: in order to know what sequences your terminal writes to stdin, run
    # "od -c" and start typing.

    def __init__(self, feed_key_callback):
        assert callable(feed_key_callback)
        self.feed_key_callback = feed_key_callback
        self.reset()

    def reset(self, request=False):
        # NOTE(review): `request` is unused in this method; presumably kept
        # for interface compatibility with another input class -- confirm
        # before removing.
        self._in_bracketed_paste = False
        self._start_parser()

    def _start_parser(self):
        """
        Start the parser coroutine.
        """
        self._input_parser = self._input_parser_generator()
        # Prime the coroutine so it is suspended at its first `yield`,
        # ready to receive characters via send().
        self._input_parser.send(None)

    def _get_match(self, prefix):
        """
        Return the key that maps to this prefix, or None when the prefix is
        not (yet) a complete known sequence.
        """
        # (hard coded) If we match a CPR response, return Keys.CPRResponse.
        # (This one doesn't fit in the ANSI_SEQUENCES, because it contains
        # integer variables.)
        if _cpr_response_re.match(prefix):
            return Keys.CPRResponse
        elif _mouse_event_re.match(prefix):
            return Keys.Vt100MouseEvent
        # Otherwise, use the mappings.
        try:
            return ANSI_SEQUENCES[prefix]
        except KeyError:
            return None

    def _input_parser_generator(self):
        """
        Coroutine (state machine) for the input parser.

        Receives one character per send(); `_Flush` forces the pending
        (possibly ambiguous) prefix to be resolved immediately.
        """
        prefix = ''    # Characters received so far that may still form a sequence.
        retry = False  # When True, re-process `prefix` without reading new input.
        flush = False
        while True:
            flush = False
            if retry:
                retry = False
            else:
                # Get next character.
                c = yield
                if c == _Flush:
                    flush = True
                else:
                    prefix += c
            # If we have some data, check for matches.
            if prefix:
                is_prefix_of_longer_match = _IS_PREFIX_OF_LONGER_MATCH_CACHE[prefix]
                match = self._get_match(prefix)
                # Exact matches found, call handlers..
                if (flush or not is_prefix_of_longer_match) and match:
                    self._call_handler(match, prefix)
                    prefix = ''
                # No exact match found.
                elif (flush or not is_prefix_of_longer_match) and not match:
                    found = False
                    retry = True
                    # Loop over the input, try the longest match first and
                    # shift.
                    # NOTE(review): upstream versions of this parser `break`
                    # out of this loop after the first (longest) match;
                    # without a break, later iterations test slices of the
                    # already-shortened prefix -- confirm this is intended.
                    for i in range(len(prefix), 0, -1):
                        match = self._get_match(prefix[:i])
                        if match:
                            self._call_handler(match, prefix[:i])
                            prefix = prefix[i:]
                            found = True
                    if not found:
                        # Nothing matched: emit the first character verbatim
                        # and retry parsing with the remainder.
                        self._call_handler(prefix[0], prefix[0])
                        prefix = prefix[1:]

    def _call_handler(self, key, insert_text):
        """
        Callback to handler.
        """
        if isinstance(key, tuple):
            # A tuple means one sequence produces several key presses
            # (e.g. Meta+arrow is Escape followed by the arrow key).
            for k in key:
                self._call_handler(k, insert_text)
        else:
            if key == Keys.BracketedPaste:
                # Enter paste mode: raw input is buffered until the end mark
                # instead of being parsed character by character.
                self._in_bracketed_paste = True
                self._paste_buffer = ''
            else:
                self.feed_key_callback(KeyPress(key, insert_text))

    def feed(self, data):
        """
        Feed the input stream.
        :param data: Input string (unicode).
        """
        assert isinstance(data, six.text_type)
        # Handle bracketed paste. (We bypass the parser that matches all other
        # key presses and keep reading input until we see the end mark.)
        # This is much faster then parsing character by character.
        if self._in_bracketed_paste:
            self._paste_buffer += data
            end_mark = '\x1b[201~'
            if end_mark in self._paste_buffer:
                end_index = self._paste_buffer.index(end_mark)
                # Feed content to key bindings.
                paste_content = self._paste_buffer[:end_index]
                self.feed_key_callback(KeyPress(Keys.BracketedPaste, paste_content))
                # Quit bracketed paste mode and handle remaining input.
                self._in_bracketed_paste = False
                remaining = self._paste_buffer[end_index + len(end_mark):]
                self._paste_buffer = ''
                self.feed(remaining)
        # Handle normal input character by character.
        else:
            for i, c in enumerate(data):
                if self._in_bracketed_paste:
                    # Quit loop and process from this position when the parser
                    # entered bracketed paste.
                    self.feed(data[i:])
                    break
                else:
                    self._input_parser.send(c)

    def flush(self):
        """
        Flush the buffer of the input stream.
        This will allow us to handle the escape key (or maybe meta) sooner.
        The input received by the escape key is actually the same as the first
        characters of e.g. Arrow-Up, so without knowing what follows the escape
        sequence, we don't know whether escape has been pressed, or whether
        it's something else. This flush function should be called after a
        timeout, and processes everything that's still in the buffer as-is, so
        without assuming any characters will follow.
        """
        self._input_parser.send(_Flush)

    def feed_and_flush(self, data):
        """
        Wrapper around ``feed`` and ``flush``.
        """
        self.feed(data)
        self.flush()
| 34.771144 | 102 | 0.581271 |
acdecaa6c0fe092fbe75ee9c1d3e4e4fbd3cfee8 | 16,479 | py | Python | src/compas_rhino/geometry/surface.py | Licini/compas | 34f65adb3d0abc3f403312ffba62aa76f3376292 | [
"MIT"
] | null | null | null | src/compas_rhino/geometry/surface.py | Licini/compas | 34f65adb3d0abc3f403312ffba62aa76f3376292 | [
"MIT"
] | 9 | 2019-09-11T08:53:19.000Z | 2019-09-16T08:35:39.000Z | src/compas_rhino/geometry/surface.py | Licini/compas | 34f65adb3d0abc3f403312ffba62aa76f3376292 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas.datastructures import Mesh
from compas_rhino.geometry import RhinoGeometry
# from compas.geometry import subtract_vectors
# from compas.geometry import angle_vectors
# from compas.geometry import distance_point_point
from compas.utilities import geometric_key
__all__ = ['RhinoSurface']
class RhinoSurface(RhinoGeometry):
    """Wrapper around a Rhino surface (or polysurface) object.

    Provides constructors from GUIDs, document objects, or interactive
    selection, plus conversions to COMPAS meshes via BRep boundaries,
    UV-space sampling, or height-field projection.
    """

    __module__ = 'compas_rhino.geometry'

    def __init__(self):
        super(RhinoSurface, self).__init__()

    @classmethod
    def from_guid(cls, guid):
        """Construct a surface wrapper from the GUID of an existing Rhino object."""
        obj = compas_rhino.find_object(guid)
        surf = cls()
        surf.guid = guid
        surf.object = obj
        surf.geometry = obj.Geometry
        return surf

    @classmethod
    def from_object(cls, obj):
        """Construct a surface wrapper from a Rhino document object."""
        surf = cls()
        surf.guid = obj.Id
        surf.object = obj
        surf.geometry = obj.Geometry
        return surf

    @classmethod
    def from_selection(cls):
        """Construct a surface wrapper from an interactive selection in Rhino."""
        guid = compas_rhino.select_surface()
        return cls.from_guid(guid)

    def to_compas(self):
        raise NotImplementedError

    @staticmethod
    def _unpack_density(density):
        """Normalize *density* into a pair of ints (u, v).

        A single number is used for both directions; a (u, v) pair is
        unpacked. Previously this normalization was duplicated (with an
        over-broad ``except Exception`` in one copy); it is now shared and
        only catches the TypeError raised by unpacking a non-iterable.
        """
        try:
            du, dv = density
        except TypeError:
            du = dv = density
        return int(du), int(dv)

    def brep_to_compas(self, cls=None):
        """Convert the surface's BRep form to a COMPAS mesh.

        Each BRep loop becomes one mesh face; vertices are deduplicated via
        geometric keys. Returns None when the geometry has no BRep form or
        the conversion fails.
        """
        if not self.geometry.HasBrepForm:
            return
        success, brep = self.geometry.TryConvertBrep()
        if not success:
            return
        gkey_xyz = {}
        faces = []
        for loop in brep.Loops:
            curve = loop.To3dCurve()
            segments = curve.Explode()
            face = []
            sp = segments[0].PointAtStart
            ep = segments[0].PointAtEnd
            sp_gkey = geometric_key(sp)
            ep_gkey = geometric_key(ep)
            gkey_xyz[sp_gkey] = sp
            gkey_xyz[ep_gkey] = ep
            face.append(sp_gkey)
            face.append(ep_gkey)
            # The remaining segments each contribute their end point; the
            # last segment closes the loop and is skipped.
            for segment in segments[1:-1]:
                ep = segment.PointAtEnd
                ep_gkey = geometric_key(ep)
                face.append(ep_gkey)
                gkey_xyz[ep_gkey] = ep
            faces.append(face)
        gkey_index = {gkey: index for index, gkey in enumerate(gkey_xyz)}
        vertices = [list(xyz) for gkey, xyz in gkey_xyz.items()]
        faces = [[gkey_index[gkey] for gkey in f] for f in faces]
        cls = cls or Mesh
        return cls.from_vertices_and_faces(vertices, faces)

    def uv_to_compas(self, cls=None, density=(10, 10)):
        """Convert to a quad mesh sampled over the surface's UV parameter space."""
        return self.heightfield_to_compas(cls=cls, density=density, over_space=True)

    def heightfield_to_compas(self, cls=None, density=(10, 10), over_space=False):
        """Convert to a quad mesh from a (u x v) grid of sampled surface points.

        :param cls: mesh class to construct (defaults to COMPAS ``Mesh``)
        :param density: number of samples per direction, as int or (u, v) pair
        :param over_space: sample over UV space instead of projecting a grid
        """
        u, v = self._unpack_density(density)
        vertices = self.heightfield(density=(u, v), over_space=over_space)
        faces = []
        # Vertices are produced row-major (u outer, v inner), so the quad at
        # grid cell (i, j) uses indices of the surrounding four samples.
        for i in range(u - 1):
            for j in range(v - 1):
                face = ((i + 0) * v + j,
                        (i + 0) * v + j + 1,
                        (i + 1) * v + j + 1,
                        (i + 1) * v + j)
                faces.append(face)
        cls = cls or Mesh
        return cls.from_vertices_and_faces(vertices, faces)

    def space(self, density=10):
        """Return (u, v) parameter points evenly distributed over the domain
        of every face of the surface.

        :param density: number of samples per direction, as int or (u, v) pair
        :raises Exception: if the wrapped object is not a (poly)surface
        """
        rs = compas_rhino.rs
        rs.EnableRedraw(False)
        density_u, density_v = self._unpack_density(density)
        if rs.IsPolysurface(self.guid):
            faces = rs.ExplodePolysurfaces(self.guid)
        elif rs.IsSurface(self.guid):
            faces = [self.guid]
        else:
            raise Exception('Object is not a surface.')
        uv = []
        for face in faces:
            domain_u = rs.SurfaceDomain(face, 0)
            domain_v = rs.SurfaceDomain(face, 1)
            du = (domain_u[1] - domain_u[0]) / (density_u - 1)
            dv = (domain_v[1] - domain_v[0]) / (density_v - 1)
            for i in range(density_u):
                for j in range(density_v):
                    uv.append((domain_u[0] + i * du, domain_v[0] + j * dv))
        if len(faces) > 1:
            # Exploding created temporary face objects; remove them again.
            rs.DeleteObjects(faces)
        rs.EnableRedraw(True)
        return uv

    def heightfield(self, density=10, over_space=True):
        """Return XYZ coordinates of points sampled on the surface.

        With ``over_space`` the UV domain is sampled directly; otherwise a
        regular XY grid over the bounding box is projected vertically onto
        the surface (which only covers the surface fully when it actually is
        a height field).
        """
        rs = compas_rhino.rs
        rs.EnableRedraw(False)
        du, dv = self._unpack_density(density)
        if rs.IsPolysurface(self.guid):
            faces = rs.ExplodePolysurfaces(self.guid)
        elif rs.IsSurface(self.guid):
            faces = [self.guid]
        else:
            raise Exception('Object is not a surface.')
        xyz = []
        if over_space:
            for guid in faces:
                face = RhinoSurface.from_guid(guid)
                uv = face.space(density)
                for u, v in uv:
                    xyz.append(list(rs.EvaluateSurface(face.guid, u, v)))
        else:
            for guid in faces:
                bbox = rs.BoundingBox(guid)
                xmin = bbox[0][0]
                xmax = bbox[1][0]
                ymin = bbox[0][1]
                ymax = bbox[3][1]
                xstep = 1.0 * (xmax - xmin) / (du - 1)
                ystep = 1.0 * (ymax - ymin) / (dv - 1)
                seeds = []
                for i in range(du):
                    for j in range(dv):
                        seed = xmin + i * xstep, ymin + j * ystep, 0
                        seeds.append(seed)
                points = map(list, rs.ProjectPointToSurface(seeds, guid, [0, 0, 1]))
                xyz += points
        if len(faces) > 1:
            rs.DeleteObjects(faces)
        rs.EnableRedraw(True)
        return xyz

    def closest_point(self, xyz):
        """Return the XYZ coordinates of the closest point on the surface from input XYZ-coordinates.

        Parameters
        ----------
        xyz : list
            XYZ coordinates.

        Returns
        -------
        list
            The XYZ coordinates of the closest point on the surface.
        """
        return compas_rhino.rs.EvaluateSurface(self.guid, * compas_rhino.rs.SurfaceClosestPoint(self.guid, xyz))

    def closest_points(self, points):
        """Return the closest surface point for each of the given points."""
        return [self.closest_point(point) for point in points]
# ==============================================================================
# Main
# ==============================================================================

if __name__ == '__main__':
    # Interactive smoke test: pick a surface in Rhino and dump its identity.
    surface = RhinoSurface.from_selection()
    for attribute in ('guid', 'object', 'geometry', 'type', 'name'):
        print(getattr(surface, attribute))
| 32.248532 | 154 | 0.511682 |
acdecacbc2c3bd9db6e80e3851fdc4fec7f6630c | 53,764 | py | Python | warrior_modules/warrior_netconf/warriornetconf/Actions/NetconfActions/netconf_Actions.py | warriorframework/warriorframework_py3 | fc268c610c429f5a60e5627c2405aa66036487dd | [
"Apache-2.0"
] | 6 | 2018-03-06T04:18:07.000Z | 2020-12-24T06:08:36.000Z | warrior_modules/warrior_netconf/warriornetconf/Actions/NetconfActions/netconf_Actions.py | warriorframework/warriorframework_py3 | fc268c610c429f5a60e5627c2405aa66036487dd | [
"Apache-2.0"
] | 51 | 2018-03-16T04:55:07.000Z | 2021-12-13T20:53:12.000Z | warrior_modules/warrior_netconf/warriornetconf/Actions/NetconfActions/netconf_Actions.py | warriorframework/warriorframework_py3 | fc268c610c429f5a60e5627c2405aa66036487dd | [
"Apache-2.0"
] | 7 | 2018-05-09T22:13:10.000Z | 2021-11-23T11:46:27.000Z | '''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""This is the netconf_actions module that has all netconf related keywords
ymizugaki 2017/07/11
"""
import time
from xml.dom.minidom import parseString
from warrior.Framework import Utils
from warrior.Framework.Utils.print_Utils import print_info, print_debug,\
print_warning, print_exception, print_error, print_without_logging
from warrior.Framework.Utils.testcase_Utils import pNote, pSubStep, report_substep_status
from warriornetconf.ClassUtils.netconf_utils_class import WNetConf
from warrior.Framework.Utils.encryption_utils import decrypt
from warrior.Framework.Utils.data_Utils import get_object_from_datarepository, update_datarepository, getSystemData, get_credentials, _get_system_or_subsystem
import re
from configobj import ConfigObj
from xml.etree import ElementTree
from warrior.Framework.Utils.config_Utils import data_repository
import os
class NetconfActions(object):
"""NetconfActions class which has methods(keywords)
related to actions performed on basic netconf interface """
def __init__(self):
""" Constructor for NetconfActions class """
self.resultfile = Utils.config_Utils.resultfile
self.datafile = Utils.config_Utils.datafile
self.logsdir = Utils.config_Utils.logsdir
self.filename = Utils.config_Utils.filename
self.logfile = Utils.config_Utils.logfile
self.netconf_object = WNetConf()
    def request_rpc(self, system_name, session_name=None, request="",
                    xmlns="", request_type="", xmlns_tag="xmlns"):
        """ Request operations through Netconf interface.
        If value for 'request' is provided, it will be used for request
        operations else the XML input will be taken from the netconf_data
        file based on xmlns, request_type, xmlns_tag values.
        :Arguments:
            1. system_name(string) = Name of the system from the input datafile
            2. session_name(string) = Name of the session to the system
            3. request(string) = command to be sent as xml string
            4. xmlns(string) = XML namespace of the particular request
            5. Request_type(string) = The operation that we want to perform
            6. xmlns_tag(string) = xml tag for the particular request
            for eg:
            For request Type:
                <init-pm xmlns="urn:params:xml:ns:yang:perfmon">
            usage:
                xmlns_tag = xmlns(default value, no need pass this argument)
                xmlns = "urn:params:xml:ns:yang:perfmon"
                request_type= "init-pm"
            For Request Type :
                <org-openroadm-de-operations:restart xmlns:
                org-openroadm-de-operations="http://org/openroadm/de/operations">
            usage:
                xmlns_tag = "xmlns:org-openroadm-de-operations"
                xmlns = "http://org/openroadm/de/operations"
                request_type = "org-openroadm-de-operations:restart"
        :Returns:
            1. status = True/False/error
            2. RPC replies in a list & it will be updated in the data
               repository in key - [system_name]_request_rpc_reply.
        """
        wdesc = "Request particular operation from the system"
        pSubStep(wdesc)
        reply_key = '{}_request_rpc_reply'.format(system_name)
        reply_list = []
        print_debug(system_name)
        print_debug(self.datafile)
        # Flush any buffered notifications so they do not get mixed up
        # with the rpc replies collected below.
        self.clear_notification_buffer_all(system_name, session_name)
        session_id = Utils.data_Utils.get_session_id(system_name, session_name)
        netconf_object = Utils.data_Utils.\
            get_object_from_datarepository(session_id)
        config_data_list = []
        status = True
        if request:
            # An explicit xml string from the caller takes precedence.
            config_data_list = [request]
        elif all([xmlns != "", request_type != "", xmlns_tag != ""]):
            # Otherwise build the request xml from the netconf_data file
            # referenced by the system in the input datafile.
            config_datafile = Utils.data_Utils.\
                get_filepath_from_system(self.datafile, system_name,
                                         'netconf_data')[0]
            if config_datafile and Utils.file_Utils.\
                    fileExists(config_datafile):
                status, config_data_list = Utils.data_Utils.\
                    get_nc_request_rpc_string(config_datafile, xmlns,
                                              request_type, xmlns_tag)
            else:
                status = "error"
                print_error("Datafile does not have any value for netconf_data tag "
                            "or the filepath mentioned in the netconf_data tag "
                            "does not exist", 'error')
        else:
            status = "error"
            print_error("Please provide value(s) for 'request' or 'xmlns &"
                        " request_type'", 'error')
        if status is True and config_data_list:
            # Normalise to a list so a single request string and a list of
            # requests are handled the same way.
            list_config_data = []
            if not isinstance(config_data_list, list):
                list_config_data.append(config_data_list)
            else:
                list_config_data = config_data_list
            for config_data in list_config_data:
                if config_data:
                    reply = netconf_object.request_rpc(config_data)
                    reply_list.append(reply)
                    print_info('Request RPC Reply= {}'.format(reply))
                    if netconf_object.isCOMPLD:
                        sub_status = True
                    else:
                        print_error('Request RPC Failed {}'.format(
                            netconf_object.ErrorMessage), "error")
                        sub_status = False
                else:
                    reply_list.append("error")
                    print_error('Request RPC Failed', "error")
                    sub_status = "error"
                # Accumulate: an "error" sub-status overrides the running
                # status; otherwise AND the boolean result in.
                status = status and sub_status if sub_status != "error" \
                    else sub_status
        report_substep_status(status)
        return status, {reply_key: reply_list}
def connect_netconf(self, system_name='', session_name=None, credentials=None):
"""
Connects to the Netconf interface of the the given system or subsystems
:Datafile usage:
Tags or attributes to be used in input datafile for the system or subsystem
If both tag and attribute is provided the attribute will be used.
1. ip = IP address of the system/subsystem
2. nc_port = use this tag to provide ssh port to connect to Netconf \
interface, if not provided default port 830 will be used.
3. username = username for the ssh session
4. password = password for the ssh session
5. hostkey_verify = enables hostkey verification from ~/.ssh/known_hosts,\
if not provided the default value is to look into the path ~/.ssh/known_hosts.
6. protocol_version = netconf protocol version (1.0 or 1.1)
*** belows are not used, will be ignored. ***
7. timeout = use if you want to set timeout while connecting
8. allow_agent = enables querying SSH agent, if not provided the \
default value is to allow.
9. look_for_keys = enables looking in the usual locations for ssh keys,
if value is not provided the default value is to look for keys.
10. unknown_host_cb = This would be used when the server host key is not
recognized.
11. key_filename = where the private key can be found.
12. ssh_config = Enables parsing of OpenSSH configuration file.
13. device_params = netconf client device name, by default the name
"default" is used.
:Arguments:
1. system_name(string) = Name of the system from the input datafile.
2. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. session_id (dict element)= key, value
:DESCRIPTION:
This Keyword is used to connect to the netconf interface of the system.
The keyword upon executing saves the System_name and Session_id,
which can be used by all subsequent keywords in the test
to interact with the system through netconf interface.
"""
wdesc = "Connect to the netconf port of the system and creates a session"
pSubStep(wdesc)
output_dict = {}
session_parameters = ['ip', 'nc_port', 'username', 'password',
'hostkey_verify', 'protocol_version']
mapfile = data_repository.get('wt_mapfile', None)
if data_repository.get('wt_mapfile', None):
status, device_credentials = Utils.data_Utils.get_connection('CREDENTIALS', mapfile, system_name)
if status == False:
return False
if not system_name:
system_name = Utils.data_Utils.get_system_name(device_credentials)
data_repository['system_name'] = system_name
if not data_repository['system_name']:
print_error('Invalid system_name')
return False
status, session = Utils.data_Utils.get_connection('CREDENTIALS', mapfile, system_name)
if credentials:
if session.get('substitutions', None):
session.pop('substitutions')
session['username'] = credentials['username']
session['password'] = credentials['password']
status, session_credentials = Utils.data_Utils.replace_var(session, {}, {})
for v in session_credentials.values():
if re.search('{.*}', v):
print_error('Provide the substitution for variable', v)
return False
if status == False:
return False
protocol=session_credentials.get('protocol_version', None)
session_credentials['hostkey_verify'] = 'False'
if protocol == None:
session_credentials['protocol_version'] = False
else:
session_credentials = Utils.data_Utils.get_credentials(self.datafile,
system_name,
session_parameters)
session_credentials["password"] = decrypt(session_credentials["password"])
print_debug(system_name)
print_info(Utils.file_Utils.getDateTime())
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
status = self.netconf_object.open(session_credentials)
time.sleep(1)
if status:
temp = self.netconf_object.session_id
if temp is None:
status = False
else:
output_dict["netconf_session_id"] = self.netconf_object.session_id
print_info("netconf session-id = %s" % self.netconf_object.session_id)
output_dict[session_id] = self.netconf_object
data_repository["netconf_session_id"] = self.netconf_object.session_id
data_repository[session_id] = self.netconf_object
report_substep_status(status)
if output_dict:
return status, output_dict
else:
return status, {}
def close_netconf(self, system_name='', session_name=None):
"""
Request graceful termination of netconf session.
:Arguments:
1. system_name(string) = Name of the system in the input datafile
2. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Close response from the system to the data repository (data:reply.data(string)}
"""
if system_name == '':
system_name=data_repository.get('system_name' , None)
wdesc = "Request graceful termination of Netconf session"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
netconf_session_id = Utils.data_Utils.get_object_from_datarepository(
"netconf_session_id")
print_info("close session-id=%s" % netconf_session_id)
reply = netconf_object.close()
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('close-session: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
status = False
print_error('Close Netconf Failed {}'.format(
netconf_object.ErrorMessage), "error")
report_substep_status(status)
reply_key = '{}_close_netconf_reply'.format(system_name)
return status, {reply_key: reply}
def ne_request(self, command, system_name = '', timeout = '', session_name = None, dict_request = {}):
status = True
if system_name and system_name != data_repository.get('system_name', None):
print_error('system_name is incorrect')
return False
system_name = data_repository.get('system_name', None) if not system_name and data_repository.get('system_name', None) else system_name
if not system_name:
print_error('Invalid system_name')
return False
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(session_id)
step_num = data_repository["step_num"]
update_datarepository({"step_{0}_command".format(step_num): command})
reply = ''
mapfile = data_repository.get('wt_mapfile', None)
try:
status, mapper_data = Utils.data_Utils.replace_var(Utils.data_Utils.get_connection('MAP', mapfile)[1], {}, {})
if not status:
return False
# check if the MAP section is present in the cfg file
if mapper_data:
command = mapper_data.get(command, None)
# Get the command from the mapper file
if command and not re.search('{.*}', command):
# Get the request and optional data in the dictionary format
status_var, variables = Utils.data_Utils.get_connection('VARIABLES', mapfile)
status, config_data = Utils.data_Utils.replace_var(Utils.data_Utils.get_connection('COMMAND', command)[1], dict_request, variables)
if not status:
return False
status_opt, optional_data = Utils.data_Utils.replace_var(Utils.data_Utils.get_connection('OPTIONS', command)[1], dict_request, variables)
l = []
# Proceed if the optional data is given by the user
# We are replacing all the variables which the user provided inside {}
if optional_data:
# If the timeout is specified by the user, override the default timeout
if timeout:
reply = netconf_object.request_rpc(config_data['REQUEST'], int(timeout))
else:
reply = netconf_object.request_rpc(config_data['REQUEST'])
print_info('reply: {0}'.format(reply))
# Check if the user gave match string to compare with the response
if 'MATCH_STRING' in optional_data.keys():
# Check if the MATCH_STRING contains 'AND', 'OR', 'NOT'
status = Utils.data_Utils.check_match_string(optional_data['MATCH_STRING'], reply)
else:
reply = netconf_object.request_rpc(config_data['REQUEST'])
print_info('Reply: {0}'.format(reply))
else:
print_error('Provide the substitution for variable {0}'.format(command))
return False
except Exception as e:
status = False
print_error("exception found:", str(e))
return status
def get_config(self, datastore, system_name,
session_name=None,
filter_string=None,
filter_type="subtree"):
""" Retrieve all or part of a specified configuration through Netconf interface.
:Arguments:
1. datastore(string) = Name of the netconf datastore.
2. system_name(string) = Name of the system from the input datafile.
3. session_name(string) = Name of the session to the system.
4. filter_string(string) = xml string, by default entire configuration is \
retrieved.
5. filter_type(string) = Type of the Filter , subtree or xpath, default is subtree.
:Returns:
1. status(bool)= True / False
2. Get Response in the data repository {data:reply(xml)}
"""
wdesc = "Get system configuration data from the provided system"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.get_config(
datastore, filter_string, filter_type)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('get-config: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error("get-config: Failed {}".format(netconf_object.ErrorMessage))
status = False
report_substep_status(status)
reply_key = '{}_get_config_reply'.format(system_name)
return status, {reply_key: reply}
def copy_config(self, source, target, system_name, session_name=None):
"""Create or replace an entire configuration datastore
with the contents of another complete configuation datastore
:Arguments:
1. source(string) = name of the configuration datastore to use as the source of
the copy operation or config element containing the configuration subtree to copy.
2. target(string) = name of the configuration datastore to use as the destination
of the copy operation
3. system_name(string) = Name of the system from the input datafile
4. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Copy Response in the data repository {data:reply(xml)}
"""
wdesc = "Create or replace an entire configuration datastore with another datastore"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.copy_config(source, target)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('copy-config: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
status = False
print_error('copy-config: Failed {}'.format(netconf_object.ErrorMessage), "error")
report_substep_status(status)
reply_key = '{}_copy_config_reply'.format(system_name)
return status, {reply_key: reply}
def delete_config(self, datastore, system_name, session_name=None):
"""Delete a configuration datastore
:Arguments:
1. datastore(string) = name of the configuration datastore to be deleted
2. system_name(string) = Name of the system from the input datafile
3. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Delete Response in the data repository {data:reply(xml)}
"""
wdesc = "Delete system configuration data from the provided system"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.delete_config(datastore)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('delete-config: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('delete-config: Failed {}'.format(netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_delete_config_reply'.format(system_name)
return status, {reply_key: reply}
def discard_changes(self, system_name, session_name=None):
"""Revert the candidate configuration to the currently running configuration.
Uncommitted changes will be discarded.
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Discard Response in the data repository {data:reply(xml)}
"""
wdesc = "Discard any uncommitted changes to the candidate configuration"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.discard_changes()
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('discard-changes: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error(
'discard-changes: Failed {}'.format(netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_discard_changes_reply'.format(system_name)
return status, {reply_key: reply}
def edit_config(self, datastore, config, system_name,
session_name=None, default_operation=None, test_option=None, error_option=None):
""" Loads all or part of the specified config(from file) to the datastore
:Arguments:
1. datastore(string) = Name of datastore being edited
2. config(string) = The configuration.
Must be rooted in the config element. May be a string or Element
3. system_name(string) = Name of the system from the input datafile
4. session_name(string) = Name of the session to the system
5. default_operation(string) = [merge | replace | none (default)]
6. test_option(string) = [test-then-set | set | test-only | none (default)]
7. error_option(string) =
[stop-on-error | continue-on-error | rollback-on-error | none (default)]
rollback-on-error depends on :rollback-on-error capability
:Returns:
1. status(bool)= True / False
2. Edit Responses in the data repository {data:reply(xml)}
"""
wdesc = "Edit system configuration data"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
reply_key = '{}_edit_config_reply'.format(system_name)
reply_list = []
status = True
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
data_parameters = ['netconf_data']
config_datafile = Utils.data_Utils.get_filepath_from_system(self.datafile, system_name, 'netconf_data')[0]
var_configfile = Utils.data_Utils.get_filepath_from_system(self.datafile, system_name, 'variable_config')
if len(var_configfile) > 0:
var_configfile = var_configfile[0]
else:
var_configfile = None
if Utils.file_Utils.fileExists(config_datafile):
status, config_data_list = Utils.data_Utils.get_nc_config_string(config_datafile, config, var_configfile)
else:
config_data_list = []
status = "error"
if config_data_list:
for config_data in config_data_list:
if config_data:
reply = netconf_object.edit_config(datastore, config_data,
default_operation=default_operation,
test_option=test_option,
error_option=error_option)
reply_list.append(reply)
if netconf_object.isCOMPLD:
status = status and True if status != "error" else status
else:
print_error('Edit Config Failed {}'.format(netconf_object.ErrorMessage),\
"error")
status = status and False if status != "error" else status
break
else:
reply = "error"
print_error('Edit Config Failed', "error")
status = status and False
print_info('Edit Config Reply= {}'.format(reply_list))
report_substep_status(status)
return status, {reply_key: reply_list}
def commit(self, system_name,
confirmed=False, timeout=None, persist=None, persist_id=None, session_name=None):
"""Commit the candidate datastore as the device's new current configuration
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. confirmed(bool) = Commit is reverted if there is no followup commit
within the timeout interval.
3. timeout(int seconds) = The confirm timeout (Default=600 seconds)
4. persist(string) = persist-id
5. persist_id(string) = persist-id which specified in previous confirmed commit
6. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Commit Response in the data repository {data:reply(xml)}
"""
wdesc = "Commit the candidate datastore"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.commit(confirmed, timeout, persist, persist_id)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('commit: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('commit: Failed {}'.format(
netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_commit_reply'.format(system_name)
return status, {reply_key: reply}
def lock(self, datastore, system_name, session_name=None):
"""Lock the configuration system
:Arguments:
1. datastore(string) = name of the configuration datastore to be locked
2. system_name(string) = Name of the system from the input datafile
3. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Lock Response in the data repository {data:reply(xml)}
"""
wdesc = "Lock the configuration datastore"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.lock(datastore)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('lock: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('lock: Failed {}'.format(
netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_lock_reply'.format(system_name)
return status, {reply_key: reply}
def unlock(self, datastore, system_name, session_name=None):
"""Release the configuration lock
:Arguments:
1. datastore(string) = name of the configuration datastore to be unlocked
2. system_name(string) = Name of the system from the input datafile
3. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Unlock Response in the data repository {data:reply(xml)}
"""
wdesc = "Unlock the configuration datastore"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.unlock(datastore)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('unlock: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('unlock: Failed {}'.format(
netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_unlock_reply'.format(system_name)
return status, {reply_key: reply}
    def get(self, system_name, session_name=None, filter_string=None, filter_type=None):
        """Retrieve operational state information (netconf <get> rpc).
        :Arguments:
            1. system_name(string) = Name of the system from the input datafile
            2. session_name(string) = Name of the session to the system
            3. filter_string(string) = specifies the portion of the state information to retrieve
               (by default entire state information is retrieved)
            4. filter_type(string) = subtree or xpath
        :Returns:
            1. status(bool)= True / False
            2. Retrieve Response in the data repository {data:reply(xml)}
        """
        wdesc = "Retrieve operational state information (get rpc)."
        pSubStep(wdesc)
        print_debug(system_name)
        print_debug(self.datafile)
        # Flush buffered notifications so they don't mix with the rpc reply.
        self.clear_notification_buffer_all(system_name, session_name)
        session_id = Utils.data_Utils.get_session_id(system_name, session_name)
        netconf_object = Utils.data_Utils.get_object_from_datarepository(
            session_id)
        reply = netconf_object.get(filter_string, filter_type)
        if reply:
            # Pretty-print the raw xml for readable logs.
            reply = parseString(reply).toprettyxml(indent=" ")
            print_info('get: Reply= {}'.format(reply))
        if netconf_object.isCOMPLD:
            status = True
        else:
            print_error('get: Failed {}'.format(netconf_object.ErrorMessage), "error")
            status = False
        report_substep_status(status)
        # NOTE(review): this key says "get_config" although this keyword sends
        # the <get> rpc; it collides with get_config()'s reply key. Kept
        # unchanged because existing test cases may read this exact key --
        # confirm before renaming.
        reply_key = '{}_get_config_reply'.format(system_name)
        return status, {reply_key: reply}
def kill_session(self, system_name, netconf_session_id=None, session_name=None):
"""Force the termination of a NETCONF session (not the current one!)
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. netconf_session_id(string) = session-id of netconf
3. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Kill Response in the data repository {data:reply(xml)}
"""
wdesc = "Force the termination of a NETCONF session (not the current one!)"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
if not netconf_session_id:
netconf_session_id = "0"
reply = netconf_object.kill_session(netconf_session_id)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('kill-session: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('kill-session: Failed {}'.format(netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_kill_session_netconf_reply'.format(system_name)
return status, {reply_key: reply}
def validate(self, datastore, system_name, session_name=None):
""""Validate the contents of the specified configuration.
:Arguments:
1. datastore(string) = Name of the configuration datastore to be validated
2. system_name(string) = Name of the system from the input datafile
3. session_name(string) = Name of the session to the system
:Returns:
1. status(bool)= True / False
2. Validation Response in the data repository {data:reply(xml)}
"""
wdesc = "Validate the contents of the specified configuration."
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.validate(datastore)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('validate: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('validate: Failed {}'.format(
netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_validate_netconf_reply'.format(system_name)
return status, {reply_key: reply}
def edit_config_from_string(self, datastore, config, system_name,
session_name=None, default_operation=None,
test_option=None, error_option=None):
""" Loads all or part of the specified config(not file) to the datastore
:Arguments:
1. datastore(string) = Name of datastore being edited
2. config(string) = The configuration xml string.
3. system_name(string) = Name of the system from the input datafile
4. session_name(string) = Name of the session to the system
5. default_operation(string) = [merge | replace | none (default)]
6. test_option(string) = [test_then_set | set | test-only | none (default)]
7. error_option(string) = [stop-on-error | continue-on-error
| rollback-on-error | none (default)]
rollback-on-error depends on :rollback-on-error capability
:Returns:
1. status(bool)= True / False
2. Edit Response in the data repository {data:reply(xml)}
"""
wdesc = "Edit system configuration data"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.edit_config(datastore, config,
default_operation=default_operation,
test_option=test_option,
error_option=error_option)
if netconf_object.isCOMPLD:
status = True
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('edit-config: Reply= {}'.format(reply))
else:
print_error('edit-config: Reply= {}'.format(reply))
print_error('edit-config: Failed {}'.format(netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_edit_config_reply'.format(system_name)
return status, {reply_key: reply}
def create_subscription(self, system_name,
session_name=None,
stream_from=None,
filter_string=None,
filter_type="subtree",
start_time=None,
stop_time=None):
""" create-subscription to receive netconf event notification
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. session_name(string) = Name of the session to the system
3. stream_from(string) = NETCONF/SNMP/syslog ..
4. filter_string(string) = specifies the portion of the events to receive notification
by default entire events is reported
5. filter_type(string) = xpath or subtree(default)
6. start_time(string) = start time
7. stop_time(string) = stop time
:Returns:
1. status(bool)= True / False
2. Subscription Response in the data repository {data:reply(xml)}
"""
wdesc = "create-subscription to receive netconf event notification"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.create_subscription(stream_from,
filter_string,
filter_type,
start_time,
stop_time)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('create-subscription: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error(
'create-subscription: Failed {}'.format(netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_create_subscription_reply'.format(system_name)
return status, {reply_key: reply}
    def waitfor_subscription(self, system_name, wait_string, namespace_string,
                             namespace_prefix, timeout=600, session_name=None):
        """Wait for specified notification event
        :Arguments:
            1. system_name(string) = Name of the system from the input datafile
            2. waitString(string) = xpath string with namespace prefix
                e.g.
                for checking single data
                waitString = ".//ns:event[./ns:eventClass/text()='fault']"
                Note that "ns" = namespace prefix
                for checking multiple data
                waitString = ".//ns1:event1[text()='fault1'] and
                              .//ns1:event2[text()='fault2']"
            3. namespaceString(list of string) = list of namespace string
               separated by comma
               e.g., namespaceString = "namespace_value1,namespace_value2"
            4. namespacePrefix(list of string) = list of namespace prefix
               separated by comma
               e.g.,
               namespaceprefix = "ns1,ns2"
            5. timeout(integer) = timeout value in second, default=600
            6. session_name(string) = Name of the session to the system
        :Returns:
            1. status(bool)= True / False
            2. Match String Response in the data repository {data:reply(xml)}
            E.g., Assuming the following notification is the one received:
            ****************************
            <?xml version="1.0" encoding="UTF-8"?>
            <notification xmlns="urn:ietf:params:xml:ns:netconf:notification:1.0">
            <eventTime>2015-08-10T10:36:58.427756-07:00</eventTime>
            <netconf-config-change xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications">
            <changed-by>
            <username>admin</username>
            <session-id>0</session-id>
            <source-host>127.0.0.1</source-host>
            </changed-by>
            <datastore>running</datastore>
            <edit>
            <target xmlns:notif="http://tail-f.com/ns/test/notif">/notif:test</target>
            <operation>replace</operation>
            </edit>
            </netconf-config-change>
            </notification>
            ****************************
            for the notification received above, please find the appropriate
            argument and its values for checking username, source-host and target
            in this notification as follows:
            waitstring = ".//ns1:username[text()='admin'] and
                          .//ns1:source-host[text()='127.0.0.1'] and
                          .//ns2:target[text()='/notif:test']"
            namespaceString = "urn:ietf:params:xml:ns:netconf:notification:1.0,
                               http://tail-f.com/ns/test/notif"
            namespacePrefix = "ns1,ns2"
            Caveat: This keyword does not validate XMLSchema for notification.
        """
        wdesc = ("waitfor_subscription to wait specified netconf event "
                 "notification")
        pSubStep(wdesc)
        print_debug(system_name)
        print_debug(self.datafile)
        session_id = Utils.data_Utils.get_session_id(system_name, session_name)
        netconf_object = Utils.data_Utils.get_object_from_datarepository(
            session_id)
        # Build a prefix -> namespace mapping from the two comma-separated
        # argument strings; the lists must be the same length.
        namespace_dict = {}
        prefixes = [prefix.strip() for prefix in namespace_prefix.split(",")]
        namespaces = [ns.strip() for ns in namespace_string.split(",")]
        if len(prefixes) != len(namespaces):
            print_error("the number of prefixes and namespaces should match", "error")
            print_error("Number of prefixes ({}) != Number of namespaces({})".format(
                len(prefixes), len(namespaces)), "error")
            # NOTE(review): this early-exit returns a bare bool while the
            # success path returns a (status, dict) tuple -- confirm the
            # framework accepts both shapes.
            return False
        for (prefix, namespace) in zip(prefixes, namespaces):
            namespace_dict[prefix] = namespace
        # The netconf client expects the xpath and its namespace map as a pair.
        temp_waitstring = (wait_string, namespace_dict)
        print_info("waiting for %s timeout=%s ..." % (wait_string, str(timeout)))
        status = netconf_object.waitfor_subscription(temp_waitstring,
                                                     int(timeout))
        if status:
            print_info("waitfor %s received" % wait_string)
        else:
            # NOTE(review): "error" is passed to print_info here (not
            # print_error) -- looks like it may have been intended as
            # print_error; confirm against print_Utils before changing.
            print_info("waitfor %s timeouted" % wait_string, "error")
        report_substep_status(status)
        return status,{"match_string": wait_string}
    def testfor_killsession(self, system_name, session_name=None):
        """kill-session test keyword
        create another session to same NE and kills it.
        :Arguments:
            1. system_name(string) = Name of the system from the input datafile
            2. session_name(string) = Name of the session to the system
        :Returns:
            1. status(bool)= True / False
        """
        wdesc = "kill-session, create another session and kill it"
        pSubStep(wdesc)
        print_debug(system_name)
        print_debug(self.datafile)
        # Open a second, throwaway session to the same network element so the
        # kill-session RPC has a victim other than the session under test.
        test_netconf = WNetConf()
        session_parameters = ['ip', 'nc_port', 'username', 'password',
                              'allow_agent', 'hostkey_verify', 'look_for_keys',
                              'timeout', 'device_name']
        session_credentials = Utils.data_Utils.get_credentials(self.datafile,
                                                               system_name,
                                                               session_parameters)
        # Passwords are stored encrypted in the datafile; decrypt before connecting.
        session_credentials["password"] = decrypt(session_credentials["password"])
        if test_netconf.open(session_credentials):
            # Brief pause so the new session is fully established before it is killed.
            time.sleep(1)
            sid = test_netconf.session_id
            status, reply = self.kill_session(system_name, sid, session_name)
        else:
            # Could not even open the throwaway session: the test fails.
            status = False
        if status:
            pNote("kill-session PASS")
        else:
            print_error("kill-session FAIL", "error")
        report_substep_status(status)
        return status
def cancel_commit(self, system_name, persist_id=None, session_name=None):
"""cancel-commit
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. persist_id(string) = persist-id which specified in confirm-commit
3. session_name(string) = name of the session to the system
:Returns:
1. command_status(bool)
2. {data:reply.data(xml)}
"""
wdesc = "cancel-commit"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.cancel_commit(persist_id)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('cancel-commit: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('cancel-commit: Failed {}'.format(netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_request_rpc_reply'.format(system_name)
return status, {reply_key: reply}
def clear_notification_buffer(self, system_name, session_name=None):
"""clear notification buffer
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. session_name(string) = name of the session to the system
:Returns:
1. command_status(bool) = always true
"""
wdesc = "clear notification buffer"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
netconf_object.clear_notification_buffer()
report_substep_status(True)
return True
def clear_notification_buffer_all(self, system_name, session_name=None):
"""clear notification buffer for all netconf instances.
(except this instance)
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. session_name(string) = name of the session to the system
:Returns:
1. command_status(bool) = always true
"""
wdesc = "clear notification buffer all"
# pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
temp_dict = Utils.config_Utils.data_repository
for s0, s1 in list(temp_dict.items()):
if s0 != session_id and isinstance(s1, WNetConf):
s1.clear_notification_buffer()
# report_substep_status(True)
return True
def get_schema(self, system_name, identifier, version_number=None, format_type=None, session_name=None):
"""get-schema rpc
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. identifier(string) = schema id (name of a yang module, e.g. ietf-alarms)
3. version_number(string) = version number (e.g. 1.0)
4. format_type(string) = schema format (e.g. yang)
5. session_name(string) = name if the session to the system
:Returns:
1. command_status(bool)
2. {data:reply.data(xml)}
"""
wdesc = "get-schema"
pSubStep(wdesc)
print_debug(system_name)
print_debug(self.datafile)
self.clear_notification_buffer_all(system_name, session_name)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(
session_id)
reply = netconf_object.get_schema(
identifier, version_number, format_type)
if reply:
reply = parseString(reply).toprettyxml(indent=" ")
print_info('get-schema: Reply= {}'.format(reply))
if netconf_object.isCOMPLD:
status = True
else:
print_error('get-schema: Failed {}'.format(netconf_object.ErrorMessage), "error")
status = False
report_substep_status(status)
reply_key = '{}_get_schema_reply'.format(system_name)
return status, {reply_key: reply}
def print_notification_buffer(self, system_name, notification_type=None, session_name=None):
"""print notification buffer
:Arguments:
1. system_name (string) = system name
2. notification_type (string) = a notification type to be displayed.
e.g. netconf-config-change or netconf-session-end etc...
if empty then display all.
3. session_name (string) = session name
:Returns:
1. status (bool)
"""
if notification_type is not None:
wdesc = "print notification buffer: type=%s" % notification_type
else:
wdesc = "print notification buffer all"
pNote(wdesc)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(session_id)
notification_data = netconf_object.get_notification_buffer(notification_type)
if len(notification_data) != 0:
for notif in notification_data:
print_info(notif)
else:
pNote("notification data is empty")
return True
def clear_notification_buffer_for_print(self, system_name, session_name=None):
"""clear the notification print buffer
:Arguments:
1. system_name (string) = system name
2. session_name (string) = session name
:Returns:
1. status (bool)
"""
wdesc = "clear the notification print buffer"
pNote(wdesc)
session_id = Utils.data_Utils.get_session_id(system_name, session_name)
netconf_object = Utils.data_Utils.get_object_from_datarepository(session_id)
return netconf_object.clear_notification_buffer_for_print()
| 48.392439 | 158 | 0.613143 |
acdecad833425711a590a35dfbdf8ff0fa5b55ab | 4,808 | py | Python | scripts/gpr_demo_marlik.py | karalleyna/pyprobml | 72195e46fdffc4418910e76d02e3d6469f4ce272 | [
"MIT"
] | null | null | null | scripts/gpr_demo_marlik.py | karalleyna/pyprobml | 72195e46fdffc4418910e76d02e3d6469f4ce272 | [
"MIT"
] | null | null | null | scripts/gpr_demo_marlik.py | karalleyna/pyprobml | 72195e46fdffc4418910e76d02e3d6469f4ce272 | [
"MIT"
] | null | null | null | # Example of a Gaussian Process Regression with multiple local minima
# in the marginal log-likelihood as a function of the hyperparameters
# Based on: https://github.com/probml/pmtk3/blob/master/demos/gprDemoMarglik.m
# Authors: Drishti Patel & Gerardo Durán-Martín
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from numpy.linalg import inv, slogdet
from scipy.optimize import minimize
def k(u, v, sigma_f, l=1):
    """Squared-exponential (RBF) kernel: sigma_f^2 * exp(-(u - v)^2 / (2 l^2))."""
    scaled_sq_dist = np.square(u - v) / (2 * l ** 2)
    return sigma_f ** 2 * np.exp(-scaled_sq_dist)
def gp_predictive_post(xstar, x, y, k, sigma_y, *args, **kwargs):
    """
    Compute predictive distribution of a 1D-Gaussian Process for
    regression
    Parameters
    ----------
    xstar: array(nt, 1)
        Values to perform inference on
    x: array(n, 1)
        Training independent variables
    y: array(n, 1)
        Training dependent variables
    k: function
        Kernel function to evaluate the GP, called as k(u, v, *args, **kwargs)
    sigma_y: float
        data-noise term
    *args: additional arguments of k
    **kwargs: additional keyword-arguments of k
    Returns
    -------
    * array(nt, 1):
        Array of predicted (mean) values
    * array(nt, nt):
        Posterior covariance matrix
    """
    n, _ = x.shape
    kstar = k(x, xstar.T, *args, **kwargs)
    # Bug fix: the training-covariance call previously dropped **kwargs, so a
    # keyword hyperparameter (e.g. the length scale l) applied to the
    # cross-covariances but silently fell back to its default for Kxx.
    Kxx = k(x, x.T, *args, **kwargs) + sigma_y ** 2 * np.eye(n)
    kxx_star = k(xstar, xstar.T, *args, **kwargs)
    Kxx_inv = inv(Kxx)
    ystar = kstar.T @ Kxx_inv @ y
    Sigma_post = kxx_star - kstar.T @ Kxx_inv @ kstar
    return ystar, Sigma_post
def log_likelihood(x, y, sigma_f, l, sigma_y):
    """
    Marginal log-likelihood of a regression GP with RBF kernel.

    Note that ``l`` and ``sigma_y`` are given on the *log* scale: the inputs
    are divided by exp(l) and the noise variance used is exp(2 * sigma_y).

    Parameters
    ----------
    x: array(n, 1)
        Training independent variables
    y: array(n, 1)
        Training dependent variables
    sigma_f: float
        Vertical-scale parameter
    l: float
        Log of the horizontal-scale parameter
    sigma_y: float
        Log of the data-noise standard deviation
    Returns
    -------
    * float:
        Marginal log-likelihood at the specified hyperparameters
    """
    n, _ = x.shape
    scaled_x = x / np.exp(l)
    Kxx = k(scaled_x, scaled_x.T, sigma_f) + np.exp(2 * sigma_y) * np.eye(n)
    _, log_det = slogdet(Kxx)
    quad_form = y.T @ inv(Kxx) @ y
    # Use a dedicated name for the result instead of shadowing parameter l.
    ll = -1 / 2 * (quad_form + log_det + n * np.log(2 * np.pi))
    return ll.item()
def plot_gp_pred(x, y, xstar, k, sigma_f, l, sigma_y, ax):
    """Plot GP posterior mean and a +/-2-standard-deviation band on *ax*.

    x, y are the training points; xstar the prediction grid; k the kernel;
    sigma_f, l, sigma_y the kernel/noise hyperparameters.
    """
    ystar, Sigma_post = gp_predictive_post(xstar, x, y, k, sigma_y, sigma_f, l)
    # +/- 2 posterior standard deviations (approx. 95% pointwise credible band)
    upper_bound = ystar.ravel() + 2 * np.sqrt(np.diag(Sigma_post))
    lower_bound = ystar.ravel() - 2 * np.sqrt(np.diag(Sigma_post))
    ax.scatter(x, y, marker="+", s=100, c="black")
    ax.plot(xstar, ystar, c="black")
    ax.fill_between(xstar.ravel(), lower_bound, upper_bound, color="tab:gray", alpha=0.3, edgecolor="none")
    ax.set_xlim(-7.5, 7.5)
    ax.set_ylim(-2, 2.5)
def plot_marginal_likelihood_surface(x, y, sigma_f, l_space, sigma_y_space, ax, levels=None):
    """Contour-plot the marginal log-likelihood over (length scale, noise).

    l_space and sigma_y_space are grids of *log*-scale hyperparameter values;
    the axes are drawn on the original (exponentiated) scale with log-log axes.
    """
    P = np.stack(np.meshgrid(l_space, sigma_y_space), axis=0)
    # Evaluate the marginal log-likelihood at every (log l, log sigma_y) pair.
    Z = np.apply_along_axis(lambda p: log_likelihood(x, y, sigma_f, *p), 0, P)
    ax.contour(*np.exp(P), Z, levels=levels)
    ax.set_xlabel("characteristic length scale")
    ax.set_ylabel("noise standard deviation")
    ax.set_xscale("log")
    ax.set_yscale("log")
if __name__ == "__main__":
    plt.rcParams["axes.spines.right"] = False
    plt.rcParams["axes.spines.top"] = False
    sigma_f=1.0
    # Hand-picked 1D training data (column vectors).
    x = np.array([-1.3089, 6.7612, 1.0553, -1.1734, -2.9339, 7.2530, -6.5843])[:, None]
    y = np.array([1.6218, 1.8558, 0.4102, 1.2526, -0.0133, 1.6380, 0.2189])[:, None]
    xstar = np.linspace(-7.5, 7.5, 201)
    ngrid = 41
    l_space = np.linspace(np.log(0.5), np.log(80), ngrid)
    sigma_y_space = np.linspace(np.log(0.03), np.log(3), ngrid)
    # NOTE(review): this P is never used below; plot_marginal_likelihood_surface
    # recomputes its own meshgrid.
    P = np.stack(np.meshgrid(l_space, sigma_y_space), axis=0)
    # Two (l, sigma_y) configurations corresponding to two local optima.
    configs = [(1.0, 0.2), (10, 0.8)]
    fig, ax = plt.subplots()
    plot_gp_pred(x, y, xstar, k, sigma_f, *configs[0], ax)
    pml.savefig("gpr_config0.pdf")
    fig, ax = plt.subplots()
    plot_gp_pred(x, y, xstar, k, sigma_f, *configs[1], ax)
    pml.savefig("gpr_config1.pdf")
    ngrid = 41
    # Two starting points for the optimizer, in (log l, log sigma_y) space,
    # chosen so each converges to a different local optimum.
    w01 = np.array([np.log(1), np.log(0.1)])
    w02 = np.array([np.log(10), np.log(0.8)])
    s0 = minimize(lambda p: -log_likelihood(x, y, sigma_f, *p), w01)
    s1 = minimize(lambda p: -log_likelihood(x, y, sigma_f, *p), w02)
    levels = -np.array([8.3, 8.5, 8.9, 9.3, 9.8, 11.5, 15])[::-1]
    l_space = np.linspace(np.log(0.5), np.log(80), ngrid)
    sigma_y_space = np.linspace(np.log(0.03), np.log(3), ngrid)
    fig, ax = plt.subplots()
    plot_marginal_likelihood_surface(x, y, sigma_f, l_space, sigma_y_space, ax, levels=levels)
    # Mark both optima found by the two optimizer runs.
    plt.scatter(*np.exp(s0.x), marker="+", s=100, c="tab:blue")
    plt.scatter(*np.exp(s1.x), marker="+", s=100, c="tab:blue")
    pml.savefig("gpr_marginal_likelihood.pdf")
    plt.show()
| 33.158621 | 107 | 0.623128 |
acdecb0d0f7a61996bad66b35268e0226d1e7bbf | 1,425 | py | Python | components/utils/log.py | whosxavierwu/e2e-nlg-challenge-2017 | 79538fb89bac51060cea1f4059272ff297c544db | [
"Apache-2.0"
] | 25 | 2017-12-22T20:44:01.000Z | 2022-03-16T08:57:53.000Z | components/utils/log.py | whosxavierwu/e2e-nlg-challenge-2017 | 79538fb89bac51060cea1f4059272ff297c544db | [
"Apache-2.0"
] | 1 | 2021-01-27T23:14:25.000Z | 2021-02-14T11:53:07.000Z | components/utils/log.py | whosxavierwu/e2e-nlg-challenge-2017 | 79538fb89bac51060cea1f4059272ff297c544db | [
"Apache-2.0"
] | 13 | 2018-04-02T16:11:51.000Z | 2021-03-17T19:54:56.000Z | import logging
import os
import sys
def set_logger(stdout_level=logging.INFO, log_fn=None):
    """
    Set up the 'experiment' logger: console output at *stdout_level* and,
    optionally, a DEBUG-level file log at *log_fn*.
    Based on:
    https://stackoverflow.com/questions/25187083/python-logging-to-multiple-handlers-at-different-log-levels
    :param stdout_level: console log level; accepts a numeric level
        (e.g. logging.INFO) or a level-name string (e.g. "INFO").
    :param log_fn: optional log file path; the file always records DEBUG and up.
    :return: the configured logging.Logger
    """
    # Accept both level names and numeric levels.  The original code called
    # getattr(logging, stdout_level) unconditionally, which raised TypeError
    # for the documented default value logging.INFO (an int).
    if isinstance(stdout_level, str):
        stdout_level = getattr(logging, stdout_level.upper())
    # create formatters
    simple_formatter = logging.Formatter("%(name)s:%(levelname)s: %(message)s")
    detailed_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # get a top-level "mypackage" logger,
    # set its log level to DEBUG,
    # BUT PREVENT IT from propagating messages to the root logger
    logger = logging.getLogger('experiment')
    logger.setLevel(logging.DEBUG)
    logger.propagate = 0
    # create a console handler
    # and set its log level to the requested level
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(stdout_level)
    console_handler.setFormatter(simple_formatter)
    logger.addHandler(console_handler)
    if log_fn:
        # create a file handler
        # and set its log level to DEBUG
        log_fn = os.path.abspath(log_fn)
        file_handler = logging.FileHandler(log_fn)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(detailed_formatter)
        logger.addHandler(file_handler)
    return logger
| 30.319149 | 112 | 0.700351 |
acdecba22e718b26121255caccff6471db5bd521 | 3,776 | py | Python | mypy/test/testpythoneval.py | zyga/mypy | 5b7e222568cd20c31cde4e02adc9fd77d949197a | [
"PSF-2.0"
] | 1 | 2019-06-16T07:05:32.000Z | 2019-06-16T07:05:32.000Z | mypy/test/testpythoneval.py | zyga/mypy | 5b7e222568cd20c31cde4e02adc9fd77d949197a | [
"PSF-2.0"
] | null | null | null | mypy/test/testpythoneval.py | zyga/mypy | 5b7e222568cd20c31cde4e02adc9fd77d949197a | [
"PSF-2.0"
] | null | null | null | """Test cases for running mypy programs using a Python interpreter.
Each test case type checks a program then runs it using Python. The
output (stdout) of the program is compared to expected output. Type checking
uses full builtins and other stubs.
Note: Currently Python interpreter paths are hard coded.
Note: These test cases are *not* included in the main test suite, as including
this suite would slow down the main suite too much.
"""
import os
import os.path
import subprocess
import sys
import typing
from mypy.myunit import Suite, run_test, SkipTestCaseException
from mypy.test.config import test_data_prefix, test_temp_dir
from mypy.test.data import parse_test_cases
from mypy.test.helpers import assert_string_arrays_equal
# Files which contain test case descriptions.
python_eval_files = ['pythoneval.test',
'python2eval.test']
# Path to Python 3 interpreter
python3_path = 'python3'
default_python2_interpreter = 'python'
class PythonEvaluationSuite(Suite):
    """Collects the Python-evaluation test cases from every description file."""

    def cases(self):
        collected = []
        for fname in python_eval_files:
            path = os.path.join(test_data_prefix, fname)
            collected.extend(
                parse_test_cases(path, test_python_evaluation, test_temp_dir, True))
        return collected
def test_python_evaluation(testcase):
    """Type check *testcase*'s program, run it if checking succeeds, and
    compare the combined mypy + program output to the expected output.

    NOTE(review): this mutates os.environ (PYTHONPATH/MYPYPATH) and writes
    a temp file in the CWD; it is not safe for parallel execution.
    """
    python2_interpreter = try_find_python2_interpreter()
    # Use Python 2 interpreter if running a Python 2 test case.
    if testcase.name.lower().endswith('python2'):
        if not python2_interpreter:
            # Skip, can't find a Python 2 interpreter.
            raise SkipTestCaseException()
        interpreter = python2_interpreter
        args = ['--py2']
        py2 = True
    else:
        interpreter = python3_path
        args = []
        py2 = False
    # Write the program to a file.
    program = '_program.py'
    outfile = '_program.out'  # NOTE(review): assigned but never used
    f = open(program, 'w')
    for s in testcase.input:
        f.write('{}\n'.format(s))
    f.close()
    # Set up module path.
    typing_path = os.path.join(os.getcwd(), 'lib-typing', '3.2')
    assert os.path.isdir(typing_path)
    os.environ['PYTHONPATH'] = os.pathsep.join([typing_path, '.'])
    os.environ['MYPYPATH'] = '.'
    # Type check the program.
    process = subprocess.Popen([python3_path,
                                os.path.join('scripts', 'mypy')] + args + [program],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    outb = process.stdout.read()
    # Split output into lines.
    out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Only run the program when type checking exited with status 0.
    if not process.wait():
        if py2:
            # Python 2 needs the 2.7 typing backport on PYTHONPATH instead.
            typing_path = os.path.join(os.getcwd(), 'lib-typing', '2.7')
            os.environ['PYTHONPATH'] = os.pathsep.join([typing_path, '.'])
        process = subprocess.Popen([interpreter, program],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        outb = process.stdout.read()
        # Split output into lines.
        out += [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
    # Remove temp file.
    os.remove(program)
    assert_string_arrays_equal(testcase.output, out,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def try_find_python2_interpreter(interpreter=None):
    """Return the path of a working Python 2.7 interpreter, or None.

    :param interpreter: interpreter command to probe; defaults to the
        module-level ``default_python2_interpreter``.
    :return: *interpreter* when it reports Python 2.7, otherwise None.
        (Previously OSError returned False while a non-2.7 interpreter
        returned None; callers only test truthiness, so returning None
        uniformly is backward-compatible and consistent.)
    """
    if interpreter is None:
        interpreter = default_python2_interpreter
    try:
        process = subprocess.Popen([interpreter, '-V'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
    except OSError:
        # Interpreter binary not found or not executable.
        return None
    # Python 2 prints its version banner to stderr.
    return interpreter if b'Python 2.7' in stderr else None
if __name__ == '__main__':
    # Run the evaluation suite directly, forwarding any command-line
    # arguments (e.g. test name filters) to the myunit test runner.
    run_test(PythonEvaluationSuite(), sys.argv[1:])
acdecbc4c72f5046cae5edd72375d7010bcbfb5a | 4,601 | py | Python | bin/analysis/numbering/readmodify.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 6 | 2015-09-19T18:22:33.000Z | 2020-11-29T15:21:17.000Z | bin/analysis/numbering/readmodify.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2015-08-04T08:03:46.000Z | 2015-08-04T08:03:46.000Z | bin/analysis/numbering/readmodify.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2019-12-09T08:27:09.000Z | 2019-12-09T08:27:09.000Z | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util.typedispatch import *
from language.python import ast
class ReadModifyInfo(object):
    """Accumulates the locals and heap fields a code fragment reads/modifies."""
    __slots__ = 'localRead', 'localModify', 'fieldRead', 'fieldModify'

    def __init__(self):
        self.localRead = set()
        self.localModify = set()
        self.fieldRead = set()
        self.fieldModify = set()

    def update(self, other):
        """Merge another info object's sets into this one, in place."""
        self.localRead |= other.localRead
        self.localModify |= other.localModify
        self.fieldRead |= other.fieldRead
        self.fieldModify |= other.fieldModify
class FindReadModify(TypeDispatcher):
    """AST walker that records, per statement node, which locals and heap
    fields are read and modified.

    Dispatch methods for statement-level nodes build a ReadModifyInfo,
    store it in ``self.lut`` keyed by the node, and return it; expression
    handlers instead accumulate into the ``info`` they are passed.
    Entry point is :meth:`processCode`.
    """

    def getListInfo(self, l):
        # Merge the info of every child node into one ReadModifyInfo.
        info = ReadModifyInfo()
        for child in l:
            info.update(self(child))
        return info

    @dispatch(ast.Existing, ast.Code, ast.DoNotCare, ast.leafTypes)
    def visitLeaf(self, node, info):
        # Leaves contribute no reads or modifies.
        pass

    @dispatch(ast.Local)
    def visitLocal(self, node, info):
        # Referencing a local in expression position is a read.
        info.localRead.add(node)

    @dispatch(ast.Allocate)
    def visitAllocate(self, node, info):
        # TODO what about type/field nullification?
        node.visitChildrenArgs(self, info)

    @dispatch(ast.Load, ast.Check)
    def visitMemoryExpr(self, node, info):
        node.visitChildrenArgs(self, info)
        # Fold in the heap reads/modifies recorded on the node's annotation.
        info.fieldRead.update(node.annotation.reads[0])
        info.fieldModify.update(node.annotation.modifies[0])

    @dispatch(ast.Store)
    def visitStore(self, node):
        # Store is a statement: build its own info and register it in the LUT.
        info = ReadModifyInfo()
        node.visitChildrenArgs(self, info)
        info.fieldRead.update(node.annotation.reads[0])
        info.fieldModify.update(node.annotation.modifies[0])
        self.lut[node] = info
        return info

    @dispatch(ast.DirectCall, ast.Call, ast.MethodCall)
    def visitDirectCall(self, node, info):
        node.visitChildrenArgs(self, info)
        # Calls may read/modify heap fields per their annotation.
        info.fieldRead.update(node.annotation.reads[0])
        info.fieldModify.update(node.annotation.modifies[0])

    @dispatch(ast.Assign)
    def visitAssign(self, node):
        info = ReadModifyInfo()
        self(node.expr, info)
        # Assignment targets are modified locals.
        info.localModify.update(node.lcls)
        self.lut[node] = info
        return info

    @dispatch(ast.Return)
    def visitReturn(self, node):
        info = ReadModifyInfo()
        self(node.exprs, info)
        self.lut[node] = info
        return info

    @dispatch(ast.Discard)
    def visitDiscard(self, node):
        info = ReadModifyInfo()
        self(node.expr, info)
        self.lut[node] = info
        return info

    @dispatch(list)
    def visitList(self, node, info):
        for child in node:
            self(child, info)

    @dispatch(ast.Suite)
    def visitSuite(self, node):
        info = self.getListInfo(node.blocks)
        self.lut[node] = info
        return info

    @dispatch(ast.For)
    def visitFor(self, node):
        info = ReadModifyInfo()
        info.update(self(node.loopPreamble))
        # The loop reads its iterator and writes its index variable.
        info.localRead.add(node.iterator)
        info.localModify.add(node.index)
        info.update(self(node.bodyPreamble))
        info.update(self(node.body))
        info.update(self(node.else_))
        self.lut[node] = info
        return info

    @dispatch(ast.Condition)
    def visitCondition(self, node):
        info = ReadModifyInfo()
        info.update(self(node.preamble))
        info.localRead.add(node.conditional)
        self.lut[node] = info
        return info

    @dispatch(ast.While)
    def visitWhile(self, node):
        info = ReadModifyInfo()
        info.update(self(node.condition))
        info.update(self(node.body))
        info.update(self(node.else_))
        self.lut[node] = info
        return info

    @dispatch(ast.Switch)
    def visitSwitch(self, node):
        info = ReadModifyInfo()
        info.update(self(node.condition))
        info.update(self(node.t))
        info.update(self(node.f))
        self.lut[node] = info
        return info

    @dispatch(ast.TypeSwitchCase)
    def visitTypeSwitchCase(self, node):
        info = ReadModifyInfo()
        # The case binds its expression variable (a write).
        info.localModify.add(node.expr)
        info.update(self(node.body))
        self.lut[node] = info
        return info

    @dispatch(ast.TypeSwitch)
    def visitTypeSwitch(self, node):
        info = ReadModifyInfo()
        info.localRead.add(node.conditional)
        for case in node.cases:
            info.update(self(case))
        self.lut[node] = info
        return info

    @dispatch(ast.OutputBlock)
    def visitOutputBlock(self, node):
        info = ReadModifyInfo()
        for output in node.outputs:
            self(output.expr, info)
        self.lut[node] = info
        return info

    def processCode(self, code):
        """Walk *code*'s AST and return {node: ReadModifyInfo} for statements."""
        self.lut = {}
        self(code.ast)
        return self.lut
| 24.87027 | 74 | 0.729624 |
acdecc5f68f94fbf78e51a5c1e8d829ca0ea7dfb | 5,033 | py | Python | fhir/resources/DSTU2/organization.py | mmabey/fhir.resources | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | [
"BSD-3-Clause"
] | null | null | null | fhir/resources/DSTU2/organization.py | mmabey/fhir.resources | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | [
"BSD-3-Clause"
] | null | null | null | fhir/resources/DSTU2/organization.py | mmabey/fhir.resources | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Organization) on 2019-05-14.
# 2019, SMART Health IT.
from . import (address, backboneelement, codeableconcept, contactpoint,
domainresource, fhirreference, humanname, identifier)
class Organization(domainresource.DomainResource):
    """ A grouping of people or organizations with a common purpose.
    A formally or informally recognized grouping of people or organizations
    formed for the purpose of achieving some form of collective action.
    Includes companies, institutions, corporations, departments, community
    groups, healthcare practice groups, etc.
    """

    resource_name = "Organization"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.active = None
        """ Whether the organization's record is still in active use.
        Type `bool`. """
        self.address = None
        """ An address for the organization.
        List of `Address` items (represented as `dict` in JSON). """
        self.contact = None
        """ Contact for the organization for a certain purpose.
        List of `OrganizationContact` items (represented as `dict` in JSON). """
        self.identifier = None
        """ Identifies this organization across multiple systems.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.name = None
        """ Name used for the organization.
        Type `str`. """
        self.partOf = None
        """ The organization of which this organization forms a part.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
        self.telecom = None
        """ A contact detail for the organization.
        List of `ContactPoint` items (represented as `dict` in JSON). """
        self.type = None
        """ Kind of organization.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        super(Organization, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple appears to follow the generated-model convention
        # (name, json_name, type, is_list, of_many, not_optional);
        # NOTE(review): confirm against the fhirabstractbase base class.
        js = super(Organization, self).elementProperties()
        js.extend(
            [
                ("active", "active", bool, False, None, False),
                ("address", "address", address.Address, True, None, False),
                ("contact", "contact", OrganizationContact, True, None, False),
                ("identifier", "identifier", identifier.Identifier, True, None, False),
                ("name", "name", str, False, None, False),
                ("partOf", "partOf", fhirreference.FHIRReference, False, None, False),
                ("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
                ("type", "type", codeableconcept.CodeableConcept, False, None, False),
            ]
        )
        return js
class OrganizationContact(backboneelement.BackboneElement):
    """ Contact for the organization for a certain purpose.
    """

    resource_name = "OrganizationContact"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.address = None
        """ Visiting or postal addresses for the contact.
        Type `Address` (represented as `dict` in JSON). """
        self.name = None
        """ A name associated with the contact.
        Type `HumanName` (represented as `dict` in JSON). """
        self.purpose = None
        """ The type of contact.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.telecom = None
        """ Contact details (telephone, email, etc.) for a contact.
        List of `ContactPoint` items (represented as `dict` in JSON). """
        super(OrganizationContact, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout matches the generated-model convention used by
        # Organization.elementProperties above (see fhirabstractbase).
        js = super(OrganizationContact, self).elementProperties()
        js.extend(
            [
                ("address", "address", address.Address, False, None, False),
                ("name", "name", humanname.HumanName, False, None, False),
                (
                    "purpose",
                    "purpose",
                    codeableconcept.CodeableConcept,
                    False,
                    None,
                    False,
                ),
                ("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
            ]
        )
        return js
acdecc8948837ba4ffc79d028581f30ebea4e71b | 66,352 | py | Python | python/foglamp/services/core/scheduler/scheduler.py | vaibhav-ScaleDB/FogLAMP | 445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8 | [
"Apache-2.0"
] | null | null | null | python/foglamp/services/core/scheduler/scheduler.py | vaibhav-ScaleDB/FogLAMP | 445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8 | [
"Apache-2.0"
] | null | null | null | python/foglamp/services/core/scheduler/scheduler.py | vaibhav-ScaleDB/FogLAMP | 445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""FogLAMP Scheduler module"""
import asyncio
import collections
import datetime
import logging
import math
import time
import uuid
import os
import subprocess
import signal
from typing import List
from foglamp.common.configuration_manager import ConfigurationManager
from foglamp.common import logger
from foglamp.common.audit_logger import AuditLogger
from foglamp.services.core.scheduler.entities import *
from foglamp.services.core.scheduler.exceptions import *
from foglamp.common.storage_client.exceptions import *
from foglamp.common.storage_client.payload_builder import PayloadBuilder
from foglamp.common.storage_client.storage_client import StorageClientAsync
from foglamp.services.core.service_registry.service_registry import ServiceRegistry
from foglamp.services.core.service_registry import exceptions as service_registry_exceptions
from foglamp.services.common import utils
__author__ = "Terris Linenbach, Amarendra K Sinha, Massimiliano Pinto"
__copyright__ = "Copyright (c) 2017-2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
# FOGLAMP_ROOT env variable
_FOGLAMP_ROOT = os.getenv("FOGLAMP_ROOT", default='/usr/local/foglamp')
_SCRIPTS_DIR = os.path.expanduser(_FOGLAMP_ROOT + '/scripts')
class Scheduler(object):
"""FogLAMP Task Scheduler
Starts and tracks 'tasks' that run periodically,
start-up, and/or manually.
Schedules specify when to start and restart Tasks. A Task
is an operating system process. ScheduleProcesses
specify process/command name and parameters.
Most methods are coroutines and use the default
event loop to create tasks.
Usage:
- Call :meth:`start`
- Wait
- Call :meth:`stop`
"""
# TODO: Document the fields
_ScheduleRow = collections.namedtuple('ScheduleRow', ['id', 'name', 'type', 'time', 'day',
'repeat', 'repeat_seconds', 'exclusive',
'enabled', 'process_name'])
"""Represents a row in the schedules table"""
    class _TaskProcess(object):
        """Tracks one running task: its OS process plus bookkeeping flags."""
        __slots__ = ['task_id', 'process', 'cancel_requested', 'schedule', 'start_time', 'future']

        def __init__(self):
            self.task_id = None  # type: uuid.UUID
            self.process = None  # type: asyncio.subprocess.Process
            self.cancel_requested = None  # type: int
            """Epoch time when cancel was requested"""
            self.schedule = None  # Schedule._ScheduleRow
            self.start_time = None  # type: int
            """Epoch time when the task was started"""
            self.future = None  # awaitable tracking the task's completion
# TODO: Methods that accept a schedule and look in _schedule_executions
# should accept schedule_execution instead. Add reference to schedule
# in _ScheduleExecution.
    class _ScheduleExecution(object):
        """Per-schedule runtime state: next start time and the running tasks."""
        __slots__ = ['next_start_time', 'task_processes', 'start_now']

        def __init__(self):
            self.next_start_time = None
            """When to next start a task for the schedule"""
            self.task_processes = dict()
            """dict of task id to _TaskProcess"""
            self.start_now = False
            """True when a task is queued to start via :meth:`start_task`"""
# Constant class attributes
_DEFAULT_MAX_RUNNING_TASKS = 50
"""Maximum number of running tasks allowed at any given time"""
_DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS = 30
"""Maximum age of rows in the task table that have finished, in days"""
_DELETE_TASKS_LIMIT = 500
"""The maximum number of rows to delete in the tasks table in a single transaction"""
_HOUR_SECONDS = 3600
_DAY_SECONDS = 3600 * 24
_WEEK_SECONDS = 3600 * 24 * 7
_ONE_HOUR = datetime.timedelta(hours=1)
_ONE_DAY = datetime.timedelta(days=1)
_MAX_SLEEP = 9999999
"""When there is nothing to do, sleep for this number of seconds (forever)"""
_STOP_WAIT_SECONDS = 5
"""Wait this number of seconds in :meth:`stop` for tasks to stop"""
_PURGE_TASKS_FREQUENCY_SECONDS = _DAY_SECONDS
"""How frequently to purge the tasks table"""
# Mostly constant class attributes
_logger = None # type: logging.Logger
_core_management_host = None
_core_management_port = None
_storage = None
_storage_async = None
    def __init__(self, core_management_host=None, core_management_port=None):
        """Constructor

        :param core_management_host: host of the core management API
        :param core_management_port: port of the core management API
        """
        cls = Scheduler

        # Initialize class attributes (only on first construction)
        if not cls._logger:
            cls._logger = logger.setup(__name__, level=logging.INFO)
            # cls._logger = logger.setup(__name__, level=logging.DEBUG)
        if not cls._core_management_port:
            cls._core_management_port = core_management_port
        if not cls._core_management_host:
            cls._core_management_host = core_management_host

        # Instance attributes
        self._storage_async = None
        self._ready = False
        """True when the scheduler is ready to accept API calls"""
        self._start_time = None  # type: int
        """When the scheduler started"""
        self._max_running_tasks = None  # type: int
        """Maximum number of tasks that can execute at any given time"""
        self._paused = False
        """When True, the scheduler will not start any new tasks"""
        self._process_scripts = dict()
        """Dictionary of scheduled_processes.name to script"""
        self._schedules = dict()
        """Dictionary of schedules.id to _ScheduleRow"""
        self._schedule_executions = dict()
        """Dictionary of schedules.id to _ScheduleExecution"""
        self._task_processes = dict()
        """Dictionary of tasks.id to _TaskProcess"""
        self._check_processes_pending = False
        """bool: True when request to run check_processes"""
        self._scheduler_loop_task = None  # type: asyncio.Task
        """Task for :meth:`_scheduler_loop`, to ensure it has finished"""
        self._scheduler_loop_sleep_task = None  # type: asyncio.Task
        """Task for asyncio.sleep used by :meth:`_scheduler_loop`"""
        self.current_time = None  # type: int
        """Time to use when determining when to start tasks, for testing"""
        self._last_task_purge_time = None  # type: int
        """When the tasks table was last purged"""
        self._max_completed_task_age = None  # type: datetime.timedelta
        """Delete finished task rows when they become this old"""
        self._purge_tasks_task = None  # type: asyncio.Task
        """asyncio task for :meth:`purge_tasks`, if scheduled to run"""
    @property
    def max_completed_task_age(self) -> datetime.timedelta:
        """Age a non-running row in the tasks table may reach before
        :meth:`purge_tasks` deletes it"""
        return self._max_completed_task_age
@max_completed_task_age.setter
def max_completed_task_age(self, value: datetime.timedelta) -> None:
if not isinstance(value, datetime.timedelta):
raise TypeError("value must be a datetime.timedelta")
self._max_completed_task_age = value
    @property
    def max_running_tasks(self) -> int:
        """Maximum number of tasks that can run at any given time"""
        return self._max_running_tasks
    @max_running_tasks.setter
    def max_running_tasks(self, value: int) -> None:
        """Alters the maximum number of tasks that can run at any given time

        Use 0 or a negative value to suspend task creation.  The main loop is
        woken so the new limit is applied immediately.
        """
        self._max_running_tasks = value
        self._resume_check_schedules()
    def _resume_check_schedules(self):
        """Wakes up :meth:`_scheduler_loop` so that
        :meth:`_check_schedules` will be called the next time 'await'
        is invoked.
        """
        if self._scheduler_loop_sleep_task:
            try:
                # Cancel the pending sleep; the main loop catches the
                # resulting CancelledError and re-checks schedules right away
                self._scheduler_loop_sleep_task.cancel()
                self._scheduler_loop_sleep_task = None
            except RuntimeError:
                # NOTE(review): cancel() raised — exact trigger is not visible
                # here; fall back to the pending flag the main loop polls
                self._check_processes_pending = True
        else:
            # The loop is not sleeping: ask it to skip its next sleep instead
            self._check_processes_pending = True
    async def _wait_for_task_completion(self, task_process: _TaskProcess) -> None:
        """Waits for a task's subprocess to exit, then records its outcome.

        Logs the termination, updates the task's row in the 'tasks' table
        (STARTUP tasks are not tracked there), schedules the next run for
        exclusive schedules, and finally removes the task from in-memory
        bookkeeping.
        """
        exit_code = await task_process.process.wait()
        schedule = task_process.schedule

        self._logger.info(
            "Process terminated: Schedule '%s' process '%s' task %s pid %s exit %s,"
            " %s running tasks\n%s",
            schedule.name,
            schedule.process_name,
            task_process.task_id,
            task_process.process.pid,
            exit_code,
            len(self._task_processes) - 1,
            self._process_scripts[schedule.process_name])

        schedule_execution = self._schedule_executions[schedule.id]
        del schedule_execution.task_processes[task_process.task_id]

        schedule_deleted = False

        # Pick up modifications to the schedule
        # Or maybe it's been deleted
        try:
            schedule = self._schedules[schedule.id]
        except KeyError:
            schedule_deleted = True

        if self._paused or schedule_deleted or (
                schedule.repeat is None and not schedule_execution.start_now):
            if schedule_execution.next_start_time:
                schedule_execution.next_start_time = None
                self._logger.info(
                    "Tasks will no longer execute for schedule '%s'", schedule.name)
        elif schedule.exclusive:
            # Exclusive schedules compute their next start only after the task ends
            self._schedule_next_task(schedule)

        if schedule.type != Schedule.Type.STARTUP:
            # Negative exit code after a cancel request => we terminated it
            if exit_code < 0 and task_process.cancel_requested:
                state = Task.State.CANCELED
            else:
                state = Task.State.COMPLETE

            # Update the task's status
            update_payload = PayloadBuilder() \
                .SET(exit_code=exit_code,
                     state=int(state),
                     end_time=str(datetime.datetime.now())) \
                .WHERE(['id', '=', str(task_process.task_id)]) \
                .payload()
            try:
                self._logger.debug('Database command: %s', update_payload)
                res = await self._storage_async.update_tbl("tasks", update_payload)
            except Exception:
                self._logger.exception('Update failed: %s', update_payload)
                # Must keep going!

        # Due to maximum running tasks reached, it is necessary to
        # look for schedules that are ready to run even if there
        # are only manual tasks waiting
        # TODO Do this only if len(_task_processes) >= max_processes or
        # an exclusive task finished and ( start_now or schedule.repeats )
        self._resume_check_schedules()

        # This must occur after all awaiting. The size of _task_processes
        # is used by stop() to determine whether the scheduler can stop.
        del self._task_processes[task_process.task_id]
    async def _start_task(self, schedule: _ScheduleRow) -> None:
        """Starts a task process

        Launches the schedule's script as a subprocess, registers the task
        in memory, inserts a 'tasks' row (non-STARTUP schedules only), and
        arranges for :meth:`_wait_for_task_completion` to reap the process.

        Raises:
            EnvironmentError: If the process could not start
        """
        # This check is necessary only if significant time can elapse between "await" and
        # the start of the awaited coroutine.
        args = self._process_scripts[schedule.process_name]

        # add core management host and port to process script args
        # (copied so per-task flags never mutate the shared script definition)
        args_to_exec = args.copy()
        args_to_exec.append("--port={}".format(self._core_management_port))
        args_to_exec.append("--address=127.0.0.1")
        args_to_exec.append("--name={}".format(schedule.name))

        task_process = self._TaskProcess()
        task_process.start_time = time.time()

        try:
            process = await asyncio.create_subprocess_exec(*args_to_exec, cwd=_SCRIPTS_DIR)
        except EnvironmentError:
            self._logger.exception(
                "Unable to start schedule '%s' process '%s'\n%s",
                schedule.name, schedule.process_name, args_to_exec)
            raise

        task_id = uuid.uuid4()
        task_process.process = process
        task_process.schedule = schedule
        task_process.task_id = task_id

        # All tasks including STARTUP tasks go into both self._task_processes and self._schedule_executions
        self._task_processes[task_id] = task_process
        self._schedule_executions[schedule.id].task_processes[task_id] = task_process

        self._logger.info(
            "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s",
            schedule.name, schedule.process_name, task_id, process.pid,
            len(self._task_processes), args_to_exec)

        # Startup tasks are not tracked in the tasks table and do not have any future associated with them.
        if schedule.type != Schedule.Type.STARTUP:
            # The task row needs to exist before the completion handler runs
            insert_payload = PayloadBuilder() \
                .INSERT(id=str(task_id),
                        pid=(self._schedule_executions[schedule.id].
                             task_processes[task_id].process.pid),
                        schedule_name=schedule.name,
                        process_name=schedule.process_name,
                        state=int(Task.State.RUNNING),
                        start_time=str(datetime.datetime.now())) \
                .payload()
            try:
                self._logger.debug('Database command: %s', insert_payload)
                res = await self._storage_async.insert_into_tbl("tasks", insert_payload)
            except Exception:
                self._logger.exception('Insert failed: %s', insert_payload)
                # The process has started. Regardless of this error it must be waited on.

        self._task_processes[task_id].future = asyncio.ensure_future(self._wait_for_task_completion(task_process))
    async def purge_tasks(self):
        """Deletes rows from the tasks table

        Removes non-running rows whose start_time is older than
        self._max_completed_task_age, in batches of self._DELETE_TASKS_LIMIT.

        Raises:
            NotReadyError: The scheduler is not ready for requests
        """
        if self._paused:
            return

        if not self._ready:
            raise NotReadyError()

        delete_payload = PayloadBuilder() \
            .WHERE(["state", "!=", int(Task.State.RUNNING)]) \
            .AND_WHERE(["start_time", "<", str(datetime.datetime.now() - self._max_completed_task_age)]) \
            .LIMIT(self._DELETE_TASKS_LIMIT) \
            .payload()
        try:
            self._logger.debug('Database command: %s', delete_payload)
            while not self._paused:
                res = await self._storage_async.delete_from_tbl("tasks", delete_payload)
                # TODO: Uncomment below when delete count becomes available in storage layer
                # if res.get("count") < self._DELETE_TASKS_LIMIT:
                # NOTE: until the TODO above lands, this break is unconditional,
                # so at most one batch is deleted per invocation
                break
        except Exception:
            self._logger.exception('Delete failed: %s', delete_payload)
            raise
        finally:
            # Always reset the bookkeeping so _check_purge_tasks can re-schedule
            self._purge_tasks_task = None
            self._last_task_purge_time = time.time()
def _check_purge_tasks(self):
"""Schedules :meth:`_purge_tasks` to run if sufficient time has elapsed
since it last ran
"""
if self._purge_tasks_task is None and (self._last_task_purge_time is None or (
time.time() - self._last_task_purge_time) >= self._PURGE_TASKS_FREQUENCY_SECONDS):
self._purge_tasks_task = asyncio.ensure_future(self.purge_tasks())
    async def _check_schedules(self):
        """Starts tasks according to schedules based on the current time

        Returns:
            The earliest upcoming next_start_time across all schedule
            executions, or None when there is none (or when the scheduler is
            paused / at its running-task limit).
        """
        earliest_start_time = None

        # Can not iterate over _schedule_executions - it can change mid-iteration
        for schedule_id in list(self._schedule_executions.keys()):
            # Stop starting tasks entirely when paused or at capacity
            if self._paused or len(self._task_processes) >= self._max_running_tasks:
                return None

            schedule_execution = self._schedule_executions[schedule_id]

            try:
                schedule = self._schedules[schedule_id]
            except KeyError:
                # The schedule has been deleted
                if not schedule_execution.task_processes:
                    del self._schedule_executions[schedule_id]
                continue

            if schedule.enabled is False:
                continue

            # An exclusive schedule never overlaps its own running task
            if schedule.exclusive and schedule_execution.task_processes:
                continue

            # next_start_time is None when repeat is None until the
            # task completes, at which time schedule_execution is removed
            next_start_time = schedule_execution.next_start_time
            if not next_start_time and not schedule_execution.start_now:
                if not schedule_execution.task_processes:
                    del self._schedule_executions[schedule_id]
                continue

            if next_start_time and not schedule_execution.start_now:
                now = self.current_time if self.current_time else time.time()
                right_time = now >= next_start_time
            else:
                right_time = False

            if right_time or schedule_execution.start_now:
                # Start a task

                if not right_time:
                    # Manual start - don't change next_start_time
                    pass
                elif schedule.exclusive:
                    # Exclusive tasks won't start again until they terminate
                    # Or the schedule doesn't repeat
                    next_start_time = None
                else:
                    # _schedule_next_task alters next_start_time
                    self._schedule_next_task(schedule)
                    next_start_time = schedule_execution.next_start_time

                await self._start_task(schedule)

                # Queued manual execution is ignored when it was
                # already time to run the task. The task doesn't
                # start twice even when nonexclusive.
                # The choice to put this after "await" above was
                # deliberate. The above "await" could have allowed
                # queue_task() to run. The following line
                # will undo that because, after all, the task started.
                schedule_execution.start_now = False

            # Keep track of the earliest next_start_time
            if next_start_time and (earliest_start_time is None or
                                    earliest_start_time > next_start_time):
                earliest_start_time = next_start_time

        return earliest_start_time
    async def _scheduler_loop(self):
        """Main loop for the scheduler

        Repeatedly starts due tasks and triggers task-row purging, then
        sleeps until the next scheduled start time (or until woken by
        :meth:`_resume_check_schedules`).  Exits when the scheduler pauses.
        """
        # TODO: log exception here or add an exception handler in asyncio

        while True:
            next_start_time = await self._check_schedules()

            if self._paused:
                break

            self._check_purge_tasks()

            # Determine how long to sleep
            if self._check_processes_pending:
                # A wake-up was requested while we were busy: don't sleep
                self._check_processes_pending = False
                sleep_seconds = 0
            elif next_start_time:
                sleep_seconds = next_start_time - time.time()
            else:
                sleep_seconds = self._MAX_SLEEP

            if sleep_seconds > 0:
                self._logger.debug("Sleeping for %s seconds", sleep_seconds)
                # The sleep is kept as a cancellable task so that
                # _resume_check_schedules() can interrupt it
                self._scheduler_loop_sleep_task = (
                    asyncio.ensure_future(asyncio.sleep(sleep_seconds)))

                try:
                    await self._scheduler_loop_sleep_task
                    self._scheduler_loop_sleep_task = None
                except asyncio.CancelledError:
                    self._logger.debug("Main loop awakened")
            else:
                # Relinquish control for each loop iteration to avoid starving
                # other coroutines
                await asyncio.sleep(0)
def _schedule_next_timed_task(self, schedule, schedule_execution, current_dt):
"""Handle daylight savings time transitions.
Assume 'repeat' is not null.
"""
if schedule.repeat_seconds is not None and schedule.repeat_seconds < self._DAY_SECONDS:
# If repeat is less than a day, use the current hour.
# Ignore the hour specified in the schedule's time.
dt = datetime.datetime(
year=current_dt.year,
month=current_dt.month,
day=current_dt.day,
hour=current_dt.hour,
minute=schedule.time.minute,
second=schedule.time.second)
if current_dt.time() > schedule.time:
# It's already too late. Try for an hour later.
dt += self._ONE_HOUR
else:
dt = datetime.datetime(
year=current_dt.year,
month=current_dt.month,
day=current_dt.day,
hour=schedule.time.hour,
minute=schedule.time.minute,
second=schedule.time.second)
if current_dt.time() > schedule.time:
# It's already too late. Try for tomorrow
dt += self._ONE_DAY
# Advance to the correct day if specified
if schedule.day:
while dt.isoweekday() != schedule.day:
dt += self._ONE_DAY
schedule_execution.next_start_time = time.mktime(dt.timetuple())
    def _schedule_next_task(self, schedule) -> None:
        """Computes the next time to start a task for a schedule.

        For nonexclusive schedules, this method is called after starting
        a task automatically (it is not called when a task is started
        manually).

        For exclusive schedules, this method is called after the task
        has completed.
        """
        if schedule.enabled is False:
            return
        schedule_execution = self._schedule_executions[schedule.id]
        advance_seconds = schedule.repeat_seconds

        if self._paused or advance_seconds is None:
            schedule_execution.next_start_time = None
            self._logger.info(
                "Tasks will no longer execute for schedule '%s'", schedule.name)
            return

        now = time.time()

        if (schedule.exclusive and schedule_execution.next_start_time and
                now < schedule_execution.next_start_time):
            # The task was started manually
            # Or the schedule was modified after the task started (AVOID_ALTER_NEXT_START)
            return

        if advance_seconds:
            # Jump forward in whole repeat intervals so the next start
            # lands in the future even after long/overrunning tasks
            advance_seconds *= max([1, math.ceil(
                (now - schedule_execution.next_start_time) / advance_seconds)])

            if schedule.type == Schedule.Type.TIMED:
                # Handle daylight savings time transitions
                next_dt = datetime.datetime.fromtimestamp(schedule_execution.next_start_time)
                next_dt += datetime.timedelta(seconds=advance_seconds)

                if schedule.day is not None and next_dt.isoweekday() != schedule.day:
                    # Advance to the next matching day
                    next_dt = datetime.datetime(year=next_dt.year,
                                                month=next_dt.month,
                                                day=next_dt.day)
                    self._schedule_next_timed_task(schedule, schedule_execution, next_dt)
                else:
                    schedule_execution.next_start_time = time.mktime(next_dt.timetuple())
            else:
                if schedule.type == Schedule.Type.MANUAL:
                    # Manual schedules restart relative to "now"
                    schedule_execution.next_start_time = time.time()
                schedule_execution.next_start_time += advance_seconds

            self._logger.info(
                "Scheduled task for schedule '%s' to start at %s", schedule.name,
                datetime.datetime.fromtimestamp(schedule_execution.next_start_time))
    def _schedule_first_task(self, schedule, current_time):
        """Determines the time when a task for a schedule will start.

        Args:
            schedule: The schedule to consider

            current_time:
                Epoch time to use as the current time when determining
                when to schedule tasks
        """
        if schedule.enabled is False:
            return
        if schedule.type == Schedule.Type.MANUAL:
            # Manual schedules only run via explicit request
            return
        try:
            schedule_execution = self._schedule_executions[schedule.id]
        except KeyError:
            schedule_execution = self._ScheduleExecution()
            self._schedule_executions[schedule.id] = schedule_execution

        if schedule.type == Schedule.Type.INTERVAL:
            advance_seconds = schedule.repeat_seconds

            # When modifying a schedule, this is imprecise if the
            # schedule is exclusive and a task is running. When the
            # task finishes, next_start_time will be incremented
            # by at least schedule.repeat, thus missing the interval at
            # start_time + advance_seconds. Fixing this required an if statement
            # in _schedule_next_task. Search for AVOID_ALTER_NEXT_START
            if advance_seconds:
                advance_seconds *= max([1, math.ceil(
                    (current_time - self._start_time) / advance_seconds)])
            else:
                advance_seconds = 0

            schedule_execution.next_start_time = self._start_time + advance_seconds
        elif schedule.type == Schedule.Type.TIMED:
            self._schedule_next_timed_task(
                schedule,
                schedule_execution,
                datetime.datetime.fromtimestamp(current_time))
        elif schedule.type == Schedule.Type.STARTUP:
            # STARTUP schedules run as soon as the scheduler starts
            schedule_execution.next_start_time = current_time

        if self._logger.isEnabledFor(logging.INFO):
            self._logger.info(
                "Scheduled task for schedule '%s' to start at %s", schedule.name,
                datetime.datetime.fromtimestamp(schedule_execution.next_start_time))
async def _get_process_scripts(self):
try:
self._logger.debug('Database command: %s', "scheduled_processes")
res = await self._storage_async.query_tbl("scheduled_processes")
for row in res['rows']:
self._process_scripts[row.get('name')] = row.get('script')
except Exception:
self._logger.exception('Query failed: %s', "scheduled_processes")
raise
    async def _get_schedules(self):
        """Loads every row of the 'schedules' table into self._schedules
        and computes each schedule's first task start time."""
        # TODO: Get processes first, then add to Schedule
        try:
            self._logger.debug('Database command: %s', 'schedules')
            res = await self._storage_async.query_tbl("schedules")

            for row in res['rows']:
                # schedule_interval is stored as "days time"; split it apart
                interval_days, interval_dt = self.extract_day_time_from_interval(row.get('schedule_interval'))
                interval = datetime.timedelta(days=interval_days, hours=interval_dt.hour, minutes=interval_dt.minute, seconds=interval_dt.second)

                repeat_seconds = None
                if interval is not None and interval != datetime.timedelta(0):
                    repeat_seconds = interval.total_seconds()

                s_ti = row.get('schedule_time') if row.get('schedule_time') else '00:00:00'
                s_tim = datetime.datetime.strptime(s_ti, "%H:%M:%S")
                schedule_time = datetime.time().replace(hour=s_tim.hour, minute=s_tim.minute, second=s_tim.second)

                schedule_id = uuid.UUID(row.get('id'))

                # 't'/'f' flags from storage become Python booleans here
                schedule = self._ScheduleRow(
                    id=schedule_id,
                    name=row.get('schedule_name'),
                    type=row.get('schedule_type'),
                    day=row.get('schedule_day') if row.get('schedule_day') else None,
                    time=schedule_time,
                    repeat=interval,
                    repeat_seconds=repeat_seconds,
                    exclusive=True if row.get('exclusive') == 't' else False,
                    enabled=True if row.get('enabled') == 't' else False,
                    process_name=row.get('process_name'))

                self._schedules[schedule_id] = schedule
                self._schedule_first_task(schedule, self._start_time)
        except Exception:
            self._logger.exception('Query failed: %s', 'schedules')
            raise
    async def _read_storage(self):
        """Reads schedule information from the storage server

        Process scripts are loaded first; the schedule rows loaded
        afterwards refer to processes by name.
        """
        await self._get_process_scripts()
        await self._get_schedules()
    async def _mark_tasks_interrupted(self):
        """The state for any task with a NULL end_time is set to interrupted"""
        # TODO FOGL-722 NULL can not be passed like this
        # The block below is the intended implementation, kept as a string
        # until the storage layer supports NULL comparisons in WHERE clauses.

        """ # Update the task's status
        update_payload = PayloadBuilder() \
            .SET(state=int(Task.State.INTERRUPTED),
                 end_time=str(datetime.datetime.now())) \
            .WHERE(['end_time', '=', "NULL"]) \
            .payload()
        try:
            self._logger.debug('Database command: %s', update_payload)
            res = await self._storage_async.update_tbl("tasks", update_payload)
        except Exception:
            self._logger.exception('Update failed: %s', update_payload)
            raise
        """
        # Intentionally a no-op for now (see TODO above)
        pass
    async def _read_config(self):
        """Reads configuration

        Registers the 'SCHEDULER' category (creating defaults when absent)
        and populates self._max_running_tasks and
        self._max_completed_task_age from the stored values.
        """
        default_config = {
            "max_running_tasks": {
                "description": "Maximum number of tasks that can be running at any given time",
                "type": "integer",
                "default": str(self._DEFAULT_MAX_RUNNING_TASKS),
                "displayName": "Max Running Tasks"
            },
            "max_completed_task_age_days": {
                "description": "Maximum age in days (based on the start time) for a row "
                               "in the tasks table that does not have a status of running",
                "type": "integer",
                "default": str(self._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS),
                "displayName": "Max Age Of Task (In days)"
            },
        }

        cfg_manager = ConfigurationManager(self._storage_async)
        await cfg_manager.create_category('SCHEDULER', default_config, 'Scheduler configuration', display_name='Scheduler')

        config = await cfg_manager.get_category_all_items('SCHEDULER')

        self._max_running_tasks = int(config['max_running_tasks']['value'])
        # Stored as whole days; converted to a timedelta for purge comparisons
        self._max_completed_task_age = datetime.timedelta(
            seconds=int(config['max_completed_task_age_days']['value']) * self._DAY_SECONDS)
    async def start(self):
        """Starts the scheduler

        When this method returns, an asyncio task is
        scheduled that starts tasks and monitors their subprocesses. This class
        does not use threads (tasks run as subprocesses).

        Raises:
            NotReadyError: Scheduler was stopped
        """
        # _schedule_executions is set to None by stop(); a stopped
        # scheduler cannot be restarted
        if self._paused or self._schedule_executions is None:
            raise NotReadyError("The scheduler was stopped and can not be restarted")

        if self._ready:
            return

        if self._start_time:
            raise NotReadyError("The scheduler is starting")

        self._logger.info("Starting")

        self._start_time = self.current_time if self.current_time else time.time()

        # FIXME: Move below part code to server.py->_start_core(), line 123, after start of storage and before start
        # of scheduler. May need to either pass the storage object or create a storage object here itself.
        # Also provide a timeout option.
        # ************ make sure that it go forward only when storage service is ready
        storage_service = None

        # Poll every 5 seconds until the storage service registers and a
        # client can be built
        while storage_service is None and self._storage_async is None:
            try:
                found_services = ServiceRegistry.get(name="FogLAMP Storage")
                storage_service = found_services[0]
                self._storage_async = StorageClientAsync(self._core_management_host, self._core_management_port,
                                                         svc=storage_service)
            except (service_registry_exceptions.DoesNotExist, InvalidServiceInstance, StorageServiceUnavailable,
                    Exception) as ex:
                # traceback.print_exc()
                await asyncio.sleep(5)
        # **************

        # Everything OK, so now start Scheduler and create Storage instance
        self._logger.info("Starting Scheduler: Management port received is %d", self._core_management_port)

        await self._read_config()
        await self._mark_tasks_interrupted()
        await self._read_storage()

        self._ready = True

        self._scheduler_loop_task = asyncio.ensure_future(self._scheduler_loop())
    async def stop(self):
        """Attempts to stop the scheduler

        Sends TERM signal to all running tasks. Does not wait for tasks to stop.

        Prevents any new tasks from starting. This can be undone by setting the
        _paused attribute to False.

        Raises:
            TimeoutError: A task is still running. Wait and try again.
        """
        if not self._start_time:
            return

        self._logger.info("Processing stop request")

        # This method is designed to be called multiple times

        if not self._paused:
            # Wait for tasks purge task to finish
            self._paused = True
            if self._purge_tasks_task is not None:
                try:
                    await self._purge_tasks_task
                except Exception as ex:
                    self._logger.exception('An exception was raised by Scheduler._purge_tasks %s', str(ex))

            self._resume_check_schedules()

            # Stop the main loop
            try:
                await self._scheduler_loop_task
            except Exception as ex:
                self._logger.exception('An exception was raised by Scheduler._scheduler_loop %s', str(ex))
            self._scheduler_loop_task = None

        # Can not iterate over _task_processes - it can change mid-iteration
        for task_id in list(self._task_processes.keys()):
            try:
                task_process = self._task_processes[task_id]
            except KeyError:
                continue

            # TODO: FOGL-356 track the last time TERM was sent to each task
            task_process.cancel_requested = time.time()

            schedule = task_process.schedule

            # Stopping of STARTUP tasks aka microservices will be taken up separately by Core
            if schedule.type != Schedule.Type.STARTUP:
                self._logger.info(
                    "Stopping process: Schedule '%s' process '%s' task %s pid %s\n%s",
                    schedule.name,
                    schedule.process_name,
                    task_id,
                    task_process.process.pid,
                    self._process_scripts[schedule.process_name])
                try:
                    # We need to terminate the child processes because now all tasks are started vide a script and
                    # this creates two unix processes. Scheduler can store pid of the parent shell script process only
                    # and on termination of the task, both the script shell process and actual task process need to
                    # be stopped.
                    self._terminate_child_processes(task_process.process.pid)
                    task_process.process.terminate()
                except ProcessLookupError:
                    pass  # Process has terminated

        # Wait for all processes to stop, polling once per second
        for _ in range(self._STOP_WAIT_SECONDS):
            if not self._task_processes:
                break
            await asyncio.sleep(1)

        if self._task_processes:
            # Before throwing timeout error, just check if there are still any tasks pending for cancellation
            task_count = 0
            for task_id in list(self._task_processes.keys()):
                try:
                    task_process = self._task_processes[task_id]
                    schedule = task_process.schedule
                    # Lingering STARTUP tasks and the updater do not block shutdown
                    if schedule.process_name != "FogLAMPUpdater":
                        if schedule.type != Schedule.Type.STARTUP:
                            task_count += 1
                except KeyError:
                    continue
            if task_count != 0:
                raise TimeoutError("Timeout Error: Could not stop scheduler as {} tasks are pending".format(task_count))

        # Setting these to None marks the scheduler as unrestartable (see start())
        self._schedule_executions = None
        self._task_processes = None
        self._schedules = None
        self._process_scripts = None

        self._ready = False
        self._paused = False
        self._start_time = None

        self._logger.info("Stopped")

        return True
# CRUD methods for scheduled_processes, schedules, tasks
async def get_scheduled_processes(self) -> List[ScheduledProcess]:
"""Retrieves all rows from the scheduled_processes table
"""
if not self._ready:
raise NotReadyError()
processes = []
for (name, script) in self._process_scripts.items():
process = ScheduledProcess()
process.name = name
process.script = script
processes.append(process)
return processes
@classmethod
def _schedule_row_to_schedule(cls,
schedule_id: uuid.UUID,
schedule_row: _ScheduleRow) -> Schedule:
schedule_type = schedule_row.type
if schedule_type == Schedule.Type.STARTUP:
schedule = StartUpSchedule()
elif schedule_type == Schedule.Type.TIMED:
schedule = TimedSchedule()
elif schedule_type == Schedule.Type.INTERVAL:
schedule = IntervalSchedule()
elif schedule_type == Schedule.Type.MANUAL:
schedule = ManualSchedule()
else:
raise ValueError("Unknown schedule type {}", schedule_type)
schedule.schedule_id = schedule_id
schedule.exclusive = schedule_row.exclusive
schedule.enabled = schedule_row.enabled
schedule.name = schedule_row.name
schedule.process_name = schedule_row.process_name
schedule.repeat = schedule_row.repeat
if schedule_type == Schedule.Type.TIMED:
schedule.day = schedule_row.day
schedule.time = schedule_row.time
else:
schedule.day = None
schedule.time = None
return schedule
async def get_schedules(self) -> List[Schedule]:
"""Retrieves all schedules
"""
if not self._ready:
raise NotReadyError()
schedules = []
for (schedule_id, schedule_row) in self._schedules.items():
schedules.append(self._schedule_row_to_schedule(schedule_id, schedule_row))
return schedules
async def get_schedule(self, schedule_id: uuid.UUID) -> Schedule:
"""Retrieves a schedule from its id
Raises:
ScheduleNotFoundException
"""
if not self._ready:
raise NotReadyError()
try:
schedule_row = self._schedules[schedule_id]
except KeyError:
raise ScheduleNotFoundError(schedule_id)
return self._schedule_row_to_schedule(schedule_id, schedule_row)
async def get_schedule_by_name(self, name) -> Schedule:
"""Retrieves a schedule from its id
Raises:
ScheduleNotFoundException
"""
if not self._ready:
raise NotReadyError()
found_id = None
for (schedule_id, schedule_row) in self._schedules.items():
if self._schedules[schedule_id].name == name:
found_id = schedule_id
if found_id is None:
raise ScheduleNotFoundError(name)
return self._schedule_row_to_schedule(found_id, schedule_row)
    async def save_schedule(self, schedule: Schedule, is_enabled_modified=None):
        """Creates or update a schedule

        Args:
            schedule:
                The id can be None, in which case a new id will be generated

            is_enabled_modified:
                None when the caller did not touch 'enabled'; otherwise
                True/False requests enabling/disabling the schedule after save

        Raises:
            NotReadyError: The scheduler is not ready for requests
            ValueError: A schedule attribute fails validation
        """
        if self._paused or not self._ready:
            raise NotReadyError()

        # TODO should these checks be moved to the storage layer?
        if schedule.name is None or len(schedule.name) == 0:
            raise ValueError("name can not be empty")

        if schedule.repeat is not None and not isinstance(schedule.repeat, datetime.timedelta):
            raise ValueError('repeat must be of type datetime.timedelta')

        if schedule.exclusive is None or not (schedule.exclusive == True or schedule.exclusive == False):
            raise ValueError('exclusive can not be None')

        if isinstance(schedule, TimedSchedule):
            schedule_time = schedule.time

            if schedule_time is not None and not isinstance(schedule_time, datetime.time):
                raise ValueError('time must be of type datetime.time')

            day = schedule.day

            # TODO Remove this check when the database has constraint
            if day is not None and (day < 1 or day > 7):
                raise ValueError('day must be between 1 and 7')
        else:
            day = None
            schedule_time = None

        prev_schedule_row = None

        if schedule.schedule_id is None:
            is_new_schedule = True
            schedule.schedule_id = uuid.uuid4()
        else:
            try:
                prev_schedule_row = self._schedules[schedule.schedule_id]
                is_new_schedule = False
            except KeyError:
                is_new_schedule = True

        if not is_new_schedule:
            update_payload = PayloadBuilder() \
                .SET(schedule_name=schedule.name,
                     schedule_type=schedule.schedule_type,
                     schedule_interval=str(schedule.repeat),
                     schedule_day=day if day else 0,
                     schedule_time=str(schedule_time) if schedule_time else '00:00:00',
                     exclusive='t' if schedule.exclusive else 'f',
                     enabled='t' if schedule.enabled else 'f',
                     process_name=schedule.process_name) \
                .WHERE(['id', '=', str(schedule.schedule_id)]) \
                .payload()
            try:
                self._logger.debug('Database command: %s', update_payload)
                res = await self._storage_async.update_tbl("schedules", update_payload)
                # No row updated: fall through and insert instead
                if res.get('count') == 0:
                    is_new_schedule = True
            except Exception:
                self._logger.exception('Update failed: %s', update_payload)
                raise
            # Audit the change ('SCHCH' = schedule changed)
            audit = AuditLogger(self._storage_async)
            await audit.information('SCHCH', {'schedule': schedule.toDict()})

        if is_new_schedule:
            insert_payload = PayloadBuilder() \
                .INSERT(id=str(schedule.schedule_id),
                        schedule_type=schedule.schedule_type,
                        schedule_name=schedule.name,
                        schedule_interval=str(schedule.repeat),
                        schedule_day=day if day else 0,
                        schedule_time=str(schedule_time) if schedule_time else '00:00:00',
                        exclusive='t' if schedule.exclusive else 'f',
                        enabled='t' if schedule.enabled else 'f',
                        process_name=schedule.process_name) \
                .payload()
            try:
                self._logger.debug('Database command: %s', insert_payload)
                res = await self._storage_async.insert_into_tbl("schedules", insert_payload)
            except Exception:
                self._logger.exception('Insert failed: %s', insert_payload)
                raise
            # Audit the addition ('SCHAD' = schedule added)
            audit = AuditLogger(self._storage_async)
            await audit.information('SCHAD', {'schedule': schedule.toDict()})

        repeat_seconds = None
        if schedule.repeat is not None and schedule.repeat != datetime.timedelta(0):
            repeat_seconds = schedule.repeat.total_seconds()

        # Capture the pre-save enabled flag before the in-memory row is replaced
        if not is_new_schedule:
            previous_enabled = self._schedules[schedule.schedule_id].enabled
        else:
            previous_enabled = None

        schedule_row = self._ScheduleRow(
            id=schedule.schedule_id,
            name=schedule.name,
            type=schedule.schedule_type,
            time=schedule_time,
            day=day,
            repeat=schedule.repeat,
            repeat_seconds=repeat_seconds,
            exclusive=schedule.exclusive,
            enabled=schedule.enabled,
            process_name=schedule.process_name)

        self._schedules[schedule.schedule_id] = schedule_row

        # Add process to self._process_scripts if not present.
        # (The membership test is phrased as raise-KeyError so both "missing"
        # paths share the same except block.)
        try:
            if schedule.process_name not in self._process_scripts: raise KeyError
        except KeyError:
            select_payload = PayloadBuilder().WHERE(['name', '=', schedule.process_name]).payload()
            try:
                self._logger.debug('Database command: %s', select_payload)
                res = await self._storage_async.query_tbl_with_payload("scheduled_processes", select_payload)
                for row in res['rows']:
                    self._process_scripts[row.get('name')] = row.get('script')
            except Exception:
                self._logger.exception('Select failed: %s', select_payload)

        # Did the schedule change in a way that will affect task scheduling?
        if schedule.schedule_type in [Schedule.Type.INTERVAL, Schedule.Type.TIMED] and (
                is_new_schedule or
                prev_schedule_row.time != schedule_row.time or
                prev_schedule_row.day != schedule_row.day or
                prev_schedule_row.repeat_seconds != schedule_row.repeat_seconds or
                prev_schedule_row.exclusive != schedule_row.exclusive):
            now = self.current_time if self.current_time else time.time()
            self._schedule_first_task(schedule_row, now)
            self._resume_check_schedules()

        if is_enabled_modified is not None:
            if previous_enabled is None:  # New Schedule
                # For a new schedule, if enabled is set to True, the schedule will be enabled.
                bypass_check = True if schedule.enabled is True else None
            else:  # Existing Schedule
                # During update, if a schedule's enabled attribute is not changed then it will return unconditionally
                # otherwise suitable action will be invoked.
                bypass_check = True if previous_enabled != schedule.enabled else None

            if is_enabled_modified is True:
                await self.enable_schedule(schedule.schedule_id, bypass_check=bypass_check)
            else:
                await self.disable_schedule(schedule.schedule_id, bypass_check=bypass_check)
    async def remove_service_from_task_processes(self, service_name):
        """
        This method caters to the use case when a microservice, e.g. South service, which has been started by the
        Scheduler and then is shutdown vide api and then is needed to be restarted. It removes the Scheduler's record
        of the task related to the STARTUP schedule (which is not removed when shutdown action is taken by the
        microservice api as the microservice is running in a separate process and hinders starting a schedule by
        Scheduler's queue_task() method).

        Args: service_name: Name of the service whose task records to remove
        Returns: True when the records were removed; False otherwise
        """
        if not self._ready: return False

        # Find task_id for the service
        task_id = None
        task_process = None
        schedule_type = None
        try:
            task_id = next(
                (key for key in self._task_processes.keys() if self._task_processes[key].schedule.name == service_name),
                None)
            if task_id is None: raise KeyError
            task_process = self._task_processes[task_id]
            if task_id is not None:
                schedule = task_process.schedule
                schedule_type = schedule.type
                # Only STARTUP schedules (i.e. services) are handled here
                if schedule_type == Schedule.Type.STARTUP:  # If schedule is a service e.g. South services
                    del self._schedule_executions[schedule.id]
                    del self._task_processes[task_process.task_id]
                    self._logger.info("Service {} records successfully removed".format(service_name))
                    return True
        except KeyError:
            pass

        # Reached when the task was not found or was not a STARTUP schedule
        self._logger.exception(
            "Service {} records could not be removed with task id {} type {}".format(service_name, str(task_id),
                                                                                     schedule_type))
        return False
async def disable_schedule(self, schedule_id: uuid.UUID, bypass_check=None):
"""
Find running Schedule, Terminate running process, Disable Schedule, Update database
Args: schedule_id:
Returns:
"""
if self._paused or not self._ready:
raise NotReadyError()
# Find running task for the schedule.
# self._task_processes contains ALL tasks including STARTUP tasks.
try:
schedule = await self.get_schedule(schedule_id)
except ScheduleNotFoundError:
self._logger.exception("No such Schedule %s", str(schedule_id))
return False, "No such Schedule"
if bypass_check is None and schedule.enabled is False:
self._logger.info("Schedule %s already disabled", str(schedule_id))
return True, "Schedule {} already disabled".format(str(schedule_id))
# Disable Schedule - update the schedule in memory
self._schedules[schedule_id] = self._schedules[schedule_id]._replace(enabled=False)
# Update database
update_payload = PayloadBuilder().SET(enabled='f').WHERE(['id', '=', str(schedule_id)]).payload()
try:
self._logger.debug('Database command: %s', update_payload)
res = await self._storage_async.update_tbl("schedules", update_payload)
except Exception:
self._logger.exception('Update failed: %s', update_payload)
raise RuntimeError('Update failed: %s', update_payload)
await asyncio.sleep(1)
# If a task is running for the schedule, then terminate the process
task_id = None
task_process = None
try:
for key in list(self._task_processes.keys()):
if self._task_processes[key].schedule.id == schedule_id:
task_id = key
break
if task_id is None:
raise KeyError
task_process = self._task_processes[task_id]
except KeyError:
self._logger.info("No Task running for Schedule %s", str(schedule_id))
if task_id is not None:
schedule = task_process.schedule
if schedule.type == Schedule.Type.STARTUP: # If schedule is a service e.g. South services
try:
found_services = ServiceRegistry.get(name=schedule.name)
service = found_services[0]
if await utils.ping_service(service) is True:
# Shutdown will take care of unregistering the service from core
await utils.shutdown_service(service)
except:
pass
try:
# As of now, script starts the process and therefore, we need to explicitly stop this script process
# as shutdown caters to stopping of the actual service only.
task_process.process.terminate()
except ProcessLookupError:
pass # Process has terminated
else: # else it is a Task e.g. North tasks
# Terminate process
try:
# We need to terminate the child processes because now all tasks are started vide a script and
# this creates two unix processes. Scheduler can store pid of the parent shell script process only
# and on termination of the task, both the script shell process and actual task process need to
# be stopped.
self._terminate_child_processes(task_process.process.pid)
task_process.process.terminate()
except ProcessLookupError:
pass # Process has terminated
self._logger.info(
"Terminated Task '%s/%s' process '%s' task %s pid %s\n%s",
schedule.name,
str(schedule.id),
schedule.process_name,
task_id,
task_process.process.pid,
self._process_scripts[schedule.process_name])
# TODO: FOGL-356 track the last time TERM was sent to each task
task_process.cancel_requested = time.time()
task_future = task_process.future
if task_future.cancel() is True:
await self._wait_for_task_completion(task_process)
self._logger.info(
"Disabled Schedule '%s/%s' process '%s'\n",
schedule.name,
str(schedule_id),
schedule.process_name)
audit = AuditLogger(self._storage_async)
sch = await self.get_schedule(schedule_id)
await audit.information('SCHCH', {'schedule': sch.toDict()})
return True, "Schedule successfully disabled"
async def enable_schedule(self, schedule_id: uuid.UUID, bypass_check=None):
"""
Get Schedule, Enable Schedule, Update database, Start Schedule
Args: schedule_id:
Returns:
"""
if self._paused or not self._ready:
raise NotReadyError()
try:
schedule = await self.get_schedule(schedule_id)
except ScheduleNotFoundError:
self._logger.exception("No such Schedule %s", str(schedule_id))
return False, "No such Schedule"
if bypass_check is None and schedule.enabled is True:
self._logger.info("Schedule %s already enabled", str(schedule_id))
return True, "Schedule is already enabled"
# Enable Schedule
self._schedules[schedule_id] = self._schedules[schedule_id]._replace(enabled=True)
# Update database
update_payload = PayloadBuilder().SET(enabled='t').WHERE(['id', '=', str(schedule_id)]).payload()
try:
self._logger.debug('Database command: %s', update_payload)
res = await self._storage_async.update_tbl("schedules", update_payload)
except Exception:
self._logger.exception('Update failed: %s', update_payload)
raise RuntimeError('Update failed: %s', update_payload)
await asyncio.sleep(1)
# Reset schedule_execution.next_start_time
schedule_row = self._schedules[schedule_id]
now = self.current_time if self.current_time else time.time()
self._schedule_first_task(schedule_row, now)
# Start schedule
await self.queue_task(schedule_id)
self._logger.info(
"Enabled Schedule '%s/%s' process '%s'\n",
schedule.name,
str(schedule_id),
schedule.process_name)
audit = AuditLogger(self._storage_async)
sch = await self.get_schedule(schedule_id)
await audit.information('SCHCH', { 'schedule': sch.toDict() })
return True, "Schedule successfully enabled"
async def queue_task(self, schedule_id: uuid.UUID) -> None:
"""Requests a task to be started for a schedule
Args:
schedule_id: Specifies the schedule
Raises:
SchedulePausedError:
The scheduler is stopping
ScheduleNotFoundError
"""
if self._paused or not self._ready:
raise NotReadyError()
try:
schedule_row = self._schedules[schedule_id]
except KeyError:
raise ScheduleNotFoundError(schedule_id)
if schedule_row.enabled is False:
self._logger.info("Schedule '%s' is not enabled", schedule_row.name)
return False
try:
schedule_execution = self._schedule_executions[schedule_id]
except KeyError:
schedule_execution = self._ScheduleExecution()
self._schedule_executions[schedule_row.id] = schedule_execution
schedule_execution.start_now = True
self._logger.debug("Queued schedule '%s' for execution", schedule_row.name)
self._resume_check_schedules()
return True
    async def delete_schedule(self, schedule_id: uuid.UUID):
        """Deletes a schedule

        Removes the schedule from the in-memory map and from the "schedules"
        storage table. An enabled schedule must be disabled before it can be
        deleted.

        Args:
            schedule_id: id of the schedule to delete
        Returns:
            (True, message) on success
        Raises:
            ScheduleNotFoundError: the schedule id is unknown
            NotReadyError: the scheduler has not started yet
            RuntimeWarning: the schedule is still enabled
        """
        if not self._ready:
            raise NotReadyError()
        try:
            schedule = self._schedules[schedule_id]
            if schedule.enabled is True:
                self._logger.exception('Attempt to delete an enabled Schedule %s. Not deleted.', str(schedule_id))
                raise RuntimeWarning("Enabled Schedule {} cannot be deleted.".format(str(schedule_id)))
        except KeyError:
            raise ScheduleNotFoundError(schedule_id)
        # NOTE(review): the in-memory entry is removed before the storage delete;
        # if the storage call below fails, memory and database diverge — confirm
        # this ordering is intentional.
        del self._schedules[schedule_id]
        # TODO: Inspect race conditions with _set_first
        delete_payload = PayloadBuilder() \
            .WHERE(['id', '=', str(schedule_id)]) \
            .payload()
        try:
            self._logger.debug('Database command: %s', delete_payload)
            res = await self._storage_async.delete_from_tbl("schedules", delete_payload)
        except Exception:
            self._logger.exception('Delete failed: %s', delete_payload)
            raise
        return True, "Schedule deleted successfully."
async def get_running_tasks(self) -> List[Task]:
"""Retrieves a list of all tasks that are currently running
Returns:
An empty list if no tasks are running
A list of Task objects
"""
if not self._ready:
raise NotReadyError()
tasks = []
for (task_id, task_process) in self._task_processes.items():
task = Task()
task.task_id = task_id
task.schedule_name = task_process.schedule.name
task.process_name = task_process.schedule.process_name
task.state = Task.State.RUNNING
if task_process.cancel_requested is not None:
task.cancel_requested = (
datetime.datetime.fromtimestamp(task_process.cancel_requested))
task.start_time = datetime.datetime.fromtimestamp(task_process.start_time)
tasks.append(task)
return tasks
async def get_task(self, task_id: uuid.UUID) -> Task:
"""Retrieves a task given its id"""
query_payload = PayloadBuilder().SELECT("id", "process_name", "schedule_name", "state", "start_time", "end_time", "reason", "exit_code")\
.ALIAS("return", ("start_time", 'start_time'), ("end_time", 'end_time'))\
.FORMAT("return", ("start_time", "YYYY-MM-DD HH24:MI:SS.MS"), ("end_time", "YYYY-MM-DD HH24:MI:SS.MS"))\
.WHERE(["id", "=", str(task_id)]).payload()
try:
self._logger.debug('Database command: %s', query_payload)
res = await self._storage_async.query_tbl_with_payload("tasks", query_payload)
for row in res['rows']:
task = Task()
task.task_id = row.get('id')
task.state = Task.State(int(row.get('state')))
task.start_time = row.get('start_time')
task.schedule_name = row.get('schedule_name')
task.process_name = row.get('process_name')
task.end_time = row.get('end_time')
task.exit_code = row.get('exit_code')
task.reason = row.get('reason')
return task
except Exception:
self._logger.exception('Query failed: %s', query_payload)
raise
raise TaskNotFoundError(task_id)
async def get_tasks(self, limit=100, offset=0, where=None, and_where=None, or_where=None, sort=None) -> List[Task]:
"""Retrieves tasks
The result set is ordered by start_time descending
Args:
offset:
Ignore this number of rows at the beginning of the result set.
Results are unpredictable unless order_by is used.
limit: Return at most this number of rows
where: A query
sort:
A tuple of Task attributes to sort by.
Defaults to ("start_time", "desc")
"""
chain_payload = PayloadBuilder().SELECT("id", "process_name", "schedule_name", "state", "start_time", "end_time", "reason", "exit_code") \
.ALIAS("return", ("start_time", 'start_time'), ("end_time", 'end_time'))\
.FORMAT("return", ("start_time", "YYYY-MM-DD HH24:MI:SS.MS"), ("end_time", "YYYY-MM-DD HH24:MI:SS.MS"))\
.LIMIT(limit).chain_payload()
if offset:
chain_payload = PayloadBuilder(chain_payload).OFFSET(offset).chain_payload()
if where:
chain_payload = PayloadBuilder(chain_payload).WHERE(where).chain_payload()
if and_where:
chain_payload = PayloadBuilder(chain_payload).AND_WHERE(and_where).chain_payload()
if or_where:
chain_payload = PayloadBuilder(chain_payload).OR_WHERE(or_where).chain_payload()
if sort:
chain_payload = PayloadBuilder(chain_payload).ORDER_BY(sort).chain_payload()
query_payload = PayloadBuilder(chain_payload).payload()
tasks = []
try:
self._logger.debug('Database command: %s', query_payload)
res = await self._storage_async.query_tbl_with_payload("tasks", query_payload)
for row in res['rows']:
task = Task()
task.task_id = row.get('id')
task.state = Task.State(int(row.get('state')))
task.start_time = row.get('start_time')
task.schedule_name = row.get('schedule_name')
task.process_name = row.get('process_name')
task.end_time = row.get('end_time')
task.exit_code = row.get('exit_code')
task.reason = row.get('reason')
tasks.append(task)
except Exception:
self._logger.exception('Query failed: %s', query_payload)
raise
return tasks
    async def cancel_task(self, task_id: uuid.UUID) -> None:
        """Cancels a running task

        Records the cancellation time, terminates the task's script process and
        its children, and cancels the asyncio future watching the task.

        Args:
            task_id: id of the running task to cancel
        Raises:
            NotReadyError: the scheduler is paused or not started
            TaskNotRunningError: no running task with *task_id*
            DuplicateRequestError: cancellation was already requested for this task
        """
        if self._paused or not self._ready:
            raise NotReadyError()
        try:
            task_process = self._task_processes[task_id]  # _TaskProcess
        except KeyError:
            raise TaskNotRunningError(task_id)
        if task_process.cancel_requested:
            # TODO: Allow after some period of time has elapsed
            raise DuplicateRequestError()
        # TODO: FOGL-356 track the last time TERM was sent to each task
        task_process.cancel_requested = time.time()
        schedule = task_process.schedule
        self._logger.info(
            "Stopping process: Schedule '%s' process '%s' task %s pid %s\n%s",
            schedule.name,
            schedule.process_name,
            task_id,
            task_process.process.pid,
            self._process_scripts[schedule.process_name])
        try:
            # We need to terminate the child processes because now all tasks are started vide a script and
            # this creates two unix processes. Scheduler can store pid of the parent shell script process only
            # and on termination of the task, both the script shell process and actual task process need to
            # be stopped.
            self._terminate_child_processes(task_process.process.pid)
            task_process.process.terminate()
        except ProcessLookupError:
            pass  # Process has terminated
        # If the future was still pending, wait for the task teardown to finish.
        if task_process.future.cancel() is True:
            await self._wait_for_task_completion(task_process)
def _terminate_child_processes(self, parent_id):
ps_command = subprocess.Popen("ps -o pid --ppid {} --noheaders".format(parent_id), shell=True,
stdout=subprocess.PIPE)
ps_output, err = ps_command.communicate()
pids = ps_output.decode().strip().split("\n")
for pid_str in pids:
if pid_str.strip():
os.kill(int(pid_str.strip()), signal.SIGTERM)
def extract_day_time_from_interval(self, str_interval):
if 'days' in str_interval:
interval_split = str_interval.split('days')
interval_days = interval_split[0].strip()
interval_time = interval_split[1]
elif 'day' in str_interval:
interval_split = str_interval.split('day')
interval_days = interval_split[0].strip()
interval_time = interval_split[1]
else:
interval_days = 0
interval_time = str_interval
if not interval_time:
interval_time = "00:00:00"
interval_time = interval_time.replace(",", "").strip()
interval_time = datetime.datetime.strptime(interval_time, "%H:%M:%S")
return int(interval_days), interval_time
| 41.730818 | 146 | 0.606704 |
acdecdf7919a69e7843a81a276ee8fe343ca7775 | 9,433 | py | Python | enocean/protocol/tests/test_eep.py | j5lien/enocean | 6de1a50b143a34423b5072880a19413098dda6be | [
"MIT"
] | 65 | 2015-02-19T12:31:06.000Z | 2022-03-24T23:10:21.000Z | enocean/protocol/tests/test_eep.py | j5lien/enocean | 6de1a50b143a34423b5072880a19413098dda6be | [
"MIT"
] | 124 | 2015-06-18T15:17:15.000Z | 2022-02-26T17:22:12.000Z | enocean/protocol/tests/test_eep.py | j5lien/enocean | 6de1a50b143a34423b5072880a19413098dda6be | [
"MIT"
] | 92 | 2015-01-04T09:26:09.000Z | 2022-03-06T10:25:26.000Z | # -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
from enocean.protocol.packet import Packet
from enocean.protocol.eep import EEP
from enocean.protocol.constants import RORG
from enocean.decorators import timing
@timing(1000)
def test_temperature():
    ''' Tests RADIO message for EEP -profile 0xA5 0x02 0x05 '''
    # Frame bytes: sync (0x55), 4-byte header, header CRC, 4BS data
    # (RORG 0xA5, 4 data bytes, 4-byte sender id, status), optional data, CRC.
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x0A, 0x07, 0x01,
        0xEB,
        0xA5, 0x00, 0x00, 0x55, 0x08, 0x01, 0x81, 0xB7, 0x44, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x2D, 0x00,
        0x75
    ]))
    # Raw value 85 maps to 26.7 on the profile's temperature scale.
    assert packet.parse_eep(0x02, 0x05) == ['TMP']
    assert round(packet.parsed['TMP']['value'], 1) == 26.7
    assert packet.parsed['TMP']['raw_value'] == 85
    assert packet.learn is False
    assert packet.contains_eep is False
    assert packet.rorg == 0xA5
    assert packet.rorg == int(RORG.BS4)
    assert packet.rorg_func == 0x02
    assert packet.rorg_type == 0x05
    assert packet.status == 0x00
    assert packet.repeater_count == 0
    # Sender id is exposed both as a byte list and as a hex string.
    assert packet.sender == [0x01, 0x81, 0xB7, 0x44]
    assert packet.sender_hex == '01:81:B7:44'
@timing(1000)
def test_magnetic_switch():
    ''' Tests RADIO message for EEP -profile 0xD5 0x00 0x01 '''
    # 1BS telegram with data byte 0x08: contact bit clear -> 'open'.
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x07, 0x07, 0x01,
        0x7A,
        0xD5, 0x08, 0x01, 0x82, 0x5D, 0xAB, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x36, 0x00,
        0x53
    ]))
    assert packet.parse_eep(0x00, 0x01) == ['CO']
    assert packet.parsed['CO']['value'] == 'open'
    assert packet.parsed['CO']['raw_value'] == 0
    assert packet.status == 0x00
    assert packet.repeater_count == 0
    # Same telegram with data byte 0x09: contact bit set -> 'closed'.
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x07, 0x07, 0x01,
        0x7A,
        0xD5, 0x09, 0x01, 0x82, 0x5D, 0xAB, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x36, 0x00,
        0xC7
    ]))
    assert packet.parse_eep(0x00, 0x01) == ['CO']
    assert packet.parsed['CO']['value'] == 'closed'
    assert packet.parsed['CO']['raw_value'] == 1
    assert packet.learn is False
    assert packet.status == 0x00
    assert packet.repeater_count == 0
@timing(1000)
def test_switch():
    ''' Tests RSP (0xF6) rocker-switch telegrams for EEP -profile 0xF6 0x02 0x02 '''
    # Status 0x30: T21 and NU bits set -> button-press telegram.
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x07, 0x07, 0x01,
        0x7A,
        0xF6, 0x50, 0x00, 0x29, 0x89, 0x79, 0x30,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x37, 0x00,
        0x9D
    ]))
    assert packet.parse_eep(0x02, 0x02) == ['R1', 'EB', 'R2', 'SA', 'T21', 'NU']
    assert packet.parsed['SA']['value'] == 'No 2nd action'
    assert packet.parsed['EB']['value'] == 'pressed'
    assert packet.parsed['R1']['value'] == 'Button BI'
    assert packet.parsed['T21']['value'] is True
    assert packet.parsed['NU']['value'] is True
    assert packet.learn is True
    assert packet.status == 0x30
    assert packet.repeater_count == 0
    # Status 0x20: NU bit clear -> button-release telegram.
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x07, 0x07, 0x01,
        0x7A,
        0xF6, 0x00, 0x00, 0x29, 0x89, 0x79, 0x20,
        0x02, 0xFF, 0xFF, 0xFF, 0xFF, 0x4A, 0x00,
        0x03
    ]))
    assert packet.parse_eep(0x02, 0x02) == ['R1', 'EB', 'R2', 'SA', 'T21', 'NU']
    assert packet.parsed['SA']['value'] == 'No 2nd action'
    assert packet.parsed['EB']['value'] == 'released'
    assert packet.parsed['T21']['value'] is True
    assert packet.parsed['NU']['value'] is False
    assert packet.learn is True
    assert packet.status == 0x20
    assert packet.repeater_count == 0
@timing(1000)
def test_eep_parsing():
    ''' A 4BS teach-in telegram should expose its embedded EEP (FUNC/TYPE). '''
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x0A, 0x07, 0x01,
        0xEB,
        0xA5, 0x08, 0x28, 0x46, 0x80, 0x01, 0x8A, 0x7B, 0x30, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x49, 0x00,
        0x26
    ]))
    # Teach-in with EEP data: learn flag set and FUNC/TYPE decoded from the
    # telegram itself, without calling parse_eep().
    assert packet.learn is True
    assert packet.contains_eep is True
    assert packet.rorg_func == 0x02
    assert packet.rorg_type == 0x05
    assert packet.status == 0x00
    assert packet.repeater_count == 0
@timing(1000)
def test_eep_remaining():
    ''' Parsed values must not leak from one Packet into the next. '''
    # Magnetic switch -example
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x07, 0x07, 0x01,
        0x7A,
        0xD5, 0x08, 0x01, 0x82, 0x5D, 0xAB, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x36, 0x00,
        0x53
    ]))
    assert packet.parse_eep(0x00, 0x01) == ['CO']
    # Temperature-example
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x0A, 0x07, 0x01,
        0xEB,
        0xA5, 0x00, 0x00, 0x55, 0x08, 0x01, 0x81, 0xB7, 0x44, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x2D, 0x00,
        0x75
    ]))
    # If this fails, the data is retained from the last Packet parsing!
    assert packet.parse_eep(0x00, 0x01) == []
    # Once we have parse with the correct func and type, this should pass.
    assert packet.parse_eep(0x02, 0x05) == ['TMP']
@timing(1000)
def test_eep_direction():
    ''' Bidirectional profile A5-20-01: each direction yields its own field set. '''
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x0A, 0x07, 0x01,
        0xEB,
        0xA5, 0x32, 0x20, 0x89, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, 0x00,
        0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
        0x43
    ]))
    # Direction 1 exposes the current value (CV); direction 2 the set point (SP).
    assert packet.parse_eep(0x20, 0x01, 1) == ['CV', 'SO', 'ENIE', 'ES', 'BCAP', 'CCO', 'FTS', 'DWO', 'ACO', 'TMP']
    assert packet.parsed['CV']['value'] == 50
    assert packet.parse_eep(0x20, 0x01, 2) == ['SP', 'TMP', 'RIN', 'LFS', 'VO', 'VC', 'SB', 'SPS', 'SPN', 'RCU']
    assert packet.parsed['SP']['value'] == 50
@timing(1000)
def test_vld():
    ''' Tests VLD (0xD2) telegrams for EEP -profile 0xD2 0x01 0x01 '''
    # Actuator status: output value byte 0x64 -> 100% / ON.
    status, buf, p = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x09, 0x07, 0x01,
        0x56,
        0xD2, 0x04, 0x00, 0x64, 0x01, 0x94, 0xE3, 0xB9, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x40, 0x00,
        0xE4
    ]))
    assert p.rorg == RORG.VLD
    assert p.parse_eep(0x01, 0x01) == ['PF', 'PFD', 'CMD', 'OC', 'EL', 'IO', 'LC', 'OV']
    assert p.parsed['EL']['raw_value'] == 0
    assert p.parsed['EL']['value'] == 'Error level 0: hardware OK'
    assert p.parsed['PF']['raw_value'] == 0
    assert p.parsed['PF']['value'] == 'Power Failure Detection disabled/not supported'
    assert p.parsed['PFD']['raw_value'] == 0
    assert p.parsed['PFD']['value'] == 'Power Failure Detection not detected/not supported/disabled'
    assert p.parsed['IO']['raw_value'] == 0
    assert p.parsed['IO']['value'] == 'Output channel 0 (to load)'
    assert p.parsed['OV']['raw_value'] == 100
    assert p.parsed['OV']['value'] == 'Output value 100% or ON'
    assert p.parsed['OC']['raw_value'] == 0
    assert p.parsed['OC']['value'] == 'Over current switch off: ready / not supported'
    assert p.parsed['LC']['raw_value'] == 0
    assert p.parsed['LC']['value'] == 'Local control disabled / not supported'
    # Same telegram with output value byte 0x00 -> 0% / OFF.
    status, buf, p = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x09, 0x07, 0x01,
        0x56,
        0xD2, 0x04, 0x00, 0x00, 0x01, 0x94, 0xE3, 0xB9, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x40, 0x00,
        0xBF
    ]))
    assert p.rorg == RORG.VLD
    assert p.parse_eep(0x01, 0x01) == ['PF', 'PFD', 'CMD', 'OC', 'EL', 'IO', 'LC', 'OV']
    assert p.parsed['EL']['raw_value'] == 0
    assert p.parsed['EL']['value'] == 'Error level 0: hardware OK'
    assert p.parsed['PF']['raw_value'] == 0
    assert p.parsed['PF']['value'] == 'Power Failure Detection disabled/not supported'
    assert p.parsed['PFD']['raw_value'] == 0
    assert p.parsed['PFD']['value'] == 'Power Failure Detection not detected/not supported/disabled'
    assert p.parsed['IO']['raw_value'] == 0
    assert p.parsed['IO']['value'] == 'Output channel 0 (to load)'
    assert p.parsed['OV']['raw_value'] == 0
    assert p.parsed['OV']['value'] == 'Output value 0% or OFF'
    assert p.parsed['OC']['raw_value'] == 0
    assert p.parsed['OC']['value'] == 'Over current switch off: ready / not supported'
    assert p.parsed['LC']['raw_value'] == 0
    assert p.parsed['LC']['value'] == 'Local control disabled / not supported'
def test_fails():
    ''' Exercises EEP failure paths: uninitialised EEP and unknown profiles. '''
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x07, 0x07, 0x01,
        0x7A,
        0xD5, 0x08, 0x01, 0x82, 0x5D, 0xAB, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x36, 0x00,
        0x53
    ]))
    eep = EEP()
    # Mock initialization failure
    eep.init_ok = False
    assert eep.find_profile(packet._bit_data, 0xD5, 0x00, 0x01) is None
    # TODO: Needs better test. A much better.
    assert eep.set_values(profile=None, data=[True], status=[False, False], properties={'CV': False})
    eep.init_ok = True
    # With a valid profile, setting an unknown property should still succeed.
    profile = eep.find_profile(packet._bit_data, 0xD5, 0x00, 0x01)
    assert eep.set_values(profile, packet._bit_data, packet.status, {'ASD': 1})
    # Unknown RORG / FUNC / TYPE must each return None rather than raise.
    assert eep.find_profile(packet._bit_data, 0xFF, 0x00, 0x01) is None
    assert eep.find_profile(packet._bit_data, 0xD5, 0xFF, 0x01) is None
    assert eep.find_profile(packet._bit_data, 0xD5, 0x00, 0xFF) is None
    status, buf, packet = Packet.parse_msg(bytearray([
        0x55,
        0x00, 0x09, 0x07, 0x01,
        0x56,
        0xD2, 0x04, 0x00, 0x00, 0x01, 0x94, 0xE3, 0xB9, 0x00,
        0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x40, 0x00,
        0xBF
    ]))
    # D2-01-01 profiles are selected per command; an invalid command -> None.
    assert eep.find_profile(packet._bit_data, 0xD2, 0x01, 0x01) is not None
    assert eep.find_profile(packet._bit_data, 0xD2, 0x01, 0x01, command=-1) is None
| 35.066914 | 115 | 0.607336 |
acded06339369c1a281f7a87072f4129a0548d6c | 8,606 | py | Python | VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/Fd.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | 1 | 2015-04-30T14:18:45.000Z | 2015-04-30T14:18:45.000Z | VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/Fd.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/Fd.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | ## @file
# process FD generation
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Region
import Fv
import os
import StringIO
import sys
from struct import *
from GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import FDClassObject
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import SaveFileOnChange
from GenFds import GenFds
## generate FD
#
#
class FD(FDClassObject):
    """Flash Device (FD) image generator.

    Lays out the regions declared in the FDF file into a flat buffer in
    ascending offset order (padding any gaps) and writes the result to
    <FvDir>/<FdUiName>.fd.
    """
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        FDClassObject.__init__(self)
    ## GenFd() method
    #
    #   Generate FD
    #
    #   @retval string      Generated FD file name
    #
    def GenFd (self):
        # Return the cached image path if this FD was already generated.
        if self.FdUiName.upper() + 'fd' in GenFds.ImageBinDict.keys():
            return GenFds.ImageBinDict[self.FdUiName.upper() + 'fd']
        #
        # Print Information
        #
        GenFdsGlobalVariable.InfLogger("Fd File Name:%s" %self.FdUiName)
        # Sanity check: the sum of (block size * block count) must equal Size.
        Offset = 0x00
        for item in self.BlockSizeList:
            Offset = Offset + item[0] * item[1]
        if Offset != self.Size:
            EdkLogger.error("GenFds", GENFDS_ERROR, 'FD %s Size not consistent with block array' % self.FdUiName)
        GenFdsGlobalVariable.VerboseLogger('Following Fv will be add to Fd !!!')
        for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
            GenFdsGlobalVariable.VerboseLogger(FvObj)
        GenFdsGlobalVariable.VerboseLogger('################### Gen VTF ####################')
        self.GenVtfFile()
        # First pass over the regions into a throw-away buffer. Overlap and
        # ordering problems are silently ignored in this pass; presumably it
        # only forces each region's content to be generated so that sizes are
        # known before the real layout pass below — TODO confirm.
        TempFdBuffer = StringIO.StringIO('')
        PreviousRegionStart = -1
        PreviousRegionSize = 1
        for RegionObj in self.RegionList :
            if RegionObj.RegionType == 'CAPSULE':
                continue
            if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
                pass
            elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
                pass
            elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
                GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
                PadRegion = Region.Region()
                PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
                PadRegion.Size = RegionObj.Offset - PadRegion.Offset
                PadRegion.AddToBuffer(TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
            PreviousRegionStart = RegionObj.Offset
            PreviousRegionSize = RegionObj.Size
            #
            # Call each region's AddToBuffer function
            #
            if PreviousRegionSize > self.Size:
                pass
            GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
            RegionObj.AddToBuffer (TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
        # Second pass: the real layout. Gaps are padded and overlapping or
        # out-of-order regions are hard errors this time.
        FdBuffer = StringIO.StringIO('')
        PreviousRegionStart = -1
        PreviousRegionSize = 1
        for RegionObj in self.RegionList :
            if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'Region offset 0x%X in wrong order with Region starting from 0x%X, size 0x%X\nRegions in FDF must have offsets appear in ascending order.'\
                                % (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
            elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'Region offset 0x%X overlaps with Region starting from 0x%X, size 0x%X' \
                                % (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
            elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
                GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
                PadRegion = Region.Region()
                PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
                PadRegion.Size = RegionObj.Offset - PadRegion.Offset
                PadRegion.AddToBuffer(FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
            PreviousRegionStart = RegionObj.Offset
            PreviousRegionSize = RegionObj.Size
            #
            # Call each region's AddToBuffer function
            #
            if PreviousRegionSize > self.Size:
                EdkLogger.error("GenFds", GENFDS_ERROR, 'FD %s size too small' % self.FdUiName)
            GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
            RegionObj.AddToBuffer (FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
        #
        # Create a empty Fd file
        #
        GenFdsGlobalVariable.VerboseLogger ('Create an empty Fd file')
        FdFileName = os.path.join(GenFdsGlobalVariable.FvDir,self.FdUiName + '.fd')
        #
        # Write the buffer contents to Fd file
        #
        GenFdsGlobalVariable.VerboseLogger('Write the buffer contents to Fd file')
        SaveFileOnChange(FdFileName, FdBuffer.getvalue())
        FdBuffer.close();
        # Cache the generated image so repeated calls return the same path.
        GenFds.ImageBinDict[self.FdUiName.upper() + 'fd'] = FdFileName
        return FdFileName
    ## generate VTF
    #
    #   @param  self        The object pointer
    #
    def GenVtfFile (self) :
        #
        # Get this Fd's all Fv name
        #
        # FvAddDict maps FV name -> (absolute base address, size).
        FvAddDict ={}
        FvList = []
        for RegionObj in self.RegionList:
            if RegionObj.RegionType == 'FV':
                if len(RegionObj.RegionDataList) == 1:
                    RegionData = RegionObj.RegionDataList[0]
                    FvList.append(RegionData.upper())
                    FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \
                                                RegionObj.Offset, RegionObj.Size)
                else:
                    # Several FVs share one region: walk them, accumulating each
                    # FV's size (block size * block count) to compute its offset.
                    Offset = RegionObj.Offset
                    for RegionData in RegionObj.RegionDataList:
                        FvList.append(RegionData.upper())
                        FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(RegionData.upper())
                        if len(FvObj.BlockSizeList) < 1:
                            EdkLogger.error("GenFds", GENFDS_ERROR,
                                            'FV.%s must point out FVs blocksize and Fv BlockNum' \
                                            % FvObj.UiFvName)
                        else:
                            Size = 0
                            for blockStatement in FvObj.BlockSizeList:
                                Size = Size + blockStatement[0] * blockStatement[1]
                            FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \
                                                             Offset, Size)
                            Offset = Offset + Size
        #
        # Check whether this Fd need VTF
        #
        # A VTF applies to this FD when every FV it references lives here.
        Flag = False
        for VtfObj in GenFdsGlobalVariable.FdfParser.Profile.VtfList:
            compLocList = VtfObj.GetFvList()
            if set(compLocList).issubset(FvList):
                Flag = True
                break
        if Flag == True:
            self.vtfRawDict = VtfObj.GenVtf(FvAddDict)
    ## generate flash map file
    #
    #   @param  self        The object pointer
    #
    def GenFlashMap (self):
        # Intentionally a no-op placeholder.
        pass
| 44.133333 | 213 | 0.61004 |
acded0c1876be528a18860555e07e8c9e39a4144 | 468 | py | Python | test/test_shica.py | hugorichard/ShICA | 831a44ce277c6f1a82fb4689b4524d9dbf395e33 | [
"BSD-3-Clause"
] | 4 | 2021-11-01T12:43:11.000Z | 2022-02-10T10:16:56.000Z | test/test_shica.py | hugorichard/ShICA | 831a44ce277c6f1a82fb4689b4524d9dbf395e33 | [
"BSD-3-Clause"
] | null | null | null | test/test_shica.py | hugorichard/ShICA | 831a44ce277c6f1a82fb4689b4524d9dbf395e33 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from shica import shica_ml, shica_j
from shica.exp_utils import amari_d
import matplotlib.pyplot as plt
# Synthetic test data: m views, p components, n samples.
m, p, n = 4, 10, 1000
S = np.random.randn(p, n)        # shared sources
A = np.random.randn(m, p, p)     # per-view mixing matrices
N = np.random.randn(m, p, n)     # per-view noise
powers = np.random.rand(m, p)    # per-view, per-component noise scales
# Each observed view is mixing-matrix @ (sources + scaled noise).
X = np.array([mix.dot(S + scale[:, None] * noise)
              for scale, mix, noise in zip(powers, A, N)])
def test_shicaj():
    """ShICA-J should recover the mixing up to scale/permutation.

    For each view, the Amari distance between the estimated unmixing matrix
    and the true mixing matrix must be small (< 0.1).
    """
    W_pred, Sigmas, S = shica_j(X)
    for w_pred, a in zip(W_pred, A):
        assert amari_d(w_pred, a) < 0.1
acded21dbc3181e16f303dce97c89c711f3022bb | 2,426 | py | Python | src/compas_rhino/utilities/constructors.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | src/compas_rhino/utilities/constructors.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | src/compas_rhino/utilities/constructors.py | Sam-Bouten/compas | 011c7779ded9b69bb602568b470bb0443e336f62 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.utilities import geometric_key
import Rhino
import scriptcontext as sc
__all__ = ['volmesh_from_polysurfaces']
def volmesh_from_polysurfaces(cls, guids, precision=None):
    """Construct a volumetric mesh from given polysurfaces.

    Parameters
    ----------
    cls : :class:`compas.datastructures.VolMesh`
        The class of volmesh.
    guids : sequence[str or System.Guid]
        The *globally unique identifiers* of the polysurfaces.
    precision: string
        Precision of the polysurface connectivity.

    Returns
    -------
    :class:`compas.datastructures.Volmesh`
        The volumetric mesh object.

    Notes
    -----
    Essentially, this function does the following:

    * find each of the polysurfaces and check if they have a boundary representation (b-rep)
    * convert to b-rep and extract the edge loops
    * make a face of each loop by referring to vertices using their geometric keys
    * add a cell per brep
    * and add the faces of a brep to the cell
    * create a volmesh from the found vertices and cells

    """
    gkey_xyz = {}
    cells = []
    for guid in guids:
        obj = sc.doc.Objects.Find(guid)
        # Skip objects that cannot provide a boundary representation.
        if not obj.Geometry.HasBrepForm:
            continue
        brep = Rhino.Geometry.Brep.TryConvertBrep(obj.Geometry)
        cell = []
        for loop in brep.Loops:
            segments = loop.To3dCurve().Explode()
            face = []
            # Register both endpoints of the first segment, then the end point
            # of every interior segment. The last segment is skipped because
            # its end point coincides with the first segment's start (the loop
            # is closed).
            first = segments[0]
            for point in (first.PointAtStart, first.PointAtEnd):
                gkey = geometric_key(point, precision)
                gkey_xyz[gkey] = point
                face.append(gkey)
            for segment in segments[1:-1]:
                point = segment.PointAtEnd
                gkey = geometric_key(point, precision)
                face.append(gkey)
                gkey_xyz[gkey] = point
            cell.append(face)
        cells.append(cell)
    # Map each geometric key to a vertex index (dict insertion order).
    gkey_index = {gkey: index for index, gkey in enumerate(gkey_xyz)}
    vertices = [list(xyz) for xyz in gkey_xyz.values()]
    cells = [[[gkey_index[gkey] for gkey in face] for face in cell] for cell in cells]
    return cls.from_vertices_and_cells(vertices, cells)
acded323a29d108af9713eb38333bb2de952e30c | 2,412 | py | Python | shared/test/predict_M2/predict_M2.py | fakeface-mmc/fakeface-mmc | e1dcb879fc91879fdd5c4a8782386a03fe006c67 | [
"MIT"
] | 3 | 2019-06-05T09:06:54.000Z | 2020-08-02T12:57:49.000Z | shared/test/predict_M2/predict_M2.py | fakeface-mmc/fakeface-mmc | e1dcb879fc91879fdd5c4a8782386a03fe006c67 | [
"MIT"
] | 1 | 2019-01-08T07:55:41.000Z | 2019-01-08T07:55:41.000Z | shared/test/predict_M2/predict_M2.py | fakeface-mmc/fakeface-mmc | e1dcb879fc91879fdd5c4a8782386a03fe006c67 | [
"MIT"
] | 1 | 2019-09-04T14:13:57.000Z | 2019-09-04T14:13:57.000Z | import os.path
import tensorflow as tf
import multiprocessing
from predict_M2.model.net_xception import model_predict
import glob
from ntpath import basename
from predict_M2.utils.face_crop import crop
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def log_string(LOG_FOUT, out_str):
    """Append `out_str` plus a newline to the open log handle and flush it
    immediately so the line reaches disk even if the process dies later."""
    LOG_FOUT.write('{}\n'.format(out_str))
    LOG_FOUT.flush()
def parse_fn(img, x):
    """tf.data map fn: decode a JPEG, resize to 64x64, scale pixels to [0, 1].

    Args:
        img: scalar string tensor holding raw JPEG bytes (output of `crop`).
        x: passed through unchanged; here it carries the source file name.

    Returns:
        (image, x) where image is a float32 tensor of shape [64, 64, 3]
        with values in [0, 1].
    """
    # INTEGER_ACCURATE makes JPEG decoding bit-exact across platforms.
    img = tf.cast(
        tf.image.resize_images(tf.image.decode_jpeg(img, dct_method='INTEGER_ACCURATE', channels=3),
                               [64, 64]), dtype=tf.float32)
    image_resized = tf.divide(img, 255)
    return image_resized, x
def predict(data_dir):
    """Score every .jpg under `data_dir` with the restored Xception model.

    Builds a tf.data pipeline (face crop via `crop` -> decode/resize via
    `parse_fn`), restores the epoch-12 checkpoint, and writes one
    "<basename> <score>" line per image to ./syn_vs_others.txt.
    """
    data_dir = data_dir+'/*.jpg'
    max_cpus = multiprocessing.cpu_count()
    # Batch size is a feedable placeholder so the graph is size-agnostic.
    batch_size = tf.placeholder(tf.int64)
    dataset = tf.data.Dataset.from_tensor_slices(glob.glob(data_dir))
    # `crop` runs as a py_func and yields (jpeg_bytes, filename) string pairs.
    dataset = dataset.map(lambda filename : tuple(tf.py_func(crop,[filename], [tf.string, tf.string])), num_parallel_calls=int(max_cpus / 2))
    dataset = dataset.map(parse_fn, num_parallel_calls=int(max_cpus / 2))
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(buffer_size=200)
    iterator = dataset.make_initializable_iterator()
    img, name = iterator.get_next()
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        phase = tf.placeholder(tf.bool, name='phase')
        prediction = model_predict(img, phase)
        tf.global_variables_initializer().run()
        cur_path = os.path.dirname(os.path.abspath(__file__))
        restore_epoch = 12
        # Restore only the backbone variables (names prefixed with 'block').
        saver = tf.train.Saver(max_to_keep=300, var_list=[v for v in tf.global_variables() if v.name.startswith('block')])
        ckpt_dir = os.path.join(cur_path, 'log', 'epoch' + str(restore_epoch) + '_model.ckpt')
        saver.restore(sess, ckpt_dir)
        # NOTE(review): '.format(restore_epoch)' has no placeholder, so the
        # epoch number never reaches the output file name -- likely a leftover.
        LOG_FOUT_test = open('./syn_vs_others.txt'.format(restore_epoch), "w")
        sess.run(iterator.initializer, feed_dict={batch_size: 4})
        while True:
            try:
                prediction_, fname = sess.run([prediction, name], feed_dict={phase: False})
                for i in range(len(fname)):
                    name_ = basename(tf.compat.as_text(fname[i], encoding='utf-8'))
                    name_ = name_.split('.')[0]
                    # Writes 1 - p[0]; what class index 0 denotes depends on
                    # the training labels of model_predict -- verify.
                    log_string(LOG_FOUT_test, name_ + ' {0:.3f}'.format(1 - prediction_[i][0]))
            except tf.errors.OutOfRangeError:
                # Iterator exhausted: all images scored.
                break
| 36.545455 | 141 | 0.660033 |
acded3d83b2782e965201751885efe62b17b0760 | 2,462 | py | Python | parserUnity.py | ToxicCrack/GameAssetCreator | 2d0c5d7f592c5dbe5bf943d0a9ee4912557415a3 | [
"MIT"
] | null | null | null | parserUnity.py | ToxicCrack/GameAssetCreator | 2d0c5d7f592c5dbe5bf943d0a9ee4912557415a3 | [
"MIT"
] | null | null | null | parserUnity.py | ToxicCrack/GameAssetCreator | 2d0c5d7f592c5dbe5bf943d0a9ee4912557415a3 | [
"MIT"
] | null | null | null | #encoding: UTF-8
#The MIT License
#Copyright 2018 Daniel Lichtblau
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from parserBase import *
import pprint
import re
class parserUnity(parserBase):
    """Extracts tags, description, title and preview image from a Unity
    Asset Store product page."""

    def parse(self, url, asset):
        """Fetch `url` and scrape asset metadata.

        Downloads the advertised preview image into asset['path'] when one
        is present. Returns a dict with keys "tags", "description", "name".
        """
        page = self.getHtml(url)

        # Tag pills on the store page (the class names are obfuscated and
        # brittle; an empty list simply means no tags were found).
        tags = re.findall('<div class="Eh1GG _15pcy".*?>(.*?)</div>', page,
                          flags=re.IGNORECASE)

        description = ""
        desc_match = re.search(
            '<meta name="twitter:description" content="(.*?)"', page,
            flags=re.I | re.DOTALL)
        if desc_match is not None:
            description = desc_match.group(1)
            # Turn block-level HTML breaks into newlines before stripping
            # the remaining markup.
            for breaker in ("</p>", "</li>", "<br>", "<br />"):
                description = description.replace(breaker, "\n")
            description = self.cleanhtml(description)

        name = ""
        title_match = re.search(
            '<meta name="twitter:title" content="(.*?)"', page,
            flags=re.IGNORECASE)
        if title_match is not None:
            name = title_match.group(1).replace(" - Asset Store", "")

        image_match = re.search(
            '<meta name="twitter:image" content="(.*?)"', page,
            flags=re.IGNORECASE)
        if image_match is not None:
            self.saveURL(image_match.group(1), asset['path'])

        return {"tags": tags, "description": description, "name": name}
acded5ccae853a5603c86336829ea60c4e04d00c | 529 | py | Python | py/cidoc_crm_types/entities/e30_right.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | py/cidoc_crm_types/entities/e30_right.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | py/cidoc_crm_types/entities/e30_right.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | from .e89_propositional_object import E89PropositionalObject
from dataclasses import dataclass
@dataclass
class E30Right(E89PropositionalObject):
"""
Scope note:
This class comprises legal privileges concerning material and immaterial things or their derivatives.
These include reproduction and property rights.
Examples:
- Copyright held by ISO on ISO/CD 21127
- ownership of the "Mona Lisa" by the Louvre
In First Order Logic:
E30(x) ⊃ E89(x
"""
TYPE_URI = "http://erlangen-crm.org/current/E30_Right"
| 22.041667 | 101 | 0.775047 |
acded602bcd4220721b744e04667a288ecef4072 | 1,466 | py | Python | reactivetools/periodic_event.py | sepidehpouyan/reactive-tools | b30085e7ae13aa1e22942d1844189e85aaf98432 | [
"MIT"
] | null | null | null | reactivetools/periodic_event.py | sepidehpouyan/reactive-tools | b30085e7ae13aa1e22942d1844189e85aaf98432 | [
"MIT"
] | 1 | 2021-07-20T16:26:11.000Z | 2021-07-27T09:08:11.000Z | reactivetools/periodic_event.py | sepidehpouyan/reactive-tools | b30085e7ae13aa1e22942d1844189e85aaf98432 | [
"MIT"
] | 2 | 2021-02-19T13:37:06.000Z | 2021-08-03T12:24:20.000Z | import logging
class PeriodicEvent:
    """A timer-driven invocation of a module entry point on its node."""

    def __init__(self, name, id_, module, entry, frequency, established):
        self.name = name
        self.id = id_
        self.module = module
        self.entry = entry
        self.frequency = frequency
        self.established = established

    @staticmethod
    def load(event_dict, config):
        """Build a PeriodicEvent from its dict description.

        When the dict carries no 'id', one is taken from
        `config.events_current_id` (which is then advanced); a missing
        'name' defaults to "event<id>".
        """
        module = config.get_module(event_dict['module'])
        entry = event_dict['entry']
        frequency = event_dict['frequency']
        established = event_dict.get('established')

        event_id = event_dict.get('id')
        if event_id is None:
            # Incremental ID handed out by the shared config counter.
            event_id = config.events_current_id
            config.events_current_id += 1

        name = event_dict.get('name') or "event{}".format(event_id)
        return PeriodicEvent(name, event_id, module, entry, frequency,
                             established)

    def dump(self):
        """Serialize back to the dict shape accepted by `load`."""
        description = {"name": self.name, "id": self.id}
        description["module"] = self.module.name
        description["entry"] = self.entry
        description["frequency"] = self.frequency
        description["established"] = self.established
        return description

    async def register(self):
        """Register this event's entry point on its node; no-op if already
        established."""
        if not self.established:
            node = self.module.node
            await node.register_entrypoint(self.module, self.entry,
                                           self.frequency)
            logging.info('Registered %s:%s on %s every %d ms',
                         self.module.name, self.entry, node.name,
                         self.frequency)
            self.established = True
acded6f7be6caec7e644b7c2cdc13d1893b82b7d | 6,778 | py | Python | python/test.py | AaronWChen/suggest_recipe | 3d86693c0680804b9af475a428e7db6152ab2628 | [
"MIT"
] | 1 | 2020-12-08T19:42:45.000Z | 2020-12-08T19:42:45.000Z | python/test.py | AaronWChen/suggest_recipe | 3d86693c0680804b9af475a428e7db6152ab2628 | [
"MIT"
] | 7 | 2020-03-26T22:10:27.000Z | 2022-03-12T00:22:11.000Z | python/test.py | AaronWChen/suggest_recipe | 3d86693c0680804b9af475a428e7db6152ab2628 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import collections
import numpy as np
train_path = "../raw_data/train.json"
save_path = "../write_data"
embedding_size = 100
epochs_desired = 15
learning_rate = 0.025
regularization = 0.01
algo_optimizer = 'adam'
flatten = lambda l: [item for sublist in l for item in sublist]
def read_data(train_path):
    """Load a corpus file: one sentence per line, whitespace-separated tokens.

    The file is opened in binary mode, so the returned tokens are `bytes`.

    Returns:
        A list of sentences, each a list of `bytes` tokens.
    """
    with open(train_path, 'rb') as corpus:
        return [line.rstrip().split() for line in corpus]
def build_dataset(sentences, min_count=0):
    """Map tokenized sentences to integer ids and build the vocabulary.

    Args:
        sentences: list of token lists (one list per sentence).
        min_count: tokens whose corpus frequency is <= min_count are mapped
            to the 'UNK' bucket (id 0).

    Returns:
        data: sentences with every token replaced by its integer id.
        count: list of [token, frequency] pairs; count[0] is ['UNK', n_unk].
        dictionary: token -> id mapping ('UNK' always gets id 0).
        reverse_dictionary: id -> token mapping.
    """
    count = [['UNK', -1]]
    # Stream the tokens straight into the Counter instead of materializing
    # a flattened copy of the corpus (previously done with a module-level
    # `flatten` lambda). `most_common()` without an argument already yields
    # every entry, so the explicit length argument was redundant.
    counter = collections.Counter(
        word for sentence in sentences for word in sentence)
    filt = [(word, c) for word, c in counter.most_common() if c > min_count]
    count.extend(filt)
    dictionary = {}
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = []
    unk_count = 0
    for sentence in sentences:
        sentence_ids = []
        for word in sentence:
            if word in dictionary:
                index = dictionary[word]
            else:
                index = 0  # dictionary['UNK']
                unk_count += 1
            sentence_ids.append(index)
        data.append(sentence_ids)
    count[0][1] = unk_count  # Backfill the observed UNK frequency.
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
def build_train_validation(data, validation_fraction=0.1):
    """Split id-encoded sentences into training and validation sets.

    Validation sentences containing a token that never occurs in the
    training split are moved back into the training set, so every
    validation token has training support.

    Args:
        data: list of sentences (lists of token ids).
        validation_fraction: fraction of sentences sampled (without
            replacement) as validation candidates.

    Returns:
        (train_data, vad_data) tuple of sentence lists.
    """
    vad_idx = np.random.choice(
        range(len(data)), int(validation_fraction * len(data)), replace=False)
    raw_vad_data = [data[i] for i in vad_idx]
    train_data = [data[i] for i in list(set(range(len(data))) - set(vad_idx))]
    # Count training tokens lazily instead of materializing a flattened copy
    # of the corpus (previously done via the module-level `flatten` lambda).
    train_counts = collections.Counter(
        word for sentence in train_data for word in sentence)
    vad_data = []
    for vad_sentence in raw_vad_data:
        if any(word not in train_counts for word in vad_sentence):
            # Contains an unseen token: keep it on the training side.
            train_data.append(vad_sentence)
        else:
            vad_data.append(vad_sentence)
    # The previous triple-quoted f-string embedded a raw newline plus
    # indentation into the log line; print one clean line instead.
    print(f"Split data into {len(train_data)} train and {len(vad_data)} "
          "validation")
    return train_data, vad_data
def generate_batch(data, corpus_size, count, subsample=1e-3):
    """Fetch the next (possibly subsampled) sentence as a training batch.

    Advances the module-level `sentence_index` cursor by one sentence per
    call, wrapping around at the end of `data`. The dead
    `global words_processed` declaration was removed: this function never
    assigned that variable.

    Args:
        data: list of sentences (lists of token ids).
        corpus_size: total number of tokens in the training corpus.
        count: list of [token, frequency] pairs indexed by token id.
        subsample: word2vec-style subsampling threshold; 0. disables it.

    Returns:
        The (batch, labels) pair produced by `get_sentence_inputs`.
    """
    global sentence_index
    raw_sentence = data[sentence_index]
    if subsample == 0.:
        sentence = raw_sentence
    else:
        sentence = []
        for word_id in raw_sentence:
            word_freq = count[word_id][1]
            # Frequent words are kept with lower probability.
            # NOTE(review): formula presumed to follow word2vec subsampling;
            # verify against the reference implementation.
            keep_prob = ((np.sqrt(word_freq / (subsample * corpus_size)) + 1) *
                         (subsample * corpus_size) / word_freq)
            # Condition inverted from the original `if drop: pass else:` --
            # behavior is identical, the empty branch is gone.
            if np.random.rand() <= keep_prob:
                sentence.append(word_id)
    if len(sentence) < 2:
        # Subsampling left fewer than two tokens; fall back to the original.
        sentence = raw_sentence
    sentence_index = (sentence_index + 1) % len(data)
    return get_sentence_inputs(sentence, len(count))
def get_sentence_inputs(sentence, vocabulary_size):
    """Pair each word of a sentence with all *other* words of that sentence.

    Args:
        sentence: list of token ids. Assumed to contain distinct ids, so
            every per-word context list has the same length (otherwise the
            ragged rows would not stack into a rectangular array).
        vocabulary_size: unused here; kept for call-site compatibility.

    Returns:
        batch: int32 array of the sentence's token ids.
        labels: int32 array whose i-th row holds every token id except the
            i-th word.
    """
    unique_ids = set(sentence)
    batch = np.asarray(sentence, dtype=np.int32)
    contexts = [list(unique_ids - {word}) for word in sentence]
    labels = np.asarray(contexts, dtype=np.int32)
    return batch, labels
def train():
    """Prepare data and build the bag-of-words Keras model for the corpus
    at `train_path`.

    NOTE(review): this function mixes TF1 graph-style placeholders/Variables
    with the Keras API, and the actual `model.fit`/`model.evaluate` calls are
    commented out below. As written it only loads data, prints a sample
    batch, builds the model and runs the batch-generation loop.
    """
    raw_data = read_data(train_path)
    data, count, dictionary, reverse_dictionary = build_dataset(raw_data)
    train_data, vad_data = build_train_validation(data)
    vocabulary_size = len(dictionary)
    words_per_epoch = len(flatten(train_data))
    sentences_per_epoch = len(train_data)
    del raw_data  # Hint to reduce memory.
    print('Most common words (+UNK)', count[:5])
    print('Sample data', data[0][:10],
          [reverse_dictionary[i] for i in data[0][:10]])
    # Cursor state consumed and advanced by generate_batch().
    global sentence_index
    global words_processed
    sentence_index = 0
    words_processed = 0
    print('example batch: ')
    batch, labels = generate_batch(data, words_per_epoch, count)
    for i in range(len(batch)):
        print(batch[i], reverse_dictionary[batch[i]],
              '->', [w for w in labels[i]],
              [reverse_dictionary[w] for w in labels[i]])
    valid_size = 16  # Random set of words to evaluate similarity on.
    valid_window = 100  # Only pick words in the head of the distribution
    valid_examples = np.random.choice(valid_window, valid_size, replace=False)
    # TF1-style graph inputs. NOTE(review): these are never fed -- no
    # sess.run() appears in this function and the feed_dict is commented out.
    train_inputs = tf.Variable(initial_value=tf.ones([1, 1],
                                                     dtype=tf.int32),
                               validate_shape=False)
    train_labels = tf.Variable(initial_value=tf.ones([1, 1],
                                                     dtype=tf.int32),
                               validate_shape=False)
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    words_processed_ph = tf.Variable(initial_value=tf.zeros(
        [1, 1],
        dtype=tf.int32),
        validate_shape=False)
    words_to_train = float(words_per_epoch * epochs_desired)
    # Learning rate decays linearly with words processed, floored at
    # 0.0001 of the base rate.
    lr = learning_rate * tf.maximum(0.0001,
                                    1.0 - tf.cast(words_processed_ph,
                                                  tf.float32) / words_to_train)
    model = keras.Sequential([
        keras.layers.Embedding(input_dim=vocabulary_size,
                               output_dim=embedding_size,
                               ),
        keras.layers.GlobalAveragePooling1D(),
        keras.layers.Dense(1, activation='softmax')
    ])
    if algo_optimizer == 'sgd':
        model.compile(optimizer=keras.optimizers.SGD(lr),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    elif algo_optimizer == 'adam':
        model.compile(optimizer=keras.optimizers.Adam(lr,
                                                      beta_1=0.9,
                                                      beta_2=0.999,
                                                      epsilon=1e-6),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    num_steps = 1000001
    average_loss = 0.
    sentences_to_train = epochs_desired * len(data)
    # NOTE(review): this loop only produces batches; nothing consumes them
    # since the training calls below are commented out.
    for step in range(num_steps):
        if step < sentences_to_train:
            batch_inputs, batch_labels = generate_batch(train_data,
                                                        words_per_epoch,
                                                        count)
    #feed_dict = {train_inputs: batch_inputs,
    #             train_labels: batch_labels,
    #             words_processed_ph.experimental_ref(): words_processed}
    #print(model.summary())
    print(train_data)
    #model.fit(np.array(train_data), epochs=epochs_desired)
    #model.evaluate(np.array(vad_data))
train() | 36.053191 | 79 | 0.588227 |
acded70d12813340450d19a89d19db70d53ee861 | 33,991 | py | Python | tensorflow_probability/python/distributions/joint_distribution.py | adriang133/probability | edfc4585f38017153fe7bf1a7287fcdd237912c4 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/joint_distribution.py | adriang133/probability | edfc4585f38017153fe7bf1a7287fcdd237912c4 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/joint_distribution.py | adriang133/probability | edfc4585f38017153fe7bf1a7287fcdd237912c4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The `JointDistribution` base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import docstring_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import samplers
from tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import
__all__ = [
'dummy_seed',
'JointDistribution',
]
JAX_MODE = False
CALLING_CONVENTION_DESCRIPTION = """
The measure methods of `JointDistribution` (`log_prob`, `prob`, etc.)
can be called either by passing a single structure of tensors or by using
named args for each part of the joint distribution state. For example,
```python
jd = tfd.JointDistributionSequential([
tfd.Normal(0., 1.),
lambda z: tfd.Normal(z, 1.)
], validate_args=True)
jd.dtype
# ==> [tf.float32, tf.float32]
z, x = sample = jd.sample()
# The following calling styles are all permissable and produce the exactly
# the same output.
assert (jd.{method}(sample) ==
jd.{method}(value=sample) ==
jd.{method}(z, x) ==
jd.{method}(z=z, x=x) ==
jd.{method}(z, x=x))
# These calling possibilities also imply that one can also use `*`
# expansion, if `sample` is a sequence:
jd.{method}(*sample)
# and similarly, if `sample` is a map, one can use `**` expansion:
jd.{method}(**sample)
```
`JointDistribution` component distributions names are resolved via
`jd._flat_resolve_names()`, which is implemented by each `JointDistribution`
subclass (see subclass documentation for details). Generally, for components
where a name was provided---
either explicitly as the `name` argument to a distribution or as a key in a
dict-valued JointDistribution, or implicitly, e.g., by the argument name of
a `JointDistributionSequential` distribution-making function---the provided
name will be used. Otherwise the component will receive a dummy name; these
may change without warning and should not be relied upon.
Note: not all `JointDistribution` subclasses support all calling styles;
for example, `JointDistributionNamed` does not support positional arguments
(aka "unnamed arguments") unless the provided model specifies an ordering of
variables (i.e., is an `collections.OrderedDict` or `collections.namedtuple`
rather than a plain `dict`).
Note: care is taken to resolve any potential ambiguity---this is generally
possible by inspecting the structure of the provided argument and "aligning"
it to the joint distribution output structure (defined by `jd.dtype`). For
example,
```python
trivial_jd = tfd.JointDistributionSequential([tfd.Exponential(1.)])
trivial_jd.dtype # => [tf.float32]
trivial_jd.{method}([4.])
# ==> Tensor with shape `[]`.
{method_abbr} = trivial_jd.{method}(4.)
# ==> Tensor with shape `[]`.
```
Notice that in the first call, `[4.]` is interpreted as a list of one
scalar while in the second call the input is a scalar. Hence both inputs
result in identical scalar outputs. If we wanted to pass an explicit
vector to the `Exponential` component---creating a vector-shaped batch
of `{method}`s---we could instead write
`trivial_jd.{method}(np.array([4]))`.
Args:
*args: Positional arguments: a `value` structure or component values
(see above).
**kwargs: Keyword arguments: a `value` structure or component values
(see above). May also include `name`, specifying a Python string name
for ops generated by this method.
"""
# Avoids name collision with measure function (`log_prob`, `prob`, etc.) args.
FORBIDDEN_COMPONENT_NAMES = ('value', 'name')
def dummy_seed():
  """Returns a fixed constant seed, for cases needing samples without a seed."""
  # TODO(b/147874898): After 20 Dec 2020, drop the 42 and inline the zeros_seed.
  if JAX_MODE:
    return samplers.zeros_seed()
  return 42
@six.add_metaclass(abc.ABCMeta)
class JointDistribution(distribution_lib.Distribution):
"""Joint distribution over one or more component distributions.
This distribution enables both sampling and joint probability computation from
a single model specification.
A joint distribution is a collection of possibly interdependent distributions.
**Note**: unlike other non-`JointDistribution` distributions in
`tfp.distributions`, `JointDistribution.sample` (and subclasses) return a
structure of `Tensor`s rather than a `Tensor`. A structure can be a `list`,
`tuple`, `dict`, `collections.namedtuple`, etc. Accordingly
`joint.batch_shape` returns a structure of `TensorShape`s for each of the
distributions' batch shapes and `joint.batch_shape_tensor()` returns a
structure of `Tensor`s for each of the distributions' event shapes. (Same with
`event_shape` analogues.)
#### Subclass Requirements
Subclasses implement:
- `_flat_sample_distributions`: returns two `list`-likes: the first being a
sequence of `Distribution`-like instances the second being a sequence of
`Tensor` samples, each one drawn from its corresponding `Distribution`-like
instance. The optional `value` argument is either `None` or a `list`-like
with the same `len` as either of the results.
- `_model_flatten`: takes a structured input and returns a sequence.
- `_model_unflatten`: takes a sequence and returns a structure matching the
semantics of the `JointDistribution` subclass.
Subclasses initialize:
- `_single_sample_distributions`: an empty dictionary, which will hold a
mapping from graph id to a prototypical list of component distributions
sampled from the model.
"""
def _get_single_sample_distributions(self, candidate_dists=None):
"""Returns a list of dists from a single sample of the model."""
# If we have cached distributions with Eager tensors, return those.
ds = self._single_sample_distributions.get(-1, None)
if ds is not None and all([d is not None for d in ds]):
return ds
# Otherwise, retrieve or build distributions for the current graph.
graph_id = -1 if tf.executing_eagerly() else id(tf.constant(True).graph)
ds = self._single_sample_distributions.get(graph_id, None)
if ds is None or any([d is None for d in ds]):
if candidate_dists is not None:
ds = candidate_dists
else:
ds = self._flat_sample_distributions( # Constant seed for CSE.
seed=dummy_seed())[0]
self._single_sample_distributions[graph_id] = ds
return ds
# Override `tf.Module`'s `_flatten` method to ensure that distributions are
# instantiated, so that accessing `.variables` or `.trainable_variables` gives
# consistent results.
def _flatten(self, *args, **kwargs):
self._get_single_sample_distributions()
return super(JointDistribution, self)._flatten(*args, **kwargs)
@abc.abstractmethod
def _flat_sample_distributions(self, sample_shape=(), seed=None, value=None):
raise NotImplementedError()
@abc.abstractmethod
def _model_unflatten(self, xs):
raise NotImplementedError()
@abc.abstractmethod
def _model_flatten(self, xs):
raise NotImplementedError()
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._model_unflatten([
d.dtype for d in self._get_single_sample_distributions()])
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`tfd.FULLY_REPARAMETERIZED` or `tfd.NOT_REPARAMETERIZED`.
Returns:
reparameterization_type: `ReparameterizationType` of each distribution in
`model`.
"""
return self._model_unflatten([
d.reparameterization_type
for d in self._get_single_sample_distributions()])
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `tuple` of `TensorShape`s representing the `batch_shape` for
each distribution in `model`.
"""
return self._model_unflatten([
d.batch_shape for d in self._get_single_sample_distributions()])
def batch_shape_tensor(self, sample_shape=(), name='batch_shape_tensor'):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
sample_shape: The sample shape under which to evaluate the joint
distribution. Sample shape at root (toplevel) nodes may affect the batch
or event shapes of child nodes.
name: name to give to the op
Returns:
batch_shape: `Tensor` representing batch shape of each distribution in
`model`.
"""
with self._name_and_control_scope(name):
return self._model_unflatten(
self._map_attr_over_dists(
'batch_shape_tensor',
dists=(self.sample_distributions(sample_shape)
if sample_shape else None)))
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `tuple` of `TensorShape`s representing the `event_shape` for
each distribution in `model`.
"""
# Caching will not leak graph Tensors since this is a static attribute.
if not hasattr(self, '_cached_event_shape'):
self._cached_event_shape = [
d.event_shape
for d in self._get_single_sample_distributions()]
# Unflattening *after* retrieving from cache prevents tf.Module from
# wrapping the returned value.
return self._model_unflatten(self._cached_event_shape)
def event_shape_tensor(self, sample_shape=(), name='event_shape_tensor'):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
sample_shape: The sample shape under which to evaluate the joint
distribution. Sample shape at root (toplevel) nodes may affect the batch
or event shapes of child nodes.
name: name to give to the op
Returns:
event_shape: `tuple` of `Tensor`s representing the `event_shape` for each
distribution in `model`.
"""
with self._name_and_control_scope(name):
return self._model_unflatten(
self._map_attr_over_dists(
'event_shape_tensor',
dists=(self.sample_distributions(sample_shape)
if sample_shape else None)))
def sample_distributions(self, sample_shape=(), seed=None, value=None,
name='sample_distributions', **kwargs):
"""Generate samples and the (random) distributions.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for generating random numbers.
value: `list` of `Tensor`s in `distribution_fn` order to use to
parameterize other ("downstream") distribution makers.
Default value: `None` (i.e., draw a sample from each distribution).
name: name prepended to ops created by this function.
Default value: `"sample_distributions"`.
**kwargs: This is an alternative to passing a `value`, and achieves the
same effect. Named arguments will be used to parameterize other
dependent ("downstream") distribution-making functions. If a `value`
argument is also provided, raises a ValueError.
Returns:
distributions: a `tuple` of `Distribution` instances for each of
`distribution_fn`.
samples: a `tuple` of `Tensor`s with prepended dimensions `sample_shape`
for each of `distribution_fn`.
"""
with self._name_and_control_scope(name):
ds, xs = self._call_flat_sample_distributions(sample_shape, seed, value,
**kwargs)
return self._model_unflatten(ds), self._model_unflatten(xs)
def log_prob_parts(self, value, name='log_prob_parts'):
"""Log probability density/mass function.
Args:
value: `list` of `Tensor`s in `distribution_fn` order for which we compute
the `log_prob_parts` and to parameterize other ("downstream")
distributions.
name: name prepended to ops created by this function.
Default value: `"log_prob_parts"`.
Returns:
log_prob_parts: a `tuple` of `Tensor`s representing the `log_prob` for
each `distribution_fn` evaluated at each corresponding `value`.
"""
with self._name_and_control_scope(name):
xs = self._map_measure_over_dists('log_prob', value)
return self._model_unflatten(
maybe_check_wont_broadcast(xs, self.validate_args))
def prob_parts(self, value, name='prob_parts'):
"""Log probability density/mass function.
Args:
value: `list` of `Tensor`s in `distribution_fn` order for which we compute
the `prob_parts` and to parameterize other ("downstream") distributions.
name: name prepended to ops created by this function.
Default value: `"prob_parts"`.
Returns:
prob_parts: a `tuple` of `Tensor`s representing the `prob` for
each `distribution_fn` evaluated at each corresponding `value`.
"""
with self._name_and_control_scope(name):
xs = self._map_measure_over_dists('prob', value)
return self._model_unflatten(
maybe_check_wont_broadcast(xs, self.validate_args))
def is_scalar_event(self, name='is_scalar_event'):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor` for each distribution in `model`.
"""
with self._name_and_control_scope(name):
return self._model_unflatten(
[self._is_scalar_helper(shape, shape_tensor) # pylint: disable=g-complex-comprehension
for (shape, shape_tensor) in zip(
self._model_flatten(self.event_shape),
self._model_flatten(self.event_shape_tensor()))])
def is_scalar_batch(self, name='is_scalar_batch'):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor` for each distribution in `model`.
"""
with self._name_and_control_scope(name):
return self._model_unflatten(
self._map_attr_over_dists('is_scalar_batch'))
def _log_prob(self, value):
xs = self._map_measure_over_dists('log_prob', value)
return sum(maybe_check_wont_broadcast(xs, self.validate_args))
@distribution_util.AppendDocstring(kwargs_dict={
'value': ('`Tensor`s structured like `type(model)` used to parameterize '
'other dependent ("downstream") distribution-making functions. '
'Using `None` for any element will trigger a sample from the '
'corresponding distribution. Default value: `None` '
'(i.e., draw a sample from each distribution).'),
'**kwargs:': ('This is an alternative to passing a `value`, and achieves '
'the same effect. Named arguments will be used to '
'parameterize other dependent ("downstream") '
'distribution-making functions. See `value` for more '
'details. If a `value` argument is also provided, raises '
'a `ValueError`.')})
def _sample_n(self, sample_shape, seed, value=None, **kwargs):
if value is not None and kwargs:
keywords = ', '.join(map(str, kwargs))
raise ValueError('Supplied both `value` and keyword arguments to '
'parameterize sampling. Supplied keywords were: '
'{}'.format(keywords))
_, xs = self._call_flat_sample_distributions(sample_shape, seed, value,
**kwargs)
return self._model_unflatten(xs)
def _map_measure_over_dists(self, attr, value):
if any(x is None for x in tf.nest.flatten(value)):
raise ValueError('No `value` part can be `None`; saw: {}.'.format(value))
ds, xs = self._call_flat_sample_distributions(
value=value, seed=dummy_seed())
return (getattr(d, attr)(x) for d, x in zip(ds, xs))
def _map_attr_over_dists(self, attr, dists=None):
dists = (self._get_single_sample_distributions()
if dists is None else dists)
return (getattr(d, attr)() for d in dists)
def _call_flat_sample_distributions(
self, sample_shape=(), seed=None, value=None, **kwargs):
if (value is None) and kwargs:
names = self._flat_resolve_names()
kwargs.update({k: kwargs.get(k) for k in names}) # In place update
value, unmatched_kwargs = _resolve_value_from_args(
[],
kwargs,
dtype=self.dtype,
flat_names=names,
model_flatten_fn=self._model_flatten,
model_unflatten_fn=self._model_unflatten)
if unmatched_kwargs:
join = lambda args: ', '.join(str(j) for j in args)
kwarg_names = join(k for k, v in kwargs.items() if v is not None)
dist_name_str = join(names)
unmatched_str = join(unmatched_kwargs)
raise ValueError(
'Found unexpected keyword arguments. Distribution names '
'are\n{}\nbut received\n{}\nThese names were '
'invalid:\n{}'.format(dist_name_str, kwarg_names, unmatched_str))
if value is not None:
value = self._model_flatten(value)
ds, xs = self._flat_sample_distributions(sample_shape, seed, value)
if not sample_shape and value is None:
# Maybe cache these distributions.
self._get_single_sample_distributions(candidate_dists=ds)
return ds, xs
# Override the base method to capture *args and **kwargs, so we can
# implement more flexible custom calling semantics.
@docstring_util.expand_docstring(
calling_convention_description=CALLING_CONVENTION_DESCRIPTION.format(
method='log_prob', method_abbr='lp'))
def log_prob(self, *args, **kwargs): # pylint: disable=g-doc-args
"""Log probability density/mass function.
${calling_convention_description}
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
kwargs['name'] = kwargs.get('name', 'log_prob')
value, unmatched_kwargs = _resolve_value_from_args(
args,
kwargs,
dtype=self.dtype,
flat_names=self._flat_resolve_names(),
model_flatten_fn=self._model_flatten,
model_unflatten_fn=self._model_unflatten)
return self._call_log_prob(value, **unmatched_kwargs)
# Override the base method to capture *args and **kwargs, so we can
# implement more flexible custom calling semantics.
@docstring_util.expand_docstring(
calling_convention_description=CALLING_CONVENTION_DESCRIPTION.format(
method='prob', method_abbr='prob'))
def prob(self, *args, **kwargs): # pylint: disable=g-doc-args
"""Probability density/mass function.
${calling_convention_description}
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
kwargs['name'] = kwargs.get('name', 'prob')
value, unmatched_kwargs = _resolve_value_from_args(
args,
kwargs,
dtype=self.dtype,
flat_names=self._flat_resolve_names(),
model_flatten_fn=self._model_flatten,
model_unflatten_fn=self._model_unflatten)
return self._call_prob(value, **unmatched_kwargs)
def _flat_resolve_names(self, dummy_name='var'):
"""Resolves a name for each random variable in the model."""
names = []
names_used = set()
for dummy_idx, d in enumerate(self._get_single_sample_distributions()):
name = get_explicit_name_for_component(d)
if name is None:
name = '{}{}'.format(dummy_name, dummy_idx)
if name in names_used:
raise ValueError('Duplicated distribution name: {}'.format(name))
else:
names_used.add(name)
names.append(name)
return names
# We need to bypass base Distribution reshaping logic, so we
# tactically implement the `_call_sample_n` redirector. We don't want to
# override the public level because then tfp.layers can't take generic
# `Distribution.sample` as argument for the `convert_to_tensor_fn` parameter.
def _call_sample_n(self, sample_shape, seed, name, value=None, **kwargs):
with self._name_and_control_scope(name):
return self._sample_n(
sample_shape,
seed=seed() if callable(seed) else seed,
value=value,
**kwargs)
  def _default_event_space_bijector(self):
    """Returns the `_DefaultJointBijector` built from this joint distribution."""
    return _DefaultJointBijector(self)
def get_explicit_name_for_component(d):
  """Returns the explicitly-passed `name` of a Distribution, or None."""
  name = d.parameters.get('name', None)
  # A name containing the class name is treated as auto-generated, not
  # user-supplied.
  if name and d.__class__.__name__ in name:
    name = None
  if name and hasattr(d, '__init__'):
    # Compare against the constructor's default `name` value; an unchanged
    # default does not count as an explicit name.
    spec = tf_inspect.getfullargspec(d.__init__)
    defaults = spec.defaults or ()
    defaulted_args = spec.args[len(spec.args) - len(defaults):]
    default_name = dict(zip(defaulted_args, defaults)).get('name', None)
    if name == default_name:
      name = None
  if name in FORBIDDEN_COMPONENT_NAMES:
    raise ValueError('Distribution name "{}" is not allowed as a '
                     'JointDistribution component; please choose a different '
                     'name.'.format(name))
  return name
def _resolve_value_from_args(args,
                             kwargs,
                             dtype,
                             flat_names,
                             model_flatten_fn,
                             model_unflatten_fn):
  """Resolves a `value` structure matching `dtype` from a function call.

  This offers semantics equivalent to a Python callable `f(x1, x2, ..., xN)`,
  where `'x1', 'x2', ..., 'xN' = self._flat_resolve_names()` are the names of
  the model's component distributions. Arguments may be passed by position
  (`f(1., 2., 3.)`), by name (`f(x1=1., x2=2., x3=3.)`), or by a combination
  of approaches (`f(1., 2., x3=3.)`).

  Passing a `value` structure directly (as in `jd.log_prob(jd.sample())`) is
  supported by an optional `value` kwarg (`f(value=[1., 2., 3.])`), or by
  simply passing the value as the sole positional argument
  (`f([1., 2., 3.])`). For models having only a single component, a single
  positional argument that matches the structural type (e.g., a single Tensor,
  or a nested list or dict of Tensors) of that component is interpreted as
  specifying it; otherwise a single positional argument is interpreted as
  the overall `value`.

  Args:
    args: Positional arguments passed to the function being called.
    kwargs: Keyword arguments passed to the function being called.
    dtype: Nested structure of `dtype`s of model components.
    flat_names: Iterable of Python `str` names of model components.
    model_flatten_fn: Python `callable` that takes a structure and returns a
      list representing the flattened structure.
    model_unflatten_fn: Python `callable` that takes an iterable and returns a
      structure.

  Returns:
    value: A structure in which the observed arguments are arranged to match
      `dtype`.
    unmatched_kwargs: Python `dict` containing any keyword arguments that don't
      correspond to model components.

  Raises:
    ValueError: if the number of args passed doesn't match the number of
      model components, or if positional arguments are passed to a dict-valued
      distribution.
  """
  # Note: `pop` mutates the caller's kwargs dict; callers rely on that here.
  value = kwargs.pop('value', None)
  if value is not None:  # Respect 'value' as an explicit kwarg.
    return value, kwargs
  # Split kwargs into those naming model components and everything else.
  matched_kwargs = {k for k in flat_names if k in kwargs}
  unmatched_kwargs = {k: v for (k, v) in kwargs.items()
                      if k not in matched_kwargs}
  # If we have only a single positional arg, we need to disambiguate it by
  # examining the model structure.
  if len(args) == 1 and not matched_kwargs:
    if len(dtype) > 1:  # Model has multiple variables; arg must be a structure.
      return args[0], unmatched_kwargs
    # Otherwise the model has one variable. If its structure matches the arg,
    # interpret the arg as its value.
    first_component_dtype = model_flatten_fn(dtype)[0]
    try:
      # TODO(davmre): this assertion will falsely trigger if args[0] contains
      # nested lists that the user intends to be converted to Tensor. We should
      # try to relax it slightly (without creating false negatives).
      tf.nest.assert_same_structure(
          first_component_dtype, args[0], check_types=False)
      return model_unflatten_fn(args), unmatched_kwargs
    except (ValueError, TypeError):  # If RV doesn't match the arg, interpret
      return args[0], unmatched_kwargs  # the arg as a 'value' structure.
  # General case: every component must be specified exactly once, either
  # positionally or by name.
  num_components_specified = len(args) + len(kwargs) - len(unmatched_kwargs)
  if num_components_specified != len(flat_names):
    raise ValueError('Joint distribution expected values for {} components {}; '
                     'saw {} (from args {} and kwargs {}).'.format(
                         len(flat_names),
                         flat_names,
                         num_components_specified,
                         args,
                         kwargs))

  # Positional args are ambiguous for unordered (plain-dict) models.
  if args and (isinstance(dtype, dict) and not
               isinstance(dtype, collections.OrderedDict)):
    raise ValueError("Joint distribution with unordered variables can't "
                     "take positional args (saw {}).".format(args))

  value = model_unflatten_fn(kwargs[k] if k in kwargs else args[i]
                             for i, k in enumerate(flat_names))
  return value, unmatched_kwargs
def maybe_check_wont_broadcast(flat_xs, validate_args):
  """Verifies that `parts` don't broadcast."""
  flat_xs = tuple(flat_xs)  # So we can receive generators.
  if not validate_args:
    # Note: we don't try static validation because it is theoretically
    # possible that a user wants to take advantage of broadcasting.
    # Only when `validate_args` is `True` do we enforce the validation.
    return flat_xs
  msg = 'Broadcasting probably indicates an error in model specification.'
  shapes = tuple(prefer_static.shape(x) for x in flat_xs)
  adjacent_pairs = list(zip(shapes[1:], shapes[:-1]))
  if all(prefer_static.is_numpy(shape) for shape in shapes):
    # All shapes are statically known; check equality eagerly.
    if not all(np.all(a == b) for a, b in adjacent_pairs):
      raise ValueError(msg)
    return flat_xs
  # Otherwise defer to runtime assertions attached as control dependencies.
  assertions = [assert_util.assert_equal(a, b, message=msg)
                for a, b in adjacent_pairs]
  with tf.control_dependencies(assertions):
    return tuple(tf.identity(x) for x in flat_xs)
# TODO(b/162764645): Implement as a Composite bijector.
# The Composite CL generalizes Chain to arbitrary bijector DAGs. It will:
# 1) Define an abstract `CompositeBijector` class (for any bijector that
# wraps other bijectors, and does nothing else)
# 2) Express `Chain` and friends (including this) in terms of Composite.
# 3) Introduce `JointMap` (this class sans coroutine)
# 4) Introduce `Restructure`, as Chain+JM are pretty useless without it.
class _DefaultJointBijector(bijector_lib.Bijector):
  """Minimally-viable event space bijector for `JointDistribution`."""

  # TODO(b/148485798): Support joint bijectors in TransformedDistribution.
  def __init__(self, jd):
    """Constructs the joint bijector.

    Args:
      jd: The `JointDistribution` whose components supply the per-component
        default event space bijectors.
    """
    with tf.name_scope('default_joint_bijector') as name:
      structure = tf.nest.map_structure(lambda _: None, jd.dtype)
      super(_DefaultJointBijector, self).__init__(
          forward_min_event_ndims=structure,
          inverse_min_event_ndims=structure,
          validate_args=jd.validate_args,
          name=name)
      self._jd = jd

  def _check_inputs_not_none(self, value):
    """Raises `ValueError` if any part of the structured `value` is None."""
    if any(x is None for x in tf.nest.flatten(value)):
      raise ValueError('No `value` part can be `None`; saw: {}.'.format(value))

  # pylint: disable=protected-access
  def _evaluate_bijector(self, bijector_fn, values):
    """Walks the model coroutine, applying `bijector_fn` per component.

    `bijector_fn(bijector, value)` must return `(output, next_value)`; the
    `next_value` is fed back into the model coroutine so downstream
    components see transformed values.
    """
    gen = self._jd._model_coroutine()
    outputs = []
    d = next(gen)
    index = 0
    try:
      while True:
        dist = d.distribution if type(d).__name__ == 'Root' else d
        bijector = dist._experimental_default_event_space_bijector()
        # For discrete distributions, the default event space bijector is None.
        # For a joint distribution's discrete components, we want the behavior
        # of the Identity bijector.
        bijector = (identity_bijector.Identity()
                    if bijector is None else bijector)
        out, y = bijector_fn(bijector, values[index])
        outputs.append(out)
        d = gen.send(y)
        index += 1
    except StopIteration:
      pass
    return outputs

  def _event_shapes(self, input_shapes, event_shape_attr):
    """For forward/inverse static event shapes."""
    input_shapes = self._jd._model_flatten(input_shapes)
    support_bijectors = [
        d._experimental_default_event_space_bijector()
        for d in self._jd._get_single_sample_distributions()]
    output_shapes = [
        getattr(bijector, event_shape_attr)(input_shape)
        for (bijector, input_shape) in zip(support_bijectors, input_shapes)]
    return self._jd._model_unflatten(output_shapes)

  # We override the public methods so that the `default_event_space_bijector`s
  # of the component distributions, instead of that of the `JointDistribution`,
  # hit the global bijector cache.
  def forward(self, values, name=None):
    """Applies each component's forward bijector to `values`."""
    with tf.name_scope(name or 'forward'):
      values = self._jd._model_flatten(values)
      self._check_inputs_not_none(values)
      def bijector_fn(bijector, value):
        y = bijector.forward(value)
        return y, y
      out = self._evaluate_bijector(bijector_fn, values)
      return self._jd._model_unflatten(out)

  def inverse(self, values, name=None):
    """Applies each component's inverse bijector to `values`."""
    with tf.name_scope(name or 'inverse'):
      self._check_inputs_not_none(values)
      values = self._jd._model_flatten(values)
      def bijector_fn(bijector, value):
        x = bijector.inverse(value)
        # Feed the *untransformed* value back to the coroutine.
        return x, value
      out = self._evaluate_bijector(bijector_fn, values)
      return self._jd._model_unflatten(out)

  def forward_log_det_jacobian(self, values, event_ndims, name=None):
    """Sums the per-component forward log-det-Jacobians."""
    with tf.name_scope(name or 'forward_log_det_jacobian'):
      self._check_inputs_not_none(values)
      values = self._jd._model_flatten(values)
      event_ndims = self._jd._model_flatten(event_ndims)
      def bijector_fn(bijector, value):
        x, event_ndims = value
        y = bijector.forward(x)
        fldj = bijector.forward_log_det_jacobian(x, event_ndims)
        return fldj, y
      fldjs = self._evaluate_bijector(bijector_fn,
                                      list(zip(values, event_ndims)))
      return sum(fldjs)

  def inverse_log_det_jacobian(self, values, event_ndims, name=None):
    """Sums the per-component inverse log-det-Jacobians."""
    with tf.name_scope(name or 'inverse_log_det_jacobian'):
      self._check_inputs_not_none(values)
      values = self._jd._model_flatten(values)
      event_ndims = self._jd._model_flatten(event_ndims)
      def bijector_fn(bijector, value):
        y, event_ndims = value
        ildj = bijector.inverse_log_det_jacobian(y, event_ndims)
        return ildj, y
      ildjs = self._evaluate_bijector(bijector_fn,
                                      list(zip(values, event_ndims)))
      return sum(ildjs)
  # pylint: enable=protected-access

  # TODO(b/148485931): Fix bijector caching.
  def forward_event_shape(self, input_shapes):
    return self._event_shapes(input_shapes, 'forward_event_shape')

  def forward_event_shape_tensor(self, input_shapes, name=None):
    with tf.name_scope(name or 'forward_event_shape_tensor'):
      self._check_inputs_not_none(input_shapes)
      return self._event_shapes(input_shapes, 'forward_event_shape_tensor')

  def inverse_event_shape(self, output_shapes):
    return self._event_shapes(output_shapes, 'inverse_event_shape')

  def inverse_event_shape_tensor(self, output_shapes, name=None):
    # BUGFIX: honor the caller-supplied `name` (previously ignored: the scope
    # name was hard-coded), matching `forward_event_shape_tensor`.
    with tf.name_scope(name or 'inverse_event_shape_tensor'):
      self._check_inputs_not_none(output_shapes)
      return self._event_shapes(output_shapes, 'inverse_event_shape_tensor')
| 41.401949 | 97 | 0.688006 |
acded737e3474db184f847152cb0ca1d343bcd70 | 321 | py | Python | lnbits/extensions/splitpayments/views.py | taxmeifyoucan/lnbits | 19ae1ddf0d50b507135c418af9d5becc336d5ce3 | [
"MIT"
] | 258 | 2020-04-27T21:36:21.000Z | 2021-10-30T23:24:48.000Z | lnbits/extensions/splitpayments/views.py | taxmeifyoucan/lnbits | 19ae1ddf0d50b507135c418af9d5becc336d5ce3 | [
"MIT"
] | 283 | 2020-04-27T17:23:12.000Z | 2021-11-01T10:07:20.000Z | lnbits/extensions/splitpayments/views.py | taxmeifyoucan/lnbits | 19ae1ddf0d50b507135c418af9d5becc336d5ce3 | [
"MIT"
] | 109 | 2020-04-28T06:00:17.000Z | 2021-10-13T02:48:28.000Z | from quart import g, render_template
from lnbits.decorators import check_user_exists, validate_uuids
from . import splitpayments_ext
@splitpayments_ext.route("/")
@validate_uuids(["usr"], required=True)
@check_user_exists()
async def index():
    """Render the splitpayments extension's main page for the current user."""
    rendered = render_template("splitpayments/index.html", user=g.user)
    return await rendered
| 24.692308 | 73 | 0.788162 |
acded77e8e913db98546fb663bfc8654fa5efed0 | 1,592 | py | Python | axbench/inversek2j_mixed/script/qos.py | minhhn2910/CUDA-mixed-precision | 4b79702dd9678bb2fa00ec0ae68965307f1ccc40 | [
"MIT"
] | 2 | 2021-11-15T08:18:57.000Z | 2021-11-15T10:47:06.000Z | axbench/inversek2j_mixed/script/qos.py | minhhn2910/CUDA-mixed-precision | 4b79702dd9678bb2fa00ec0ae68965307f1ccc40 | [
"MIT"
] | null | null | null | axbench/inversek2j_mixed/script/qos.py | minhhn2910/CUDA-mixed-precision | 4b79702dd9678bb2fa00ec0ae68965307f1ccc40 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import math
class bcolors:
    """ANSI escape sequences for colored / styled terminal output."""
    HEADER = '\x1b[95m'
    OKBLUE = '\x1b[94m'
    OKGREEN = '\x1b[92m'
    WARNING = '\x1b[93m'
    FAIL = '\x1b[91m'
    ENDC = '\x1b[0m'       # reset all attributes
    BOLD = '\x1b[1m'
    UNDERLINE = '\x1b[4m'
def printUsage():
    """Print usage instructions and terminate with exit status 1."""
    # print() with a single argument and sys.exit() behave identically under
    # Python 2 and 3, unlike the original print statement / site-provided
    # exit(); `sys` is already imported at the top of this script.
    print("Usage: python qos.py <original file> <nn file>")
    sys.exit(1)
def findTarget(theta1, theta2, theta3):
    """Forward kinematics: end-effector (x, y) of a 3-link arm with unit links.

    Joint angles are cumulative: link k points along the sum of the first k
    joint angles.
    """
    cumulative_angles = (theta1, theta1 + theta2, theta1 + theta2 + theta3)
    x = sum(math.cos(a) for a in cumulative_angles)
    y = sum(math.sin(a) for a in cumulative_angles)
    return (x, y)
def distance(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return math.sqrt(dx * dx + dy * dy)
# Require exactly two CLI arguments: <original file> <nn file>.
if(len(sys.argv) != 3):
    printUsage()
origFilename = sys.argv[1]
nnFilename = sys.argv[2]
origLines = open(origFilename).readlines()
nnLines = open(nnFilename).readlines()
total = 0.0
# Accumulate the per-sample relative end-effector position error.
for i in range(len(origLines)):
    origLine = origLines[i].rstrip()
    nnLine = nnLines[i].rstrip()
    originalSplitted = origLine.split(" ")
    nnSplitted = nnLine.split(" ")
    # Columns 2-4 hold joint angles in degrees; convert to radians before
    # running forward kinematics.
    (xOrig, yOrig) = findTarget(float(originalSplitted[2]) * (math.pi / 180.0), float(originalSplitted[3])* (math.pi / 180.0), float(originalSplitted[4])* (math.pi / 180.0))
    (xNN, yNN) = findTarget(float(nnSplitted[2])* (math.pi / 180.0), float(nnSplitted[3])* (math.pi / 180.0), float(nnSplitted[4])* (math.pi / 180.0))
    # Error is normalized by the distance of the exact target from the origin.
    total += (distance(xOrig, yOrig, xNN, yNN) / math.sqrt(xOrig*xOrig + yOrig*yOrig))
# Report mean relative error as a percentage (Python 2 print statement).
print bcolors.FAIL + "*** Error: %1.2f%%" % ((total/float(len(origLines))) * 100) + bcolors.ENDC
| 28.428571 | 174 | 0.625628 |
acded82bff43a3850420ae790ff80d4def300332 | 3,835 | py | Python | city_scrapers/spiders/chi_development_fund.py | sameerchandra/city-scrapers | c6e466a06f610e56fa876b6e93a53a347d732536 | [
"MIT"
] | null | null | null | city_scrapers/spiders/chi_development_fund.py | sameerchandra/city-scrapers | c6e466a06f610e56fa876b6e93a53a347d732536 | [
"MIT"
] | 1 | 2019-10-05T04:05:48.000Z | 2019-10-05T04:05:48.000Z | city_scrapers/spiders/chi_development_fund.py | firejava/city-scrapers | 749f40bf1bd933726768d7d67e5211aef13af547 | [
"MIT"
] | null | null | null | import re
import dateutil.parser
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class ChiDevelopmentFundSpider(CityScrapersSpider):
    """Scrapes Chicago Development Fund board/advisory meetings."""

    name = 'chi_development_fund'
    agency = 'Chicago Development Fund'
    timezone = 'America/Chicago'
    allowed_domains = ['www.chicago.gov']
    start_urls = [
        'https://www.chicago.gov/city/en/depts/dcd/supp_info/chicago_developmentfund.html'
    ]

    def parse(self, response):
        """
        `parse` should always `yield` Meeting items.

        Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
        needs.
        """
        columns = self.parse_meetings(response)
        for column in columns:
            # Meeting date strings can appear as bare text, inside <p>, or
            # inside <ul> items, depending on the page layout.
            meeting_date_xpath = """text()[normalize-space()]|
                p/text()[normalize-space()]|
                ul//text()[normalize-space()]"""
            meetings = column.xpath(meeting_date_xpath).extract()
            meetings = self.format_meetings(meetings)
            for item in meetings:
                start = self._parse_start(item)
                if start is None:
                    # Skip entries without a fully-parseable date (e.g. "Jan. 2011").
                    continue
                meeting = Meeting(
                    title=self._parse_title(item),
                    description='',
                    classification=COMMISSION,
                    start=start,
                    end=None,
                    time_notes='See agenda for time',
                    all_day=False,
                    location={
                        'name': 'City Hall',
                        'address': '121 N LaSalle St, Room 1000, Chicago, IL 60602'
                    },
                    source=response.url,
                    links=self._parse_links(column, item, response),
                )
                meeting['id'] = self._get_id(meeting)
                meeting['status'] = self._get_status(meeting)
                yield meeting

    @staticmethod
    def format_meetings(meetings):
        """Normalizes scraped strings and drops empty entries."""
        # translate and filter out non-printable spaces
        meetings = [meeting.replace('\xa0', ' ').strip() for meeting in meetings]
        meetings = list(filter(None, meetings))
        return meetings

    @staticmethod
    def parse_meetings(response):
        """Selects the table cells following a "Meetings" heading."""
        meeting_xpath = """
            //td[preceding::strong[1]/text()[
                contains(., "Meetings")
            ]]"""
        return response.xpath(meeting_xpath)

    @staticmethod
    def _parse_title(meeting):
        """Classifies the meeting as Advisory Board or Board of Directors."""
        if 'advisory' in meeting.lower():
            return 'Advisory Board'
        return 'Board of Directors'

    @staticmethod
    def _parse_start(meeting):
        """Parses a start datetime from a meeting string, or None."""
        # Not all dates on site a valid dates (e.g. Jan. 2011), so try to parse
        # and return none if not possible
        clean_str = re.sub(r'[\.,]', '', meeting)
        # BUGFIX: the original character class `[a-zA-z]` also matched the
        # punctuation between 'Z' and 'a' ("[\]^_`"); month names are purely
        # alphabetic, so narrow it to `[a-zA-Z]`.
        date_str = re.search(r'[a-zA-Z]{1,10} \d{1,2} \d{4}', clean_str)
        if not date_str:
            return
        return dateutil.parser.parse(date_str.group())

    def _parse_links(self, item, meeting, response):
        """Collects agenda/minutes links adjacent to the meeting date text."""
        # Find <a> tags where 1st, non-blank, preceding text = meeting (e.g. 'Jan 16')
        # site is pretty irregular and text is sometimes nested, so check siblings children
        # for meeting name if not found for sibling
        anchor_xpath = """
            //a[preceding-sibling::text()[normalize-space()][1][contains(., "{}")]]
        """.format(meeting)
        documents = item.xpath(anchor_xpath)
        # BUGFIX: the original guarded this with `len(documents) >= 0`, which
        # is always true (len() is non-negative), leaving a dead `return []`
        # branch. The comprehension already yields [] for an empty selection.
        return [{
            'href': response.urljoin(document.xpath('@href').extract_first()),
            'title': document.xpath('text()').extract_first()
        } for document in documents]
| 38.35 | 91 | 0.558801 |
acded8552e6f8a340107539452249b5d64d04472 | 5,359 | py | Python | carla_ad_agent/src/carla_ad_agent/basic_agent.py | s-hillerk/ros-bridge | a096e936643273d73c68eea5e69bfc7c44ef26a3 | [
"MIT"
] | null | null | null | carla_ad_agent/src/carla_ad_agent/basic_agent.py | s-hillerk/ros-bridge | a096e936643273d73c68eea5e69bfc7c44ef26a3 | [
"MIT"
] | null | null | null | carla_ad_agent/src/carla_ad_agent/basic_agent.py | s-hillerk/ros-bridge | a096e936643273d73c68eea5e69bfc7c44ef26a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
BasicAgent implements a basic agent that navigates scenes to reach a given
target destination. This agent respects traffic lights and other vehicles.
"""
import math
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Pose
from derived_object_msgs.msg import ObjectArray
from carla_msgs.msg import CarlaActorList
from agent import Agent, AgentState # pylint: disable=relative-import
from local_planner import LocalPlanner # pylint: disable=relative-import
from carla_waypoint_types.srv import GetActorWaypoint
class BasicAgent(Agent):
    """
    BasicAgent implements a basic agent that navigates scenes to reach a given
    target destination. This agent respects traffic lights and other vehicles.
    """

    def __init__(self, role_name, ego_vehicle_id, avoid_risk=True):
        """
        :param role_name: ROS topic namespace of the ego vehicle
        :param ego_vehicle_id: actor id of the ego vehicle
        :param avoid_risk: if True, subscribe to actor/object topics and stop
            for blocking vehicles and red traffic lights
        """
        super(BasicAgent, self).__init__(role_name, ego_vehicle_id, avoid_risk)

        self._avoid_risk = avoid_risk
        self._current_speed = 0.0  # Km/h
        self._current_pose = Pose()
        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        args_lateral_dict = {
            'K_P': 0.9,
            'K_D': 0.0,
            'K_I': 0.1}
        self._local_planner = LocalPlanner(opt_dict={'lateral_control_dict': args_lateral_dict})

        if self._avoid_risk:
            self._vehicle_id_list = []
            self._lights_id_list = []
            self._actors_subscriber = rospy.Subscriber(
                "/carla/actor_list", CarlaActorList, self.actors_updated)
            self._objects = []
            self._objects_subscriber = rospy.Subscriber(
                "/carla/{}/objects".format(role_name), ObjectArray, self.objects_updated)
            self._get_actor_waypoint_client = rospy.ServiceProxy(
                '/carla_waypoint_publisher/{}/get_actor_waypoint'.format(role_name),
                GetActorWaypoint)

        self._odometry_subscriber = rospy.Subscriber(
            "/carla/{}/odometry".format(role_name), Odometry, self.odometry_updated)

    def get_actor_waypoint(self, actor_id):
        """
        helper method to get waypoint for actor via ros service
        Only used if risk should be avoided.
        """
        try:
            response = self._get_actor_waypoint_client(actor_id)
            return response.waypoint
        except (rospy.ServiceException, rospy.ROSInterruptException) as e:
            # BUGFIX: `rospy.is_shutdown` is a function; the original tested
            # the function object itself (always truthy), so the warning was
            # never logged. Call it to get the actual shutdown state.
            if not rospy.is_shutdown():
                rospy.logwarn("Service call failed: {}".format(e))

    def odometry_updated(self, odo):
        """
        callback on new odometry
        """
        # Convert linear velocity magnitude from m/s to km/h (* 3.6).
        self._current_speed = math.sqrt(odo.twist.twist.linear.x ** 2 +
                                        odo.twist.twist.linear.y ** 2 +
                                        odo.twist.twist.linear.z ** 2) * 3.6
        self._current_pose = odo.pose.pose
        super(BasicAgent, self).odometry_updated(odo)

    def actors_updated(self, actors):
        """
        callback on new actor list
        Only used if risk should be avoided.
        """
        # retrieve relevant elements for safe navigation, i.e.: traffic lights
        # and other vehicles
        self._vehicle_id_list = []
        self._lights_id_list = []
        for actor in actors.actors:
            if "vehicle" in actor.type:
                self._vehicle_id_list.append(actor.id)
            elif "traffic_light" in actor.type:
                self._lights_id_list.append(
                    (actor.id, self.get_actor_waypoint(actor.id)))

    def objects_updated(self, objects):
        """
        callback on new objects
        Only used if risk should be avoided.
        """
        self._objects = objects.objects

    def run_step(self, target_speed):
        """
        Execute one step of navigation.

        :return: carla.VehicleControl
        """
        finished = False

        # is there an obstacle in front of us?
        hazard_detected = False
        if self._avoid_risk:
            # check possible obstacles
            vehicle_state, vehicle = self._is_vehicle_hazard(  # pylint: disable=unused-variable
                self._vehicle_id_list, self._objects)
            if vehicle_state:
                #rospy.loginfo('=== Vehicle blocking ahead [{}])'.format(vehicle))
                self._state = AgentState.BLOCKED_BY_VEHICLE
                hazard_detected = True

            # check for the state of the traffic lights
            light_state, traffic_light = self._is_light_red(  # pylint: disable=unused-variable
                self._lights_id_list)
            if light_state:
                #rospy.loginfo('=== Red Light ahead [{}])'.format(traffic_light))
                self._state = AgentState.BLOCKED_RED_LIGHT
                hazard_detected = True

        if hazard_detected:
            control = self.emergency_stop()
        else:
            self._state = AgentState.NAVIGATING
            # standard local planner behavior
            control, finished = self._local_planner.run_step(
                target_speed, self._current_speed, self._current_pose)

        return control, finished
| 37.475524 | 96 | 0.623997 |
acded8a9f19a2cdc96fd2009c492c2842c188384 | 570 | py | Python | data_tracker/tasks.py | ahl54/Tracker | 7128aa54ea4180f8bff2500d13d2995bf7f81810 | [
"MIT"
] | null | null | null | data_tracker/tasks.py | ahl54/Tracker | 7128aa54ea4180f8bff2500d13d2995bf7f81810 | [
"MIT"
] | null | null | null | data_tracker/tasks.py | ahl54/Tracker | 7128aa54ea4180f8bff2500d13d2995bf7f81810 | [
"MIT"
] | null | null | null | from celery import task, current_task
from celery.result import AsyncResult
from time import sleep
from tracker import models
NUM_OBJ_TO_CREATE = 1000
# when this task is called, it will create 1000 objects in the database
@task()
def create_models():
    """Celery task: create NUM_OBJ_TO_CREATE MyModel rows, reporting progress."""
    total = NUM_OBJ_TO_CREATE
    for i in range(1, total + 1):
        record = models.MyModel(fn='Fn %s' % i, ln='Ln %s' % i)
        record.save()
        percent_done = int(100 * float(i) / float(total))
        sleep(0.1)
        # Publish progress so callers polling AsyncResult can display it.
        current_task.update_state(state='PROGRESS',
                                  meta={'process_percent': percent_done})
acdedbee6df4944e7548db132653eaebf8bc1e44 | 3,847 | py | Python | third_party/nucleus/io/python/reference_wrap_test.py | zyxue/deepvariant | ea4301049539d0b16263d08a62b97442427ada7c | [
"BSD-3-Clause"
] | 1 | 2018-10-24T01:18:56.000Z | 2018-10-24T01:18:56.000Z | third_party/nucleus/io/python/reference_wrap_test.py | CNaibon/deepvariant | ea4301049539d0b16263d08a62b97442427ada7c | [
"BSD-3-Clause"
] | null | null | null | third_party/nucleus/io/python/reference_wrap_test.py | CNaibon/deepvariant | ea4301049539d0b16263d08a62b97442427ada7c | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for GenomeReference CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
from third_party.nucleus.io.python import reference_fai
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import ranges
class WrapReferenceTest(parameterized.TestCase):
  """Exercises the CLIF-wrapped GenomeReferenceFai FASTA reader."""

  @parameterized.parameters('test.fasta', 'test.fasta.gz')
  def test_wrap(self, fasta_filename):
    # Expected contig layout of the bundled test FASTA (plain and gzipped).
    chr_names = ['chrM', 'chr1', 'chr2']
    chr_lengths = [100, 76, 121]
    fasta = test_utils.genomics_core_testdata(fasta_filename)
    fai = test_utils.genomics_core_testdata(fasta_filename + '.fai')
    with reference_fai.GenomeReferenceFai.from_file(fasta, fai) as ref:
      self.assertEqual(ref.contig_names, chr_names)
      # Base extraction and interval validation against known sequence.
      self.assertEqual(ref.bases(ranges.make_range('chrM', 1, 10)), 'ATCACAGGT')
      self.assertTrue(ref.is_valid_interval(ranges.make_range('chrM', 1, 10)))
      self.assertFalse(
          ref.is_valid_interval(ranges.make_range('chrM', 1, 100000)))
      self.assertEqual(len(ref.contigs), 3)
      self.assertEqual([c.name for c in ref.contigs], chr_names)
      self.assertEqual([c.n_bases for c in ref.contigs], chr_lengths)
      # Per-contig lookup round-trips and unknown names are rejected.
      for contig in ref.contigs:
        self.assertEqual(ref.contig(contig.name), contig)
        self.assertTrue(ref.has_contig(contig.name))
        self.assertFalse(ref.has_contig(contig.name + '.unknown'))

  @parameterized.parameters(
      # The fasta and the FAI are both missing.
      ('missing.fasta', 'missing.fasta.fai'),
      # The fasta is present but the FAI is missing.
      ('test.fasta', 'missing.fasta.fai'),
      # The fasta is missing but the FAI is present.
      ('missing.fasta', 'test.fasta.fai'),
  )
  def test_from_file_raises_with_missing_inputs(self, fasta_filename,
                                                fai_filename):
    # Any missing input file must produce a ValueError naming the fasta path.
    fasta = test_utils.genomics_core_testdata(fasta_filename)
    fai = test_utils.genomics_core_testdata(fai_filename)
    with self.assertRaisesRegexp(
        ValueError,
        'Not found: could not load fasta and/or fai for fasta ' + fasta):
      reference_fai.GenomeReferenceFai.from_file(fasta, fai)
reference_fai.GenomeReferenceFai.from_file(fasta, fai)
if __name__ == '__main__':
absltest.main()
| 44.218391 | 80 | 0.741357 |
acdedc1e62480a31b852629fd18e21e20ee72c8b | 613 | py | Python | hub/api/serializers/project.py | harenlewis/api-hub | f79cd8b82e95c039269765a4542866286803a322 | [
"MIT"
] | null | null | null | hub/api/serializers/project.py | harenlewis/api-hub | f79cd8b82e95c039269765a4542866286803a322 | [
"MIT"
] | 2 | 2020-06-05T19:41:09.000Z | 2021-06-10T21:07:30.000Z | hub/api/serializers/project.py | harenlewis/api-hub | f79cd8b82e95c039269765a4542866286803a322 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from hub.models import Project
class ProjectSerializer(serializers.ModelSerializer):
    """Serializes Project instances, exposing the creator's username."""

    # Resolved via get_created_by() below rather than the raw FK id.
    created_by = serializers.SerializerMethodField()

    class Meta:
        model = Project
        fields = [
            'id',
            'name',
            'uuid',
            'created_by',
            'created_at',
            'modified_by',
            'modified_at',
        ]
        # Audit fields are server-managed and must not be writable via the API.
        read_only_fields = ['uuid', 'created_by', 'created_at', 'modified_by',
                            'modified_at', ]

    def get_created_by(self, obj):
        """Return the username of the user who created the project."""
        return obj.created_by.username
acdedc5492a972264d1325fb201f1deefe7a4376 | 2,476 | py | Python | tests/test_oauth.py | czervenka/gapi | 7926ce3f82ada543b2fa189756b50906d3ab3952 | [
"Apache-2.0"
] | 1 | 2018-09-03T03:44:02.000Z | 2018-09-03T03:44:02.000Z | tests/test_oauth.py | czervenka/gapi | 7926ce3f82ada543b2fa189756b50906d3ab3952 | [
"Apache-2.0"
] | null | null | null | tests/test_oauth.py | czervenka/gapi | 7926ce3f82ada543b2fa189756b50906d3ab3952 | [
"Apache-2.0"
] | null | null | null | _multiprocess_can_split_ = False
from init import setUp
setUp()
from unittest import TestCase
from mock import patch
import mock
SERVICE_KEY = 'test_generated_key.pem'
SERVICE_EMAIL = 'service_account@example.com'
USER_EMAIL = 'user@example.com'
USER2_EMAIL = 'user2@example.com'
class DictObject(object):
    """Lightweight object exposing its keyword arguments as attributes."""

    def __init__(self, **properties):
        for key, value in properties.items():
            setattr(self, key, value)
def get_gae_mock_fetch():
    """Builds a MagicMock standing in for google.appengine.api.urlfetch.fetch."""
    mock_fetch = mock.MagicMock()
    # NOTE(review): assigning __call__ on the instance does not change how a
    # MagicMock is invoked; kept for fidelity with the original helper.
    mock_fetch.__call__ = mock.MagicMock()
    return mock_fetch
def get_gae_mock_memcache():
    """Builds a MagicMock memcache whose get() always misses (returns None)."""
    mock_memcache = mock.MagicMock()
    mock_memcache.get = mock.MagicMock()
    mock_memcache.set = mock.MagicMock()
    mock_memcache.get.return_value = None
    return mock_memcache
class TestToken(TestCase):
    """Offline tests for gapi.oauth2 token acquisition (GAE APIs are mocked)."""

    @patch('google.appengine.api.urlfetch.fetch', get_gae_mock_fetch())
    @patch('google.appengine.api.memcache', get_gae_mock_memcache())
    def test_gets_token(self):
        'Token tests (offline)'
        # Imports deferred so the @patch decorators are active first.
        from gapi import oauth2, exceptions
        from google.appengine.api.urlfetch import fetch
        from google.appengine.api import memcache
        token = oauth2.TokenRequest(SERVICE_EMAIL, SERVICE_KEY, 'https://www.googleapis.com/auth/calendar', USER_EMAIL)
        self.assertFalse(fetch.called, msg="Token fetched before needed (lazy evaluation failed).")
        # Simulate a successful auth-server response.
        fetch.return_value = DictObject(**{
            'status_code': 200,
            'content': '{"access_token" : "1/8xbJqaOZXSUZbHLl5EOtu1pxz3fmmetKx9W8CV4t79M", "token_type" : "Bearer", "expires_in" : 3250 }'
        })
        fetch.return_value.headers = {}
        # Disable internal locking so the test stays single-threaded.
        token._lock, token._unlock = lambda : True, lambda : True
        header = str(token)
        self.assertEquals(header, 'Bearer 1/8xbJqaOZXSUZbHLl5EOtu1pxz3fmmetKx9W8CV4t79M', msg="The token string representation does not conform values from auth server")
        self.assertEquals(memcache.get.call_count, 1, msg="Token tries to retrieve the key from memcahced first.")
        self.assertEquals(memcache.set.call_args[0][2], 3250-300, msg="The memcache expiration equals expiration of the token got from auth server minus 300s.")
        # Now simulate an auth-server rejection and expect a typed exception.
        fetch.return_value.status_code = 403
        fetch.return_value.content = '{"error": "invalid_grant"}'
        token = oauth2.TokenRequest(SERVICE_EMAIL, SERVICE_KEY, 'https://www.googleapis.com/auth/calendar', USER_EMAIL)
        token._lock, token._unlock = lambda : True, lambda : True
        self.assertRaises(exceptions.InvalidGrantException, token.get_token)
acdedce75848e5598e434b79a749e19200530dda | 24,651 | py | Python | sdk/python/pulumi_azure/eventhub/event_subscription.py | suresh198526/pulumi-azure | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/eventhub/event_subscription.py | suresh198526/pulumi-azure | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/eventhub/event_subscription.py | suresh198526/pulumi-azure | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
# Public API of this module: only the (deprecated) resource class.
__all__ = ['EventSubscription']
# Warn at import time: this module is a backwards-compatibility shim.
warnings.warn("""azure.eventhub.EventSubscription has been deprecated in favor of azure.eventgrid.EventSubscription""", DeprecationWarning)
class EventSubscription(pulumi.CustomResource):
    """Deprecated alias for ``azure.eventgrid.EventSubscription``.

    Manages an EventGrid Event Subscription. This class is auto-generated by
    the Pulumi Terraform Bridge; new code should use the ``azure.eventgrid``
    module instead (this module only exists for backwards compatibility).
    """
    warnings.warn("""azure.eventhub.EventSubscription has been deprecated in favor of azure.eventgrid.EventSubscription""", DeprecationWarning)
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 advanced_filter: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionAdvancedFilterArgs']]] = None,
                 azure_function_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionAzureFunctionEndpointArgs']]] = None,
                 event_delivery_schema: Optional[pulumi.Input[str]] = None,
                 eventhub_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionEventhubEndpointArgs']]] = None,
                 eventhub_endpoint_id: Optional[pulumi.Input[str]] = None,
                 expiration_time_utc: Optional[pulumi.Input[str]] = None,
                 hybrid_connection_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionHybridConnectionEndpointArgs']]] = None,
                 hybrid_connection_endpoint_id: Optional[pulumi.Input[str]] = None,
                 included_event_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 retry_policy: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionRetryPolicyArgs']]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 service_bus_queue_endpoint_id: Optional[pulumi.Input[str]] = None,
                 service_bus_topic_endpoint_id: Optional[pulumi.Input[str]] = None,
                 storage_blob_dead_letter_destination: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionStorageBlobDeadLetterDestinationArgs']]] = None,
                 storage_queue_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionStorageQueueEndpointArgs']]] = None,
                 subject_filter: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionSubjectFilterArgs']]] = None,
                 topic_name: Optional[pulumi.Input[str]] = None,
                 webhook_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionWebhookEndpointArgs']]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Manages an EventGrid Event Subscription
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        default_resource_group = azure.core.ResourceGroup("defaultResourceGroup", location="West US 2")
        default_account = azure.storage.Account("defaultAccount",
            resource_group_name=default_resource_group.name,
            location=default_resource_group.location,
            account_tier="Standard",
            account_replication_type="LRS",
            tags={
                "environment": "staging",
            })
        default_queue = azure.storage.Queue("defaultQueue", storage_account_name=default_account.name)
        default_event_subscription = azure.eventgrid.EventSubscription("defaultEventSubscription",
            scope=default_resource_group.id,
            storage_queue_endpoint=azure.eventgrid.EventSubscriptionStorageQueueEndpointArgs(
                storage_account_id=default_account.id,
                queue_name=default_queue.name,
            ))
        ```
        ## Import
        EventGrid Event Subscriptions can be imported using the `resource id`, e.g.
        ```sh
         $ pulumi import azure:eventhub/eventSubscription:EventSubscription eventSubscription1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventGrid/topics/topic1/providers/Microsoft.EventGrid/eventSubscriptions/eventSubscription1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionAdvancedFilterArgs']] advanced_filter: A `advanced_filter` block as defined below.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionAzureFunctionEndpointArgs']] azure_function_endpoint: An `azure_function_endpoint` block as defined below.
        :param pulumi.Input[str] event_delivery_schema: Specifies the event delivery schema for the event subscription. Possible values include: `EventGridSchema`, `CloudEventSchemaV1_0`, `CustomInputSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionEventhubEndpointArgs']] eventhub_endpoint: A `eventhub_endpoint` block as defined below.
        :param pulumi.Input[str] eventhub_endpoint_id: Specifies the id where the Event Hub is located.
        :param pulumi.Input[str] expiration_time_utc: Specifies the expiration time of the event subscription (Datetime Format `RFC 3339`).
        :param pulumi.Input[pulumi.InputType['EventSubscriptionHybridConnectionEndpointArgs']] hybrid_connection_endpoint: A `hybrid_connection_endpoint` block as defined below.
        :param pulumi.Input[str] hybrid_connection_endpoint_id: Specifies the id where the Hybrid Connection is located.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] included_event_types: A list of applicable event types that need to be part of the event subscription.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: A list of labels to assign to the event subscription.
        :param pulumi.Input[str] name: Specifies the name of the EventGrid Event Subscription resource. Changing this forces a new resource to be created.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionRetryPolicyArgs']] retry_policy: A `retry_policy` block as defined below.
        :param pulumi.Input[str] scope: Specifies the scope at which the EventGrid Event Subscription should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_bus_queue_endpoint_id: Specifies the id where the Service Bus Queue is located.
        :param pulumi.Input[str] service_bus_topic_endpoint_id: Specifies the id where the Service Bus Topic is located.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionStorageBlobDeadLetterDestinationArgs']] storage_blob_dead_letter_destination: A `storage_blob_dead_letter_destination` block as defined below.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionStorageQueueEndpointArgs']] storage_queue_endpoint: A `storage_queue_endpoint` block as defined below.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionSubjectFilterArgs']] subject_filter: A `subject_filter` block as defined below.
        :param pulumi.Input[str] topic_name: (Optional/ **Deprecated) Specifies the name of the topic to associate with the event subscription.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionWebhookEndpointArgs']] webhook_endpoint: A `webhook_endpoint` block as defined below.
        """
        pulumi.log.warn("EventSubscription is deprecated: azure.eventhub.EventSubscription has been deprecated in favor of azure.eventgrid.EventSubscription")
        # Legacy positional-name escape hatches kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is None => we are creating a new resource and must build the
        # full input property bag; a non-None id means a lookup via `get`.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['advanced_filter'] = advanced_filter
            __props__['azure_function_endpoint'] = azure_function_endpoint
            __props__['event_delivery_schema'] = event_delivery_schema
            # Emit runtime deprecation warnings for the legacy endpoint arguments.
            if eventhub_endpoint is not None:
                warnings.warn("""Deprecated in favour of `eventhub_endpoint_id`""", DeprecationWarning)
                pulumi.log.warn("eventhub_endpoint is deprecated: Deprecated in favour of `eventhub_endpoint_id`")
            __props__['eventhub_endpoint'] = eventhub_endpoint
            __props__['eventhub_endpoint_id'] = eventhub_endpoint_id
            __props__['expiration_time_utc'] = expiration_time_utc
            if hybrid_connection_endpoint is not None:
                warnings.warn("""Deprecated in favour of `hybrid_connection_endpoint_id`""", DeprecationWarning)
                pulumi.log.warn("hybrid_connection_endpoint is deprecated: Deprecated in favour of `hybrid_connection_endpoint_id`")
            __props__['hybrid_connection_endpoint'] = hybrid_connection_endpoint
            __props__['hybrid_connection_endpoint_id'] = hybrid_connection_endpoint_id
            __props__['included_event_types'] = included_event_types
            __props__['labels'] = labels
            __props__['name'] = name
            __props__['retry_policy'] = retry_policy
            # `scope` is the only required input for this resource.
            if scope is None:
                raise TypeError("Missing required property 'scope'")
            __props__['scope'] = scope
            __props__['service_bus_queue_endpoint_id'] = service_bus_queue_endpoint_id
            __props__['service_bus_topic_endpoint_id'] = service_bus_topic_endpoint_id
            __props__['storage_blob_dead_letter_destination'] = storage_blob_dead_letter_destination
            __props__['storage_queue_endpoint'] = storage_queue_endpoint
            __props__['subject_filter'] = subject_filter
            if topic_name is not None:
                warnings.warn("""This field has been updated to readonly field since Apr 25, 2019 so no longer has any affect and will be removed in version 3.0 of the provider.""", DeprecationWarning)
                pulumi.log.warn("topic_name is deprecated: This field has been updated to readonly field since Apr 25, 2019 so no longer has any affect and will be removed in version 3.0 of the provider.")
            __props__['topic_name'] = topic_name
            __props__['webhook_endpoint'] = webhook_endpoint
        super(EventSubscription, __self__).__init__(
            'azure:eventhub/eventSubscription:EventSubscription',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            advanced_filter: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionAdvancedFilterArgs']]] = None,
            azure_function_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionAzureFunctionEndpointArgs']]] = None,
            event_delivery_schema: Optional[pulumi.Input[str]] = None,
            eventhub_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionEventhubEndpointArgs']]] = None,
            eventhub_endpoint_id: Optional[pulumi.Input[str]] = None,
            expiration_time_utc: Optional[pulumi.Input[str]] = None,
            hybrid_connection_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionHybridConnectionEndpointArgs']]] = None,
            hybrid_connection_endpoint_id: Optional[pulumi.Input[str]] = None,
            included_event_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            retry_policy: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionRetryPolicyArgs']]] = None,
            scope: Optional[pulumi.Input[str]] = None,
            service_bus_queue_endpoint_id: Optional[pulumi.Input[str]] = None,
            service_bus_topic_endpoint_id: Optional[pulumi.Input[str]] = None,
            storage_blob_dead_letter_destination: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionStorageBlobDeadLetterDestinationArgs']]] = None,
            storage_queue_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionStorageQueueEndpointArgs']]] = None,
            subject_filter: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionSubjectFilterArgs']]] = None,
            topic_name: Optional[pulumi.Input[str]] = None,
            webhook_endpoint: Optional[pulumi.Input[pulumi.InputType['EventSubscriptionWebhookEndpointArgs']]] = None) -> 'EventSubscription':
        """
        Get an existing EventSubscription resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionAdvancedFilterArgs']] advanced_filter: A `advanced_filter` block as defined below.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionAzureFunctionEndpointArgs']] azure_function_endpoint: An `azure_function_endpoint` block as defined below.
        :param pulumi.Input[str] event_delivery_schema: Specifies the event delivery schema for the event subscription. Possible values include: `EventGridSchema`, `CloudEventSchemaV1_0`, `CustomInputSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionEventhubEndpointArgs']] eventhub_endpoint: A `eventhub_endpoint` block as defined below.
        :param pulumi.Input[str] eventhub_endpoint_id: Specifies the id where the Event Hub is located.
        :param pulumi.Input[str] expiration_time_utc: Specifies the expiration time of the event subscription (Datetime Format `RFC 3339`).
        :param pulumi.Input[pulumi.InputType['EventSubscriptionHybridConnectionEndpointArgs']] hybrid_connection_endpoint: A `hybrid_connection_endpoint` block as defined below.
        :param pulumi.Input[str] hybrid_connection_endpoint_id: Specifies the id where the Hybrid Connection is located.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] included_event_types: A list of applicable event types that need to be part of the event subscription.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: A list of labels to assign to the event subscription.
        :param pulumi.Input[str] name: Specifies the name of the EventGrid Event Subscription resource. Changing this forces a new resource to be created.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionRetryPolicyArgs']] retry_policy: A `retry_policy` block as defined below.
        :param pulumi.Input[str] scope: Specifies the scope at which the EventGrid Event Subscription should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_bus_queue_endpoint_id: Specifies the id where the Service Bus Queue is located.
        :param pulumi.Input[str] service_bus_topic_endpoint_id: Specifies the id where the Service Bus Topic is located.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionStorageBlobDeadLetterDestinationArgs']] storage_blob_dead_letter_destination: A `storage_blob_dead_letter_destination` block as defined below.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionStorageQueueEndpointArgs']] storage_queue_endpoint: A `storage_queue_endpoint` block as defined below.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionSubjectFilterArgs']] subject_filter: A `subject_filter` block as defined below.
        :param pulumi.Input[str] topic_name: (Optional/ **Deprecated) Specifies the name of the topic to associate with the event subscription.
        :param pulumi.Input[pulumi.InputType['EventSubscriptionWebhookEndpointArgs']] webhook_endpoint: A `webhook_endpoint` block as defined below.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["advanced_filter"] = advanced_filter
        __props__["azure_function_endpoint"] = azure_function_endpoint
        __props__["event_delivery_schema"] = event_delivery_schema
        __props__["eventhub_endpoint"] = eventhub_endpoint
        __props__["eventhub_endpoint_id"] = eventhub_endpoint_id
        __props__["expiration_time_utc"] = expiration_time_utc
        __props__["hybrid_connection_endpoint"] = hybrid_connection_endpoint
        __props__["hybrid_connection_endpoint_id"] = hybrid_connection_endpoint_id
        __props__["included_event_types"] = included_event_types
        __props__["labels"] = labels
        __props__["name"] = name
        __props__["retry_policy"] = retry_policy
        __props__["scope"] = scope
        __props__["service_bus_queue_endpoint_id"] = service_bus_queue_endpoint_id
        __props__["service_bus_topic_endpoint_id"] = service_bus_topic_endpoint_id
        __props__["storage_blob_dead_letter_destination"] = storage_blob_dead_letter_destination
        __props__["storage_queue_endpoint"] = storage_queue_endpoint
        __props__["subject_filter"] = subject_filter
        __props__["topic_name"] = topic_name
        __props__["webhook_endpoint"] = webhook_endpoint
        return EventSubscription(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="advancedFilter")
    def advanced_filter(self) -> pulumi.Output[Optional['outputs.EventSubscriptionAdvancedFilter']]:
        """
        A `advanced_filter` block as defined below.
        """
        return pulumi.get(self, "advanced_filter")
    @property
    @pulumi.getter(name="azureFunctionEndpoint")
    def azure_function_endpoint(self) -> pulumi.Output[Optional['outputs.EventSubscriptionAzureFunctionEndpoint']]:
        """
        An `azure_function_endpoint` block as defined below.
        """
        return pulumi.get(self, "azure_function_endpoint")
    @property
    @pulumi.getter(name="eventDeliverySchema")
    def event_delivery_schema(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the event delivery schema for the event subscription. Possible values include: `EventGridSchema`, `CloudEventSchemaV1_0`, `CustomInputSchema`. Defaults to `EventGridSchema`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "event_delivery_schema")
    @property
    @pulumi.getter(name="eventhubEndpoint")
    def eventhub_endpoint(self) -> pulumi.Output['outputs.EventSubscriptionEventhubEndpoint']:
        """
        A `eventhub_endpoint` block as defined below.
        """
        return pulumi.get(self, "eventhub_endpoint")
    @property
    @pulumi.getter(name="eventhubEndpointId")
    def eventhub_endpoint_id(self) -> pulumi.Output[str]:
        """
        Specifies the id where the Event Hub is located.
        """
        return pulumi.get(self, "eventhub_endpoint_id")
    @property
    @pulumi.getter(name="expirationTimeUtc")
    def expiration_time_utc(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the expiration time of the event subscription (Datetime Format `RFC 3339`).
        """
        return pulumi.get(self, "expiration_time_utc")
    @property
    @pulumi.getter(name="hybridConnectionEndpoint")
    def hybrid_connection_endpoint(self) -> pulumi.Output['outputs.EventSubscriptionHybridConnectionEndpoint']:
        """
        A `hybrid_connection_endpoint` block as defined below.
        """
        return pulumi.get(self, "hybrid_connection_endpoint")
    @property
    @pulumi.getter(name="hybridConnectionEndpointId")
    def hybrid_connection_endpoint_id(self) -> pulumi.Output[str]:
        """
        Specifies the id where the Hybrid Connection is located.
        """
        return pulumi.get(self, "hybrid_connection_endpoint_id")
    @property
    @pulumi.getter(name="includedEventTypes")
    def included_event_types(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of applicable event types that need to be part of the event subscription.
        """
        return pulumi.get(self, "included_event_types")
    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list of labels to assign to the event subscription.
        """
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the name of the EventGrid Event Subscription resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="retryPolicy")
    def retry_policy(self) -> pulumi.Output['outputs.EventSubscriptionRetryPolicy']:
        """
        A `retry_policy` block as defined below.
        """
        return pulumi.get(self, "retry_policy")
    @property
    @pulumi.getter
    def scope(self) -> pulumi.Output[str]:
        """
        Specifies the scope at which the EventGrid Event Subscription should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter(name="serviceBusQueueEndpointId")
    def service_bus_queue_endpoint_id(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the id where the Service Bus Queue is located.
        """
        return pulumi.get(self, "service_bus_queue_endpoint_id")
    @property
    @pulumi.getter(name="serviceBusTopicEndpointId")
    def service_bus_topic_endpoint_id(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the id where the Service Bus Topic is located.
        """
        return pulumi.get(self, "service_bus_topic_endpoint_id")
    @property
    @pulumi.getter(name="storageBlobDeadLetterDestination")
    def storage_blob_dead_letter_destination(self) -> pulumi.Output[Optional['outputs.EventSubscriptionStorageBlobDeadLetterDestination']]:
        """
        A `storage_blob_dead_letter_destination` block as defined below.
        """
        return pulumi.get(self, "storage_blob_dead_letter_destination")
    @property
    @pulumi.getter(name="storageQueueEndpoint")
    def storage_queue_endpoint(self) -> pulumi.Output[Optional['outputs.EventSubscriptionStorageQueueEndpoint']]:
        """
        A `storage_queue_endpoint` block as defined below.
        """
        return pulumi.get(self, "storage_queue_endpoint")
    @property
    @pulumi.getter(name="subjectFilter")
    def subject_filter(self) -> pulumi.Output[Optional['outputs.EventSubscriptionSubjectFilter']]:
        """
        A `subject_filter` block as defined below.
        """
        return pulumi.get(self, "subject_filter")
    @property
    @pulumi.getter(name="topicName")
    def topic_name(self) -> pulumi.Output[str]:
        """
        (Optional/ **Deprecated) Specifies the name of the topic to associate with the event subscription.
        """
        return pulumi.get(self, "topic_name")
    @property
    @pulumi.getter(name="webhookEndpoint")
    def webhook_endpoint(self) -> pulumi.Output[Optional['outputs.EventSubscriptionWebhookEndpoint']]:
        """
        A `webhook_endpoint` block as defined below.
        """
        return pulumi.get(self, "webhook_endpoint")
    # Translate a provider camelCase property name to its Python snake_case name.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    # Translate a Python snake_case property name to the provider camelCase name.
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 60.716749 | 290 | 0.713115 |
acdedd3e3c9222b689b2b4d9b029fb0b12eae17c | 361 | py | Python | app/blog/migrations/0002_rename_realesed_date_post_release_date.py | gbr-mendes/tech-blog | c9139af4ba88480a0fb6c436c2e8d60f4f5de03f | [
"MIT"
] | null | null | null | app/blog/migrations/0002_rename_realesed_date_post_release_date.py | gbr-mendes/tech-blog | c9139af4ba88480a0fb6c436c2e8d60f4f5de03f | [
"MIT"
] | null | null | null | app/blog/migrations/0002_rename_realesed_date_post_release_date.py | gbr-mendes/tech-blog | c9139af4ba88480a0fb6c436c2e8d60f4f5de03f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-03-29 18:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='realesed_date',
new_name='release_date',
),
]
| 19 | 48 | 0.581717 |
acdedd45eef0d0b0845272d82225ef2a09f15937 | 2,113 | py | Python | addons/l10n_br/__manifest__.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/l10n_br/__manifest__.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/l10n_br/__manifest__.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2009 Renato Lima - Akretion
{
'name': 'Brazilian - Accounting',
'category': 'Localization',
'description': """
Base module for the Brazilian localization
==========================================
This module consists in:
- Generic Brazilian chart of accounts
- Brazilian taxes such as:
- IPI
- ICMS
- PIS
- COFINS
- ISS
- IR
- IRPJ
- CSLL
The field tax_discount has also been added in the account.tax.template and
account.tax objects to allow the proper computation of some Brazilian VATs
such as ICMS. The chart of account creation wizard has been extended to
propagate those new data properly.
It's important to note however that this module lack many implementations to
use Odoo properly in Brazil. Those implementations (such as the electronic
fiscal Invoicing which is already operational) are brought by more than 15
additional modules of the Brazilian Launchpad localization project
https://launchpad.net/openerp.pt-br-localiz and their dependencies in the
extra addons branch. Those modules aim at not breaking with the remarkable
Odoo modularity, this is why they are numerous but small. One of the
reasons for maintaining those modules apart is that Brazilian Localization
leaders need commit rights agility to complete the localization as companies
fund the remaining legal requirements (such as soon fiscal ledgers,
accounting SPED, fiscal SPED and PAF ECF that are still missing as September
2011). Those modules are also strictly licensed under AGPL V3 and today don't
come with any additional paid permission for online use of 'private modules'.
""",
'author': 'Akretion, Odoo Brasil',
'website': 'http://openerpbrasil.org',
'depends': ['account'],
'data': [
'data/l10n_br_chart_data.xml',
'data/account.account.template.csv',
'data/account_data.xml',
'data/account_tax_template_data.xml',
'views/account_view.xml',
],
}
| 37.070175 | 78 | 0.707998 |
acdedd7da65f34fcdff919546d1fa4829537c086 | 4,057 | py | Python | code/model/hybrid_c3d.py | Yangzhen0000/CDVD-TSP | 95adff7c0b827b7170619b58a3edcec03a9a137e | [
"MIT"
] | null | null | null | code/model/hybrid_c3d.py | Yangzhen0000/CDVD-TSP | 95adff7c0b827b7170619b58a3edcec03a9a137e | [
"MIT"
] | null | null | null | code/model/hybrid_c3d.py | Yangzhen0000/CDVD-TSP | 95adff7c0b827b7170619b58a3edcec03a9a137e | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
import model.blocks as blocks
def make_model(args):
    """Instantiate a HybridC3D network from parsed command-line options."""
    target_device = 'cuda'
    if args.cpu:
        target_device = 'cpu'
    return HybridC3D(
        in_channels=args.n_colors,
        n_sequence=args.n_sequence,
        out_channels=args.n_colors,
        n_resblock=args.n_resblock,
        n_feat=args.n_feat,
        device=target_device,
    )
class HybridC3D(nn.Module):
    """3D-convolutional encoder/decoder that deblurs the middle frame of a
    3-frame sequence and adds it back to that frame as a residual.

    Input:  x of shape (batch, n_sequence, in_channels, H, W), n_sequence == 3.
    Output: restored middle frame of shape (batch, out_channels, H, W).
    """

    def __init__(self, in_channels=3, n_sequence=3, out_channels=3, n_resblock=3, n_feat=32, device='cuda'):
        super(HybridC3D, self).__init__()
        print("Creating HybridC3D Net")
        self.n_sequence = n_sequence
        self.device = device
        assert n_sequence == 3, "Only support args.n_sequence=3; but get args.n_sequence={}".format(n_sequence)
        # Input block: lift frames to n_feat channels, keep temporal depth D=3.
        InBlock = []
        # b, 3, 3, 256, 256
        InBlock.extend([nn.Sequential(
            nn.Conv3d(in_channels, n_feat, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
            nn.ReLU(inplace=True)
        )])
        # b, 32, 3, 256, 256
        InBlock.extend([blocks.ResBlock3D(n_feat, n_feat, kernel_size=(3, 3, 3), stride=(1, 1, 1))
                        for _ in range(n_resblock)])
        # b, 32, 3, 256, 256
        # encoder1: halve spatial resolution, keep depth.
        Encoder_first = [nn.Sequential(
            nn.Conv3d(n_feat, n_feat * 2, kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=(1, 1, 1)),
            nn.ReLU(inplace=True)
        )]
        # b, 64, 3, 128, 128
        Encoder_first.extend([blocks.ResBlock3D(n_feat * 2, n_feat * 2, kernel_size=(3, 3, 3), stride=(1, 1, 1))
                              for _ in range(n_resblock)])
        # b, 64, 3, 128, 128
        # encoder2: halve spatial resolution again and collapse depth 3 -> 1
        # (temporal kernel 3 with no temporal padding).
        Encoder_second = [nn.Sequential(
            nn.Conv3d(n_feat * 2, n_feat * 4, kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
            nn.ReLU(inplace=True)
        )]
        # b, 128, 1, 64, 64
        Encoder_second.extend([blocks.ResBlock3D(n_feat * 4, n_feat * 4, kernel_size=(1, 3, 3),
                                                 stride=(1, 1, 1), padding=(0, 1, 1)) for _ in range(n_resblock)])
        # decoder2: upsample back to H/2 x W/2.
        Decoder_second = [nn.Sequential(
            nn.ConvTranspose3d(n_feat * 4, n_feat * 2, kernel_size=(1, 3, 3), stride=(1, 2, 2),
                               padding=(0, 1, 1), output_padding=(0, 1, 1)),
            nn.ReLU(inplace=True)
        )]
        Decoder_second.extend([blocks.ResBlock3D(n_feat * 2, n_feat * 2, kernel_size=(1, 3, 3),
                                                 stride=(1, 1, 1), padding=(0, 1, 1)) for _ in range(n_resblock)])
        # decoder1: upsample back to full resolution.
        Decoder_first = [nn.Sequential(
            nn.ConvTranspose3d(n_feat * 2, n_feat, kernel_size=(1, 3, 3), stride=(1, 2, 2),
                               padding=(0, 1, 1), output_padding=(0, 1, 1)),
            nn.ReLU(inplace=True)
        )]
        Decoder_first.extend([blocks.ResBlock3D(n_feat, n_feat, kernel_size=(1, 3, 3),
                                                stride=(1, 1, 1), padding=(0, 1, 1)) for _ in range(n_resblock)])
        OutBlock = nn.Conv3d(n_feat, out_channels, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1))
        self.inBlock = nn.Sequential(*InBlock)
        self.encoder_first = nn.Sequential(*Encoder_first)
        self.encoder_second = nn.Sequential(*Encoder_second)
        self.decoder_second = nn.Sequential(*Decoder_second)
        self.decoder_first = nn.Sequential(*Decoder_first)
        self.outBlock = OutBlock

    def forward(self, x):
        """Restore the middle frame of x (shape N*D*C*H*W, D == 3)."""
        reference = x[:, 1, :, :, :]                        # middle frame, N*C*H*W
        in_sequence = x.permute(0, 2, 1, 3, 4)              # N*C*D*H*W
        inblock = self.inBlock(in_sequence)                 # N*n_feat*D*H*W
        encoder_first = self.encoder_first(inblock)         # N*(2*n_feat)*D*(H/2)*(W/2)
        encoder_second = self.encoder_second(encoder_first) # N*(4*n_feat)*1*(H/4)*(W/4)
        decoder_second = self.decoder_second(encoder_second)
        # Skip connections use only the middle-frame slice (depth index 1).
        decoder_first = self.decoder_first(decoder_second + encoder_first[:, :, 1:2, :, :])
        outBlock = self.outBlock(decoder_first + inblock[:, :, 1:2, :, :])
        # BUG FIX: torch.squeeze() with no dim removed *every* singleton
        # dimension, so a batch of 1 (or out_channels == 1) collapsed the
        # batch/channel axes and the residual add below broadcast to the
        # wrong shape. Squeeze only the depth dimension (dim 2), which is
        # always 1 after the out block.
        out = outBlock.squeeze(2)                           # N*out_channels*H*W
        return out + reference
| 45.077778 | 112 | 0.56914 |
acdede67149362452b914e4b53deb6de7c8d93c0 | 423 | py | Python | BOJ/16000~16999/16500~16599/16503.py | shinkeonkim/today-ps | f3e5e38c5215f19579bb0422f303a9c18c626afa | [
"Apache-2.0"
] | 2 | 2020-01-29T06:54:41.000Z | 2021-11-07T13:23:27.000Z | BOJ/16000~16999/16500~16599/16503.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | BOJ/16000~16999/16500~16599/16503.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | def f(a,b,c):
if c == '+':
return a+b
if c == '-':
return a-b
if c == '*':
return a*b
if c == '/':
if a*b <0:
return abs(a)//abs(b) * (-1)
return abs(a)//abs(b)
L=input().split()
for i in range(len(L)):
if i % 2 == 0:
L[i]=int(L[i])
M1 = f(f(L[0],L[2],L[1]),L[4],L[3])
M2 = f(L[0],f(L[2],L[4],L[3]),L[1])
print(min(M1,M2))
print(max(M1,M2)) | 20.142857 | 41 | 0.394799 |
acdede744695c8a5b69ab11cebb4c45403b6c8d6 | 13,639 | py | Python | argocd_python_client/model/version_version_message.py | RyanSiu1995/argocd-python-client | 2e8f097fe09f247a46ac70692241a93d1acd076a | [
"MIT"
] | 1 | 2021-11-20T13:37:43.000Z | 2021-11-20T13:37:43.000Z | argocd_python_client/model/version_version_message.py | RyanSiu1995/argocd-python-client | 2e8f097fe09f247a46ac70692241a93d1acd076a | [
"MIT"
] | null | null | null | argocd_python_client/model/version_version_message.py | RyanSiu1995/argocd-python-client | 2e8f097fe09f247a46ac70692241a93d1acd076a | [
"MIT"
] | null | null | null | """
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argocd_python_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argocd_python_client.exceptions import ApiAttributeError
class VersionVersionMessage(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns the tuple of types accepted for undeclared (additional)
        properties on this model.
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
_nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # Each value is a one-element tuple of the accepted type(s).
        return {
            'build_date': (str,),  # noqa: E501
            'compiler': (str,),  # noqa: E501
            'git_commit': (str,),  # noqa: E501
            'git_tag': (str,),  # noqa: E501
            'git_tree_state': (str,),  # noqa: E501
            'go_version': (str,),  # noqa: E501
            'helm_version': (str,),  # noqa: E501
            'jsonnet_version': (str,),  # noqa: E501
            'ksonnet_version': (str,),  # noqa: E501
            'kubectl_version': (str,),  # noqa: E501
            'kustomize_version': (str,),  # noqa: E501
            'platform': (str,),  # noqa: E501
            'version': (str,),  # noqa: E501
        }
@cached_property
def discriminator():
return None
attribute_map = {
'build_date': 'BuildDate', # noqa: E501
'compiler': 'Compiler', # noqa: E501
'git_commit': 'GitCommit', # noqa: E501
'git_tag': 'GitTag', # noqa: E501
'git_tree_state': 'GitTreeState', # noqa: E501
'go_version': 'GoVersion', # noqa: E501
'helm_version': 'HelmVersion', # noqa: E501
'jsonnet_version': 'JsonnetVersion', # noqa: E501
'ksonnet_version': 'KsonnetVersion', # noqa: E501
'kubectl_version': 'KubectlVersion', # noqa: E501
'kustomize_version': 'KustomizeVersion', # noqa: E501
'platform': 'Platform', # noqa: E501
'version': 'Version', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""VersionVersionMessage - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
build_date (str): [optional] # noqa: E501
compiler (str): [optional] # noqa: E501
git_commit (str): [optional] # noqa: E501
git_tag (str): [optional] # noqa: E501
git_tree_state (str): [optional] # noqa: E501
go_version (str): [optional] # noqa: E501
helm_version (str): [optional] # noqa: E501
jsonnet_version (str): [optional] # noqa: E501
ksonnet_version (str): [optional] # noqa: E501
kubectl_version (str): [optional] # noqa: E501
kustomize_version (str): [optional] # noqa: E501
platform (str): [optional] # noqa: E501
version (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""VersionVersionMessage - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
build_date (str): [optional] # noqa: E501
compiler (str): [optional] # noqa: E501
git_commit (str): [optional] # noqa: E501
git_tag (str): [optional] # noqa: E501
git_tree_state (str): [optional] # noqa: E501
go_version (str): [optional] # noqa: E501
helm_version (str): [optional] # noqa: E501
jsonnet_version (str): [optional] # noqa: E501
ksonnet_version (str): [optional] # noqa: E501
kubectl_version (str): [optional] # noqa: E501
kustomize_version (str): [optional] # noqa: E501
platform (str): [optional] # noqa: E501
version (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.865132 | 121 | 0.563384 |
acdedfacc6a6e0d47d13e9aed550899a9809d24f | 4,692 | py | Python | tests/prefetch_related/test_prefetch_related_objects.py | roverdotcom/django-prefetch-utils | f901cb2159b3e95f44baf18812d5bbb7c52afd97 | [
"BSD-3-Clause"
] | 1 | 2019-09-26T10:32:47.000Z | 2019-09-26T10:32:47.000Z | tests/prefetch_related/test_prefetch_related_objects.py | roverdotcom/django-prefetch-utils | f901cb2159b3e95f44baf18812d5bbb7c52afd97 | [
"BSD-3-Clause"
] | 1 | 2019-07-23T09:25:06.000Z | 2019-07-23T09:25:06.000Z | tests/prefetch_related/test_prefetch_related_objects.py | roverdotcom/django-prefetch-utils | f901cb2159b3e95f44baf18812d5bbb7c52afd97 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T14:38:20.000Z | 2021-12-22T14:38:20.000Z | from django.db.models import Prefetch
from django.db.models import prefetch_related_objects
from django.test import TestCase
from .models import Author
from .models import Book
from .models import Reader
class PrefetchRelatedObjectsTests(TestCase):
    """
    Since prefetch_related_objects() is just the inner part of
    prefetch_related(), only do basic tests to ensure its API hasn't changed.
    """
    @classmethod
    def setUpTestData(cls):
        # Fixture graph: Poems (book1) is co-authored by Charlotte, Anne and
        # Emily; Jane alone wrote Sense and Sensibility (book4). Both readers
        # have read book4, so it is the only book with two readers.
        cls.book1 = Book.objects.create(title="Poems")
        cls.book2 = Book.objects.create(title="Jane Eyre")
        cls.book3 = Book.objects.create(title="Wuthering Heights")
        cls.book4 = Book.objects.create(title="Sense and Sensibility")
        cls.author1 = Author.objects.create(name="Charlotte", first_book=cls.book1)
        cls.author2 = Author.objects.create(name="Anne", first_book=cls.book1)
        cls.author3 = Author.objects.create(name="Emily", first_book=cls.book1)
        cls.author4 = Author.objects.create(name="Jane", first_book=cls.book4)
        cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
        cls.book2.authors.add(cls.author1)
        cls.book3.authors.add(cls.author3)
        cls.book4.authors.add(cls.author4)
        cls.reader1 = Reader.objects.create(name="Amy")
        cls.reader2 = Reader.objects.create(name="Belinda")
        cls.reader1.books_read.add(cls.book1, cls.book4)
        cls.reader2.books_read.add(cls.book2, cls.book4)
    def test_unknown(self):
        # A lookup that is not a relation on the model must raise.
        book1 = Book.objects.get(id=self.book1.id)
        with self.assertRaises(AttributeError):
            prefetch_related_objects([book1], "unknown_attribute")
    def test_m2m_forward(self):
        # Forward m2m: one query to prefetch, then no queries to read.
        book1 = Book.objects.get(id=self.book1.id)
        with self.assertNumQueries(1):
            prefetch_related_objects([book1], "authors")
        with self.assertNumQueries(0):
            self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
    def test_m2m_reverse(self):
        # Reverse m2m (Author -> books) behaves the same way.
        author1 = Author.objects.get(id=self.author1.id)
        with self.assertNumQueries(1):
            prefetch_related_objects([author1], "books")
        with self.assertNumQueries(0):
            self.assertCountEqual(author1.books.all(), [self.book1, self.book2])
    def test_foreignkey_forward(self):
        # One query fetches first_book for all authors at once.
        authors = list(Author.objects.all())
        with self.assertNumQueries(1):
            prefetch_related_objects(authors, "first_book")
        with self.assertNumQueries(0):
            [author.first_book for author in authors]
    def test_foreignkey_reverse(self):
        # Reverse FK accessor (related_name="first_time_authors") prefetches too.
        books = list(Book.objects.all())
        with self.assertNumQueries(1):
            prefetch_related_objects(books, "first_time_authors")
        with self.assertNumQueries(0):
            [list(book.first_time_authors.all()) for book in books]
    def test_m2m_then_m2m(self):
        """A m2m can be followed through another m2m."""
        authors = list(Author.objects.all())
        # Two queries: one per m2m hop (books, then read_by).
        with self.assertNumQueries(2):
            prefetch_related_objects(authors, "books__read_by")
        with self.assertNumQueries(0):
            self.assertEqual(
                [[[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in authors],
                [
                    [["Amy"], ["Belinda"]],  # Charlotte - Poems, Jane Eyre
                    [["Amy"]],  # Anne - Poems
                    [["Amy"], []],  # Emily - Poems, Wuthering Heights
                    [["Amy", "Belinda"]],  # Jane - Sense and Sense
                ],
            )
    def test_prefetch_object(self):
        # A Prefetch instance is accepted in place of a string lookup.
        book1 = Book.objects.get(id=self.book1.id)
        with self.assertNumQueries(1):
            prefetch_related_objects([book1], Prefetch("authors"))
        with self.assertNumQueries(0):
            self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
    def test_prefetch_object_to_attr(self):
        # to_attr stores the result as a plain list attribute.
        book1 = Book.objects.get(id=self.book1.id)
        with self.assertNumQueries(1):
            prefetch_related_objects([book1], Prefetch("authors", to_attr="the_authors"))
        with self.assertNumQueries(0):
            self.assertCountEqual(book1.the_authors, [self.author1, self.author2, self.author3])
    def test_prefetch_queryset(self):
        # A custom queryset restricts which related rows are prefetched.
        book1 = Book.objects.get(id=self.book1.id)
        with self.assertNumQueries(1):
            prefetch_related_objects(
                [book1], Prefetch("authors", queryset=Author.objects.filter(id__in=[self.author1.id, self.author2.id]))
            )
        with self.assertNumQueries(0):
            self.assertCountEqual(book1.authors.all(), [self.author1, self.author2])
| 39.762712 | 119 | 0.645354 |
acdedfd6d0cf5bb1d952427e5cc2a025db3d71ae | 5,438 | py | Python | addCourseForm.py | Poom1997/GManPyGUI | eae3ad10394737a2b7ed14a4a1375cecd005d5c9 | [
"MIT"
] | null | null | null | addCourseForm.py | Poom1997/GManPyGUI | eae3ad10394737a2b7ed14a4a1375cecd005d5c9 | [
"MIT"
] | null | null | null | addCourseForm.py | Poom1997/GManPyGUI | eae3ad10394737a2b7ed14a4a1375cecd005d5c9 | [
"MIT"
] | null | null | null | from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
import plugin.databaseConnect as database
class addCourseUI(QMainWindow):
    """Window for entering and saving a new class course.

    Loads the form from ``resources/UI/addCourse.ui``, wires the top
    navigation bar to the parent application's page switcher, and submits
    the entered course data through :mod:`plugin.databaseConnect`.
    """

    def __init__(self, parent=None):
        QMainWindow.__init__(self, None)
        self.parent = parent
        self.setMinimumSize(900, 600)
        self.setWindowTitle("Class Course")
        # Static artwork reused by the form widgets.
        self.edu_logo = QPixmap("resources/images/educationLogo.png")
        self.bar = QPixmap("resources/images/topBarBackground.png")
        background = QPalette()
        background.setBrush(QPalette.Background, QBrush(QPixmap("resources/imagess/programBackground.png")))
        self.setPalette(background)
        self.UIinit()

    def UIinit(self):
        """Load the .ui file, look up every widget and connect signals."""
        root = QUiLoader().load("resources/UI/addCourse.ui", None)
        self.setCentralWidget(root)
        # --- top navigation bar ---
        self.bar_group = root.findChild(QLabel, "barLabel")
        self.bar_group.setPixmap(self.bar)
        self.home_button = root.findChild(QPushButton, "homeButton")
        self.profile_button = root.findChild(QPushButton, "profileButton")
        self.faculties_button = root.findChild(QPushButton, "facultiesButton")
        self.majors_button = root.findChild(QPushButton, "majorsButton")
        self.course_button = root.findChild(QPushButton, "courseButton")
        self.other_button = root.findChild(QPushButton, "othersButton")
        # --- course form fields ---
        self.home_button = root.findChild(QPushButton, "homeButton")
        self.status = root.findChild(QLabel, "status")
        self.course_code = root.findChild(QLineEdit, "courseCode")
        self.course_name = root.findChild(QLineEdit, "courseName")
        self.credits = root.findChild(QLineEdit, "credit")
        self.lecturer = root.findChild(QLineEdit, "lectName")
        self.period = root.findChild(QLineEdit, "period")
        self.year = root.findChild(QLineEdit, "year")
        self.term = root.findChild(QLineEdit, "term")
        self.faculty = root.findChild(QLineEdit, "faculty")
        self.major = root.findChild(QLineEdit, "major")
        self.student_limit = root.findChild(QLineEdit, "studentLimit")
        self.building = root.findChild(QLineEdit, "building")
        self.pre = root.findChild(QLineEdit, "pre")
        self.room = root.findChild(QLineEdit, "room")
        self.picture = root.findChild(QLabel, "picture")
        self.picture.setPixmap(self.edu_logo)
        self.save_button = root.findChild(QPushButton, "saveButton")
        self.clear_button = root.findChild(QPushButton, "clearButton")
        # --- navigation signals ---
        self.home_button.clicked.connect(self.goHome)
        self.faculties_button.clicked.connect(self.goFac)
        self.majors_button.clicked.connect(self.goMaj)
        self.other_button.clicked.connect(self.goOther)
        self.course_button.clicked.connect(self.goCourse)
        self.profile_button.clicked.connect(self.goProfile)
        # --- form action signals ---
        self.save_button.clicked.connect(self.saveCourse)
        self.clear_button.clicked.connect(self.clearField)

    def goHome(self):
        """Switch the application to the home page."""
        self.parent.changePageLoginSection("home")

    def goProfile(self):
        """Switch the application to the profile page."""
        self.parent.changePageLoginSection("profile")

    def goFac(self):
        """Switch the application to the add-faculties page."""
        self.parent.changePageLoginSection("addfaculties")

    def goMaj(self):
        """Switch the application to the add-major page."""
        self.parent.changePageLoginSection("addmajor")

    def goCourse(self):
        """Switch the application to the add-course page."""
        self.parent.changePageLoginSection("addcourse")

    def goOther(self):
        """Switch the application to the other-options page."""
        self.parent.changePageLoginSection("otherOption")

    def clearField(self):
        """Blank out every line edit on the form."""
        for line_edit in (self.course_code, self.course_name, self.credits,
                          self.lecturer, self.period, self.year, self.term,
                          self.faculty, self.major, self.student_limit,
                          self.building, self.room, self.pre):
            line_edit.setText("")

    def saveCourse(self):
        """Collect the form fields, persist the course and report the outcome.

        The status codes tested below ("22P02", "23505", "23503") come back
        from the database layer; 1 means success.
        """
        payload = {
            "courseID": self.course_code.text(),
            "courseName": self.course_name.text(),
            "credit": self.credits.text(),
            "lecturer": self.lecturer.text(),
            "period": self.period.text(),
            "year": self.year.text(),
            "term": self.term.text(),
            "facultyID": self.faculty.text(),
            "majorID": self.major.text(),
            "student_limit": self.student_limit.text(),
            "building": self.building.text(),
            "room": self.room.text(),
            "pre": self.pre.text(),
        }
        status = database.databaseCourse().addCourse(payload)
        if status == 1:
            self.parent.showOK("Course Saved", "The course has been saved successfully")
            self.clearField()
        elif status[0] == "22P02":
            self.parent.showERROR("Data Integrity Error" + status[0], "Invalid DataType or Incomplete Form.\nPlease check your fields.")
        elif status[0] == "23505":
            self.parent.showERROR("Data Duplication Error" + status[0], "CourseID already exists.")
        elif status[0] == "23503":
            self.parent.showERROR("Data Consistency Error" + status[0], "Either Professor ID, FacultyID, or Major ID is incorrect.")
| 42.155039 | 137 | 0.653917 |
acdee1a5c2c31321f17ae65b8f60b0d760dffa70 | 387 | py | Python | galera/translate_names.py | spolel/bachelor-project | 605cc7af722873af347b15f08f2297d64ae264de | [
"MIT"
] | null | null | null | galera/translate_names.py | spolel/bachelor-project | 605cc7af722873af347b15f08f2297d64ae264de | [
"MIT"
] | null | null | null | galera/translate_names.py | spolel/bachelor-project | 605cc7af722873af347b15f08f2297d64ae264de | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import sys
import os
from subprocess import PIPE, run
for line in sys.stdin:
if("Procedure" not in line):
result = run(["c++filt", "-n"], input=bytes(line.split(";")[0], 'utf-8'), stdout=PIPE, stderr=PIPE)
#print(line.split(";")[0])
newline = result.stdout.decode("utf-8").strip('\n') + ";" + line.strip('\n')
print(newline)
| 29.769231 | 107 | 0.589147 |
acdee2bb63eade4fc8942dc9d8faf7e7392d2a60 | 4,593 | py | Python | PhysicsTools/PatAlgos/python/slimming/applyDeepBtagging_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | PhysicsTools/PatAlgos/python/slimming/applyDeepBtagging_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | PhysicsTools/PatAlgos/python/slimming/applyDeepBtagging_cff.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.tools.helpers import getPatAlgosToolsTask, addToProcessAndTask
def applyDeepBtagging( process, postfix="" ) :
    """Replace slimmedJets/slimmedJetsAK8 in *process* with clones that carry
    the DeepFlavour (AK4) and particle-based deep-tagger (AK8) b-tag
    discriminators, while keeping the original collection names.
    The tagger-less inputs are preserved under *NoDeepFlavour/*NoDeepTags.
    """
    task = getPatAlgosToolsTask(process)
    from PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection
    process.load('PhysicsTools.PatAlgos.slimming.slimmedJets_cfi')
    # update slimmed jets to include DeepFlavour (keep same name)
    # make clone for DeepFlavour-less slimmed jets, so output name is preserved
    addToProcessAndTask('slimmedJetsNoDeepFlavour', process.slimmedJets.clone(), process, task)
    updateJetCollection(
        process,
        jetSource = cms.InputTag('slimmedJetsNoDeepFlavour'),
        # updateJetCollection defaults to MiniAOD inputs but
        # here it is made explicit (as in training or MINIAOD redoing)
        pvSource = cms.InputTag('offlineSlimmedPrimaryVertices'),
        pfCandidates = cms.InputTag('packedPFCandidates'),
        svSource = cms.InputTag('slimmedSecondaryVertices'),
        muSource = cms.InputTag('slimmedMuons'),
        elSource = cms.InputTag('slimmedElectrons'),
        jetCorrections = ('AK4PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute']), 'None'),
        btagDiscriminators = [
            'pfDeepFlavourJetTags:probb',
            'pfDeepFlavourJetTags:probbb',
            'pfDeepFlavourJetTags:problepb',
            'pfDeepFlavourJetTags:probc',
            'pfDeepFlavourJetTags:probuds',
            'pfDeepFlavourJetTags:probg',
        ],
        postfix = 'SlimmedDeepFlavour'+postfix,
        printWarning = False
    )
    # slimmedJets with DeepFlavour (remove DeepFlavour-less)
    delattr(process, 'slimmedJets')
    addToProcessAndTask('slimmedJets', getattr(process,'selectedUpdatedPatJetsSlimmedDeepFlavour'+postfix).clone(), process, task)
    # delete module not used anymore (slimmedJets substitutes)
    delattr(process, 'selectedUpdatedPatJetsSlimmedDeepFlavour'+postfix)
    from RecoBTag.MXNet.pfDeepBoostedJet_cff import _pfDeepBoostedJetTagsAll as pfDeepBoostedJetTagsAll
    from RecoBTag.MXNet.pfParticleNet_cff import _pfParticleNetJetTagsAll as pfParticleNetJetTagsAll
    # update slimmed jets to include particle-based deep taggers (keep same name)
    # make clone for DeepTags-less slimmed AK8 jets, so output name is preserved
    addToProcessAndTask('slimmedJetsAK8NoDeepTags', process.slimmedJetsAK8.clone(), process, task)
    _btagDiscriminators = cms.PSet( names = cms.vstring(
        'pfDeepDoubleBJetTags:probQ',
        'pfDeepDoubleBJetTags:probH' )
    )
    # The run2_miniAOD_devel era extends the AK8 tagger list with the
    # DoubleX taggers plus all DeepBoostedJet/ParticleNet discriminators.
    run2_miniAOD_devel.toModify(_btagDiscriminators, names = _btagDiscriminators.names + [
        'pfDeepDoubleBvLJetTags:probQCD',
        'pfDeepDoubleBvLJetTags:probHbb',
        'pfDeepDoubleCvLJetTags:probQCD',
        'pfDeepDoubleCvLJetTags:probHcc',
        'pfDeepDoubleCvBJetTags:probHbb',
        'pfDeepDoubleCvBJetTags:probHcc',
        'pfMassIndependentDeepDoubleBvLJetTags:probQCD',
        'pfMassIndependentDeepDoubleBvLJetTags:probHbb',
        'pfMassIndependentDeepDoubleCvLJetTags:probQCD',
        'pfMassIndependentDeepDoubleCvLJetTags:probHcc',
        'pfMassIndependentDeepDoubleCvBJetTags:probHbb',
        'pfMassIndependentDeepDoubleCvBJetTags:probHcc',
        ] + pfDeepBoostedJetTagsAll + pfParticleNetJetTagsAll
    )
    updateJetCollection(
        process,
        jetSource = cms.InputTag('slimmedJetsAK8NoDeepTags'),
        # updateJetCollection defaults to MiniAOD inputs but
        # here it is made explicit (as in training or MINIAOD redoing)
        pvSource = cms.InputTag('offlineSlimmedPrimaryVertices'),
        pfCandidates = cms.InputTag('packedPFCandidates'),
        svSource = cms.InputTag('slimmedSecondaryVertices'),
        muSource = cms.InputTag('slimmedMuons'),
        elSource = cms.InputTag('slimmedElectrons'),
        rParam = 0.8,
        jetCorrections = ('AK8PFPuppi', cms.vstring(['L2Relative', 'L3Absolute']), 'None'),
        btagDiscriminators = _btagDiscriminators.names,
        postfix = 'SlimmedAK8DeepTags'+postfix,
        printWarning = False
    )
    # slimmedJetsAK8 with DeepTags (remove DeepTags-less)
    delattr(process, 'slimmedJetsAK8')
    addToProcessAndTask('slimmedJetsAK8', getattr(process,'selectedUpdatedPatJetsSlimmedAK8DeepTags'+postfix).clone(), process, task)
    # delete module not used anymore (slimmedJetsAK8 substitutes)
    delattr(process, 'selectedUpdatedPatJetsSlimmedAK8DeepTags'+postfix)
| 48.347368 | 133 | 0.727629 |
acdee3206f43b9c107c6bf84999d3b65d08375f5 | 1,042 | py | Python | micromagneticmodel/tests/test_damping.py | ubermag/micromagneticmodel | 91ad92d26cdbec369a5a41f7b90a17ca5328cd07 | [
"BSD-3-Clause"
] | 5 | 2019-10-21T01:12:16.000Z | 2021-09-24T03:52:30.000Z | micromagneticmodel/tests/test_damping.py | ubermag/micromagneticmodel | 91ad92d26cdbec369a5a41f7b90a17ca5328cd07 | [
"BSD-3-Clause"
] | 11 | 2019-08-12T22:38:17.000Z | 2022-03-15T00:08:47.000Z | micromagneticmodel/tests/test_damping.py | ubermag/micromagneticmodel | 91ad92d26cdbec369a5a41f7b90a17ca5328cd07 | [
"BSD-3-Clause"
] | 4 | 2020-06-27T15:36:28.000Z | 2021-12-06T15:08:04.000Z | import re
import pytest
import discretisedfield as df
import micromagneticmodel as mm
from .checks import check_term
class TestDamping:
    """Unit tests for the ``mm.Damping`` dynamics term."""

    def setup(self):
        # A scalar field is also a legal alpha value, so include one.
        mesh = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), cell=(1, 1, 1))
        field = df.Field(mesh, dim=1, value=0.1)
        self.valid_args = [1, 2.0, 5e-11, 1e6, {'a': 1, 'b': 1e-12}, field]
        self.invalid_args = [-1, -2.1, 'a', (1, 2), -3.6e-6, '0',
                             [1, 2, 3], {'a': -1, 'b': 3}]

    def test_init_valid_args(self):
        # Every valid alpha must produce a well-formed term.
        for value in self.valid_args:
            term = mm.Damping(alpha=value)
            check_term(term)
            assert hasattr(term, 'alpha')
            assert term.name == 'damping'
            assert re.search(r'^Damping\(alpha=.+\)$', repr(term))

    def test_init_invalid_args(self):
        # Invalid alpha values are rejected at construction time.
        for value in self.invalid_args:
            with pytest.raises((TypeError, ValueError)):
                mm.Damping(alpha=value)
        # An unknown keyword argument is rejected as well.
        with pytest.raises(AttributeError):
            mm.Damping(wrong=1)
| 32.5625 | 75 | 0.556622 |
acdee321a55d62da8b28d923a4b55708a2381f64 | 45 | py | Python | 1_Kithgard_Dungeon/033-Breakout/breakout.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | [
"MIT"
] | null | null | null | 1_Kithgard_Dungeon/033-Breakout/breakout.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | [
"MIT"
] | null | null | null | 1_Kithgard_Dungeon/033-Breakout/breakout.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | [
"MIT"
] | null | null | null | hero.attack("Door")
hero.attack("Weak Door")
| 15 | 24 | 0.711111 |
acdee347a4f341fe07bbcce22aecdb55c658dd0d | 3,390 | py | Python | src/monitor-control-service/azext_amcs/vendored_sdks/amcs/aio/_monitor_client.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/monitor-control-service/azext_amcs/vendored_sdks/amcs/aio/_monitor_client.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/monitor-control-service/azext_amcs/vendored_sdks/amcs/aio/_monitor_client.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MonitorClientConfiguration
from .operations import DataCollectionEndpointsOperations
from .operations import DataCollectionRuleAssociationsOperations
from .operations import DataCollectionRulesOperations
from .. import models
class MonitorClient(object):
    """Monitor Management Client.

    Async client exposing the data-collection operation groups of the Azure
    Monitor control service through a single ARM pipeline.

    :ivar data_collection_endpoints: DataCollectionEndpointsOperations operations
    :vartype data_collection_endpoints: azure.mgmt.amcs.aio.operations.DataCollectionEndpointsOperations
    :ivar data_collection_rule_associations: DataCollectionRuleAssociationsOperations operations
    :vartype data_collection_rule_associations: azure.mgmt.amcs.aio.operations.DataCollectionRuleAssociationsOperations
    :ivar data_collection_rules: DataCollectionRulesOperations operations
    :vartype data_collection_rules: azure.mgmt.amcs.aio.operations.DataCollectionRulesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        self._config = MonitorClientConfiguration(credential, subscription_id, **kwargs)
        # Fall back to the public ARM endpoint when no base URL is supplied.
        self._client = AsyncARMPipelineClient(
            base_url=base_url or 'https://management.azure.com',
            config=self._config,
            **kwargs
        )

        model_classes = {name: value for name, value in models.__dict__.items() if isinstance(value, type)}
        self._serialize = Serializer(model_classes)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(model_classes)

        # All operation groups share the same pipeline and (de)serializers.
        shared = (self._client, self._config, self._serialize, self._deserialize)
        self.data_collection_endpoints = DataCollectionEndpointsOperations(*shared)
        self.data_collection_rule_associations = DataCollectionRuleAssociationsOperations(*shared)
        self.data_collection_rules = DataCollectionRulesOperations(*shared)

    async def close(self) -> None:
        """Close the underlying pipeline transport."""
        await self._client.close()

    async def __aenter__(self) -> "MonitorClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| 45.810811 | 119 | 0.728024 |
acdee407e87a6e74591f11ba6d760f1205005f82 | 1,772 | py | Python | stograde/toolkit/process_students.py | babatana/stograde | c1c447e99c44c23cef9dd857e669861f3708ae77 | [
"MIT"
] | 7 | 2016-08-05T00:41:11.000Z | 2019-08-22T11:12:10.000Z | stograde/toolkit/process_students.py | babatana/stograde | c1c447e99c44c23cef9dd857e669861f3708ae77 | [
"MIT"
] | 145 | 2016-08-04T01:07:11.000Z | 2019-09-09T22:07:13.000Z | stograde/toolkit/process_students.py | babatana/stograde | c1c447e99c44c23cef9dd857e669861f3708ae77 | [
"MIT"
] | 3 | 2017-02-06T21:52:46.000Z | 2019-02-18T10:35:01.000Z | import functools
from typing import List
from .process_parallel import process_parallel
from ..common import chdir
from ..specs.spec import Spec
from ..student.process_student import process_student
from ..student.student_result import StudentResult
def process_students(specs: List['Spec'],
                     students: List[str],
                     *,
                     analyze: bool,
                     base_dir: str,
                     clean: bool,
                     date: str,
                     interact: bool,
                     no_progress_bar: bool,
                     record: bool,
                     skip_branch_check: bool,
                     skip_repo_update: bool,
                     skip_web_compile: bool,
                     stogit_url: str,
                     workers: int,
                     work_dir: str) -> List['StudentResult']:
    """Run the per-student analysis/recording pipeline for every student.

    Binds all shared options into a one-argument callable and fans it out
    over ``workers`` processes from inside ``work_dir``.
    """
    # Pre-bind everything except the student name itself.
    analyze_one = functools.partial(
        process_student,
        analyze=analyze,
        basedir=base_dir,
        clean=clean,
        date=date,
        interact=interact,
        skip_branch_check=skip_branch_check,
        skip_repo_update=skip_repo_update,
        record=record,
        specs=specs,
        skip_web_compile=skip_web_compile,
        stogit_url=stogit_url
    )
    # The per-student work assumes the cwd is the working directory.
    with chdir(work_dir):
        return process_parallel(students,
                                no_progress_bar,
                                workers,
                                analyze_one,
                                progress_indicator=lambda value: value.name)
| 35.44 | 102 | 0.480813 |
acdee44bb2ada7dc509e6f1819064472dabc1dfa | 5,087 | py | Python | tests/test_pulsar.py | jerry-sun1/enterprise | 4f2bdddf8caa29527cc698babce525a1bea6b3ea | [
"MIT"
] | null | null | null | tests/test_pulsar.py | jerry-sun1/enterprise | 4f2bdddf8caa29527cc698babce525a1bea6b3ea | [
"MIT"
] | 4 | 2017-05-11T06:26:44.000Z | 2021-04-07T14:19:51.000Z | tests/test_pulsar.py | jerry-sun1/enterprise | 4f2bdddf8caa29527cc698babce525a1bea6b3ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pulsar
----------------------------------
Tests for `pulsar` module. Will eventually want to add tests
for time slicing, PINT integration and pickling.
"""
import os
import shutil
import unittest
import numpy as np
from enterprise.pulsar import Pulsar
from tests.enterprise_test_data import datadir
try:
import cPickle as pickle
except:
import pickle
class TestPulsar(unittest.TestCase):
    """Validate the Pulsar wrapper against the bundled B1855+09 NANOGrav data set."""

    @classmethod
    def setUpClass(cls):
        """Setup the Pulsar object."""
        # initialize Pulsar class (shared by every test in this class)
        cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim")

    @classmethod
    def tearDownClass(cls):
        # "pickle_dir" is created by test_to_pickle; remove it once the class finishes.
        shutil.rmtree("pickle_dir")

    def test_residuals(self):
        """Check Residual shape."""
        msg = "Residuals shape incorrect"
        assert self.psr.residuals.shape == (4005,), msg

    def test_toaerrs(self):
        """Check TOA errors shape."""
        msg = "TOA errors shape incorrect"
        assert self.psr.toaerrs.shape == (4005,), msg

    def test_toas(self):
        """Check TOA shape."""
        msg = "TOA shape incorrect"
        assert self.psr.toas.shape == (4005,), msg

    def test_stoas(self):
        """Check STOA shape."""
        msg = "stoa shape incorrect"
        assert self.psr.stoas.shape == (4005,), msg

    def test_dm(self):
        """Check DM/DMX access."""
        msg = "dm value incorrect"
        assert self.psr.dm == np.longdouble("13.299393"), msg
        msg = "dmx struct incorrect (spotcheck)"
        assert len(self.psr.dmx) == 72, msg
        assert self.psr.dmx["DMX_0001"]["DMX"] == np.longdouble("0.015161863"), msg
        assert self.psr.dmx["DMX_0001"]["fit"], msg

    def test_freqs(self):
        """Check frequencies shape."""
        msg = "Frequencies shape incorrect"
        assert self.psr.freqs.shape == (4005,), msg

    def test_flags(self):
        """Check flags shape."""
        msg = "Flags shape incorrect"
        assert self.psr.flags["f"].shape == (4005,), msg

    def test_backend_flags(self):
        """Check backend_flags shape."""
        msg = "Backend Flags shape incorrect"
        assert self.psr.backend_flags.shape == (4005,), msg

    def test_sky(self):
        """Check Sky location."""
        sky = (1.4023093811712661, 4.9533700839400492)
        msg = "Incorrect sky location"
        assert np.allclose(self.psr.theta, sky[0]), msg
        assert np.allclose(self.psr.phi, sky[1]), msg

    def test_design_matrix(self):
        """Check design matrix shape."""
        msg = "Design matrix shape incorrect."
        assert self.psr.Mmat.shape == (4005, 91), msg

    def test_filter_data(self):
        """Place holder for filter_data tests."""
        assert self.psr.filter_data() is None

    def test_planetssb(self):
        """Place holder for filter_data tests."""
        assert hasattr(self.psr, "planetssb")

    def test_sunssb(self):
        """Place holder for filter_data tests."""
        assert hasattr(self.psr, "sunssb")

    def test_to_pickle(self):
        """Place holder for to_pickle tests."""
        self.psr.to_pickle()
        with open("B1855+09.pkl", "rb") as f:
            pkl_psr = pickle.load(f)
        os.remove("B1855+09.pkl")
        assert np.allclose(self.psr.residuals, pkl_psr.residuals, rtol=1e-10)
        self.psr.to_pickle("pickle_dir")
        with open("pickle_dir/B1855+09.pkl", "rb") as f:
            pkl_psr = pickle.load(f)
        assert np.allclose(self.psr.residuals, pkl_psr.residuals, rtol=1e-10)

    def test_wrong_input(self):
        """Test exception when incorrect par(tim) file given."""
        with self.assertRaises(IOError) as context:
            Pulsar("wrong.par", "wrong.tim")
        msg = "Cannot find parfile wrong.par or timfile wrong.tim!"
        # BUG FIX: `in` is not defined on exception instances in Python 3, so the
        # original `msg in context.exception` raised TypeError instead of testing
        # the message. Compare against str(context.exception) instead.
        self.assertTrue(msg in str(context.exception))

    def test_value_error(self):
        """Test exception when unknown argument is given"""
        with self.assertRaises(ValueError):
            Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.time")
class TestPulsarPint(TestPulsar):
    """Re-run the TestPulsar suite with a PINT-backed Pulsar object."""

    @classmethod
    def setUpClass(cls):
        """Build the shared Pulsar object through the PINT timing package."""
        par = datadir + "/B1855+09_NANOGrav_9yv1.gls.par"
        tim = datadir + "/B1855+09_NANOGrav_9yv1.tim"
        cls.psr = Pulsar(par, tim, ephem="DE430", drop_pintpsr=False, timing_package="pint")

    # The PINT wrapper does not yet fully implement .stoas, .dm and .dmx, so the
    # inherited checks are relaxed here to simple attribute-existence tests.
    def test_stoas(self):
        self.assertTrue(hasattr(self.psr, "stoas"))

    def test_dm(self):
        self.assertTrue(hasattr(self.psr, "dm"))

    def test_planetssb(self):
        self.assertTrue(hasattr(self.psr, "planetssb"))

    def test_sunssb(self):
        self.assertTrue(hasattr(self.psr, "sunssb"))

    def test_model(self):
        self.assertTrue(hasattr(self.psr, "model"))

    def test_pint_toas(self):
        self.assertTrue(hasattr(self.psr, "pint_toas"))
| 27.497297 | 110 | 0.611952 |
acdee4ac26b767ebb0ae7e7d3d635c349188f23d | 1,880 | py | Python | WaltzControl/entities/positions.py | DaneSpaeth/WaltzControl_refactored | 80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7 | [
"MIT"
] | null | null | null | WaltzControl/entities/positions.py | DaneSpaeth/WaltzControl_refactored | 80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7 | [
"MIT"
] | null | null | null | WaltzControl/entities/positions.py | DaneSpaeth/WaltzControl_refactored | 80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7 | [
"MIT"
] | null | null | null | """Contains Various Classes for storing telescope positions.
"""
class HorizontalPosition:
    """Equatorial on-sky position (ra, dec).

    ra  -- right ascension in hours (float)
    dec -- declination in degrees (float)
    """
    def __init__(self, ra: float, dec: float):
        """Store the initial coordinates.

        Input: ra in hours as float
               dec in degrees as float
        """
        self.ra = ra
        self.dec = dec

    def change_ra(self, ra: float) -> None:
        """Change ra.

        Input: ra in hours as float
        """
        self.ra = ra

    def change_dec(self, dec: float) -> None:
        """Change dec.

        Input: dec in degrees as float
        (fixed: the original docstring wrongly said "hours")
        """
        self.dec = dec

    def change_position(self, ra: float, dec: float) -> None:
        """Update ra and dec together.

        Input: ra in hours as float
               dec in degrees as float
        """
        self.change_ra(ra)
        self.change_dec(dec)
class HorizontalPositionHA(HorizontalPosition):
    """Equatorial on-sky position with hour angle (ra, dec, ha).

    ra  -- right ascension in hours (float)
    dec -- declination in degrees (float); the original docstring wrongly said hours
    ha  -- hour angle in hours (float), or None when unknown
    """
    def __init__(self, ra: float, dec: float, ha=None):
        """Construct instance.

        Input: ra in hours as float
               dec in degrees as float
               ha in hours as float (optional)
        """
        super().__init__(ra, dec)
        self.ha = ha

    def change_ha(self, ha: float) -> None:
        """Change ha.

        Input: ha in hours as float
        """
        self.ha = ha

    def change_position(self, ra: float, dec: float, ha=None) -> None:
        """Update ra, dec and (optionally) ha.

        BUG FIX: the original tested `if ha:` — truthiness — so a perfectly
        valid hour angle of 0.0 was silently ignored. Test against None.
        """
        super().change_position(ra, dec)
        if ha is not None:
            self.change_ha(ha)
| 23.5 | 73 | 0.480319 |
acdee50375c57e9132b16355ff0e585962c8e69b | 94 | py | Python | entries/apps.py | brijathom/blog | a7fc73dd2b99740acbcb8a981921ebb134e696f2 | [
"MIT"
] | 2 | 2020-07-19T12:06:20.000Z | 2020-07-19T12:10:26.000Z | entries/apps.py | brijathom/blog | a7fc73dd2b99740acbcb8a981921ebb134e696f2 | [
"MIT"
] | 1 | 2020-07-19T12:06:55.000Z | 2020-07-19T12:07:27.000Z | diary/entries/apps.py | Omkar-Atugade/Diary-App | eabdf99354602451d61aa005079ad589722b8e9a | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class EntriesConfig(AppConfig):
    """Django application configuration for the blog's ``entries`` app."""
    name = 'entries'
| 15.666667 | 34 | 0.712766 |
acdee516858727e55170f967650546302ded417a | 2,910 | py | Python | app/main/app/service/config_util.py | sesmond/idcard_web | 688e74516812bb4945fe7fee70fd1efcbfa58ae6 | [
"Apache-2.0"
] | 1 | 2021-04-30T15:13:23.000Z | 2021-04-30T15:13:23.000Z | app/main/app/service/config_util.py | sesmond/idcard_web | 688e74516812bb4945fe7fee70fd1efcbfa58ae6 | [
"Apache-2.0"
] | null | null | null | app/main/app/service/config_util.py | sesmond/idcard_web | 688e74516812bb4945fe7fee70fd1efcbfa58ae6 | [
"Apache-2.0"
] | null | null | null | """
提供config的一些读取方法
@Time : 2019/11/6 4:02 下午
@File : config_util.py
"""
import cv2
import numpy as np
from numpy import random
from app.main.app.entity.idcard import IdCard
from app.main.app.utils import config, image_util, file_utils
addr_list = [] # 地址
def getName():
    """Randomly compose a Chinese name: one surname from config.xing plus one
    given name from config.mingzi."""
    surname = config.xing[np.random.randint(0, len(config.xing))]
    given_name = config.mingzi[np.random.randint(0, len(config.mingzi))]
    return surname + given_name
def initArea():
    """Populate the module-level addr_list from config.area.

    config.area is laid out as consecutive (area-code, city) pairs; only
    entries whose code string is longer than 5 characters are kept.
    """
    i = 0
    while i < len(config.area):
        if len(config.area[i]) > 5:
            addr_list.append([config.area[i], config.area[i + 1]])
        i += 2
    print("初始化 配置参数")
def getAddress():
    """Pick a random address.

    Returns [area_code, full_street_address, city], where the street address
    is the city plus a random suffix from config.address3.
    """
    pair = addr_list[random.randint(len(addr_list))]
    suffix = config.address3[random.randint(len(config.address3))]
    return [pair[0], pair[1] + suffix, pair[1]]
def getIdcode(num):
    """Generate a fake national ID number.

    :param num: region-code string for the holder's area
    :return: [birth_year, birth_month, birth_day, sex, full_id_number]
    """
    # birth date components (note: np.random.randint excludes the upper bound)
    birth_year = str(random.randint(1965, 2000))
    birth_month = str(random.randint(1, 13)).zfill(2)
    birth_day = str(random.randint(1, 29)).zfill(2)
    # three-digit sequence number
    serial = str(random.randint(100, 999))
    body = num + birth_year + birth_month + birth_day + serial
    sex = random.choice(["男", "女"])
    # final character: randomly drawn (not a real GB 11643 checksum)
    check_digits = ['1', '0', 'X', '9', '8', '7', '6', '5', '4', '3', '2']
    full_code = body + random.choice(check_digits)
    return [birth_year, birth_month, birth_day, sex, full_code]
def getExpDate():
    """Build a random validity-period string.

    Looks like '2010.05.12-2020.05.12', or '2010.05.12-长期' ("long term")
    when the 100-year duration is drawn.
    """
    month_str = str(random.randint(1, 13)).zfill(2)
    day_str = str(random.randint(1, 29)).zfill(2)
    first_year = random.randint(2006, 2019)
    duration = random.choice([10, 20, 100])
    begin = str(first_year) + "." + month_str + "." + day_str + "-"
    if duration == 100:
        finish = "长期"
    else:
        finish = str(first_year + duration) + "." + month_str + "." + day_str
    return begin + finish
def getZu():
    """Return a random ethnic-group name from config.zu."""
    idx = random.randint(len(config.zu))
    return config.zu[idx]
def generateIdCard():
    """Build a fully randomized IdCard instance, including an avatar image
    loaded from resource/icon."""
    card = IdCard()
    addr = getAddress()
    code = getIdcode(addr[0])
    card.name = getName()
    card.sex = code[3]
    # ethnic group
    card.nation = getZu()
    card.year, card.month, card.day = code[0], code[1], code[2]
    card.addr = addr[1]
    # ID number
    card.idNo = code[4]
    # issuing authority: "<city> Public Security Bureau"
    card.org = addr[2] + "公安局"
    # validity period
    card.validPeriod = getExpDate()
    icons = file_utils.get_files("resource/icon")
    avatar_path = random.choice(icons)
    card.avatar = cv2.imread(avatar_path)
    return card
if __name__ == '__main__':
    # Quick smoke test: print one sample from each random generator.
    print(getName())
    print(getZu())
    initArea()
    print(getAddress())
    print(getExpDate())
    print(getIdcode('12345'))
| 23.28 | 78 | 0.577663 |
acdee516b0dba241157ea6cddd0d0f637d209876 | 3,891 | py | Python | a10sdk/core/ipv6/ipv6_nat.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 16 | 2015-05-20T07:26:30.000Z | 2021-01-23T11:56:57.000Z | a10sdk/core/ipv6/ipv6_nat.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 6 | 2015-03-24T22:07:11.000Z | 2017-03-28T21:31:18.000Z | a10sdk/core/ipv6/ipv6_nat.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 23 | 2015-03-29T15:43:01.000Z | 2021-06-02T17:12:01.000Z | from a10sdk.common.A10BaseClass import A10BaseClass
class Nat(A10BaseClass):
"""Class Description::
Configure IPv6 NAT.
Class nat supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param pool_group_list: {"minItems": 1, "items": {"type": "pool-group"}, "uniqueItems": true, "array": [{"required": ["pool-group-name"], "properties": {"member-list": {"minItems": 1, "items": {"type": "member"}, "uniqueItems": true, "array": [{"required": ["pool-name"], "properties": {"uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "pool-name": {"description": "Specify NAT pool name", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/ipv6/nat/pool-group/{pool-group-name}/member/{pool-name}"}, "pool-group-name": {"description": "Specify pool group name", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}, "vrid": {"description": "Specify VRRP-A vrid (Specify ha VRRP-A vrid)", "format": "number", "optional": true, "maximum": 31, "minimum": 1, "modify-not-allowed": 1, "type": "number"}, "uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/ipv6/nat/pool-group/{pool-group-name}"}
:param pool_list: {"minItems": 1, "items": {"type": "pool"}, "uniqueItems": true, "array": [{"required": ["pool-name"], "properties": {"uuid": {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}, "start-address": {"optional": true, "modify-not-allowed": 1, "type": "string", "description": "Configure start IP address of NAT pool", "format": "ipv6-address"}, "vrid": {"description": "Specify VRRP-A vrid (Specify ha VRRP-A vrid)", "format": "number", "optional": true, "maximum": 31, "minimum": 1, "modify-not-allowed": 1, "type": "number"}, "netmask": {"description": "Configure mask for pool", "format": "number", "optional": true, "maximum": 128, "minimum": 64, "modify-not-allowed": 1, "type": "number"}, "end-address": {"optional": true, "modify-not-allowed": 1, "type": "string", "description": "Configure end IP address of NAT pool", "format": "ipv6-address"}, "ip-rr": {"description": "Use IP address round-robin behavior", "format": "flag", "default": 0, "type": "number", "modify-not-allowed": 1, "optional": true}, "scaleout-device-id": {"description": "Configure Scaleout device id to which this NAT pool is to be bound (Specify Scaleout device id)", "format": "number", "optional": true, "maximum": 64, "minimum": 1, "modify-not-allowed": 1, "type": "number"}, "gateway": {"optional": true, "modify-not-allowed": 1, "type": "string", "description": "Configure gateway IP", "format": "ipv6-address"}, "pool-name": {"description": "Specify pool name", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 63, "type": "string"}}}], "type": "array", "$ref": "/axapi/v3/ipv6/nat/pool/{pool-name}"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/ipv6/nat`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "nat"
self.a10_url="/axapi/v3/ipv6/nat"
self.DeviceProxy = ""
self.icmpv6 = {}
self.pool_group_list = []
self.inside = {}
self.pool_list = []
for keys, value in kwargs.items():
setattr(self,keys, value)
| 97.275 | 1,728 | 0.634798 |
acdee547cbf4dc083e6c26766a7ee196fe0afb88 | 1,695 | py | Python | tools/nntool/importer/onnx/handlers/backend/global_pool_mixin.py | mfkiwl/gap_sdk | 642b798dfdc7b85ccabe6baba295033f0eadfcd4 | [
"Apache-2.0"
] | null | null | null | tools/nntool/importer/onnx/handlers/backend/global_pool_mixin.py | mfkiwl/gap_sdk | 642b798dfdc7b85ccabe6baba295033f0eadfcd4 | [
"Apache-2.0"
] | null | null | null | tools/nntool/importer/onnx/handlers/backend/global_pool_mixin.py | mfkiwl/gap_sdk | 642b798dfdc7b85ccabe6baba295033f0eadfcd4 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from graph.types.base import NNEdge
from graph.types.others import GlobalPoolParameters
from importer.common.provisional_dim import ProvisionalDim
class GlobalPoolMixin(object):
@classmethod
def _common(cls, node, pool_type="max", **kwargs):
all_nodes = kwargs['all_nodes']
G = kwargs['G']
valid_name = kwargs['valid_name']
inputs = [all_nodes[inp] for inp in node.input]
x = inputs[0]
x_shape = x[2].shape
unknown_dims = sum(1 if dim is None else 0 for dim in x_shape)
params = GlobalPoolParameters(
valid_name,
pool_type=pool_type,
axis=tuple(range(1, len(x_shape) - unknown_dims)),
keep_dims=True
)
pout_dims = ProvisionalDim([x_shape[0], x_shape[1]] + ([1] * (len(x_shape) - 2)))
G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
all_nodes[node.output[0]] = (params, 0, pout_dims)
return params
| 40.357143 | 89 | 0.687906 |
acdee5954cacfbbf80dcefbeb62df0ef786fc85d | 13,302 | py | Python | shap_domino/explainers/_exact.py | PiotrekGa/shap | 4fd2608f4e4c0abb0410df6bd7d63fd806748af1 | [
"MIT"
] | null | null | null | shap_domino/explainers/_exact.py | PiotrekGa/shap | 4fd2608f4e4c0abb0410df6bd7d63fd806748af1 | [
"MIT"
] | null | null | null | shap_domino/explainers/_exact.py | PiotrekGa/shap | 4fd2608f4e4c0abb0410df6bd7d63fd806748af1 | [
"MIT"
] | null | null | null | from ..utils import MaskedModel, shapley_coefficients, make_masks, delta_minimization_order
from .._explanation import Explanation
from ._explainer import Explainer
import numpy as np
import pandas as pd
import logging
import scipy.special
import numpy as np
import itertools
import sys
from numba import jit
from .. import links
log = logging.getLogger('shap_domino')
class Exact(Explainer):
""" Computes SHAP values via an optimized exact enumeration.
This works well for standard Shapley value maskers for models with less than ~15 features that vary
from the background per sample. It also works well for Owen values from hclustering structured
maskers when there are less than ~100 features that vary from the background per sample. This
explainer minmizes the number of function evaluations needed by ordering the masking sets to
minimize sequential differences. This is done using gray codes for standard Shapley values
and a greedly sorting method for hclustering structured maskers.
"""
def __init__(self, model, masker, link=links.identity):
""" Build an explainers.Exact object for the given model using the given masker object.
Parameters
----------
model : function
A callable python object that executes the model given a set of input data samples.
masker : function or numpy.array or pandas.DataFrame
A callable python object used to "mask" out hidden features of the form `masker(mask, *fargs)`.
It takes a single a binary mask and an input sample and returns a matrix of masked samples. These
masked samples are evaluated using the model function and the outputs are then averaged.
As a shortcut for the standard masking used by SHAP you can pass a background data matrix
instead of a function and that matrix will be used for masking. To use a clustering
game structure you can pass a shap_domino.maskers.TabularPartitions(data) object.
link : function
The link function used to map between the output units of the model and the SHAP value units. By
default it is shap_domino.links.identity, but shap_domino.links.logit can be useful so that expectations are
computed in probability units while explanations remain in the (more naturally additive) log-odds
units. For more details on how link functions work see any overview of link functions for generalized
linear models.
"""
super(Exact, self).__init__(model, masker, link=link)
if getattr(masker, "clustering", None) is not None:
self._partition_masks,self._partition_masks_inds = partition_masks(masker.clustering)
self._partition_delta_indexes = partition_delta_indexes(masker.clustering, self._partition_masks)
self._gray_code_cache = {} # used to avoid regenerating the same gray code patterns
def __call__(self, *args, max_evals=100000, main_effects=False, error_bounds=False, batch_size="auto", silent=False):
""" Explains the output of model(*args), where args represents one or more parallel iterators.
"""
# we entirely rely on the general call implementation, we override just to remove **kwargs
# from the function signature
return super(Exact, self).__call__(
*args, max_evals=max_evals, main_effects=main_effects, error_bounds=error_bounds,
batch_size=batch_size, silent=silent
)
def _cached_gray_codes(self, n):
if n not in self._gray_code_cache:
self._gray_code_cache[n] = gray_code_indexes(n)
return self._gray_code_cache[n]
def explain_row(self, *row_args, max_evals, main_effects, error_bounds, batch_size, outputs, silent):
""" Explains a single row and returns the tuple (row_values, row_expected_values, row_mask_shapes).
"""
# build a masked version of the model for the current input sample
fm = MaskedModel(self.model, self.masker, self.link, *row_args)
# do the standard Shapley values
inds = None
if getattr(self.masker, "clustering", None) is None:
# see which elements we actually need to perturb
inds = fm.varying_inputs()
# make sure we have enough evals
if max_evals is not None and max_evals != "auto" and max_evals < 2**len(inds):
raise Exception("It takes %d masked evaluations to run the Exact explainer on this instance, but max_evals=%d" %(2**len(inds), max_evals))
# generate the masks in gray code order (so that we change the inputs as little
# as possible while we iterate to minimize the need to re-eval when the inputs
# don't vary from the background)
delta_indexes = self._cached_gray_codes(len(inds))
# map to a larger mask that includes the invarient entries
extended_delta_indexes = np.zeros(2**len(inds), dtype=np.int)
for i in range(2**len(inds)):
if delta_indexes[i] == MaskedModel.delta_mask_noop_value:
extended_delta_indexes[i] = delta_indexes[i]
else:
extended_delta_indexes[i] = inds[delta_indexes[i]]
# run the model
outputs = fm(extended_delta_indexes, batch_size=batch_size)
# loop over all the outputs to update the rows
coeff = shapley_coefficients(len(inds))
row_values = np.zeros(len(fm))
mask = np.zeros(len(fm), dtype=np.bool)
_compute_grey_code_row_values(row_values, mask, inds, outputs, coeff, extended_delta_indexes, MaskedModel.delta_mask_noop_value)
# do a partition tree constrained version of Shapley values
else:
# make sure we have enough evals
if max_evals is not None and max_evals != "auto" and max_evals < len(fm)**2:
raise Exception("It takes %d masked evaluations to run the Exact explainer on this instance, but max_evals=%d" %(len(fm)**2, max_evals))
# generate the masks in a hclust order (so that we change the inputs as little
# as possible while we iterate to minimize the need to re-eval when the inputs
# don't vary from the background)
delta_indexes = self._partition_delta_indexes
# run the model
outputs = fm(delta_indexes, batch_size=batch_size)
# loop over each output feature
row_values = np.zeros(len(fm))
for i in range(len(fm)):
on_outputs = outputs[self._partition_masks_inds[i][1]]
off_outputs = outputs[self._partition_masks_inds[i][0]]
row_values[i] = (on_outputs - off_outputs).mean()
# compute the main effects if we need to
main_effect_values = None
if main_effects:
if inds is None:
inds = np.arange(len(fm))
main_effect_values = fm.main_effects(inds)
return {
"values": row_values,
"expected_values": outputs[0],
"mask_shapes": fm.mask_shapes,
"main_effects": main_effect_values,
"clustering": getattr(self.masker, "clustering", None)
}
@jit
def _compute_grey_code_row_values(row_values, mask, inds, outputs, shapley_coeff, extended_delta_indexes, noop_code):
    """ Accumulate Shapley value contributions into row_values (in place).

    Walks the 2**M gray-code-ordered model outputs; `mask` tracks the current
    coalition and `set_size` its cardinality, so each output is weighted by the
    appropriate Shapley coefficient for on/off features.
    """
    set_size = 0
    M = len(inds)
    for i in range(2**M):

        # update the mask (gray code order flips at most one bit per step)
        delta_ind = extended_delta_indexes[i]
        if delta_ind != noop_code:
            mask[delta_ind] = ~mask[delta_ind]
            if mask[delta_ind]:
                set_size += 1
            else:
                set_size -= 1

        # update the output row values: features that are "on" gain weight
        # shapley_coeff[set_size-1], features that are "off" lose shapley_coeff[set_size]
        on_coeff = shapley_coeff[set_size-1]
        if set_size < M:
            off_coeff = shapley_coeff[set_size]
        out = outputs[i]
        for j in inds:
            if mask[j]:
                row_values[j] += out * on_coeff
            else:
                row_values[j] -= out * off_coeff
def partition_delta_indexes(partition_tree, all_masks):
    """ Return a delta-index encoded array of all the masks possible while following the given partition tree.

    Each mask is encoded as the list of positions that differ from the previous
    mask; a negative value -j-1 marks a flip with more flips still to come for
    the same mask, and MaskedModel.delta_mask_noop_value marks "no change".
    """

    # convert the masks to delta index format
    # (dtype=bool: the deprecated np.bool alias was removed in NumPy 1.24)
    mask = np.zeros(all_masks.shape[1], dtype=bool)
    delta_inds = []
    for i in range(len(all_masks)):
        inds = np.where(mask ^ all_masks[i,:])[0]

        for j in inds[:-1]:
            delta_inds.append(-j - 1) # negative + (-1) means we have more inds still to change...
        if len(inds) == 0:
            delta_inds.append(MaskedModel.delta_mask_noop_value)
        else:
            delta_inds.extend(inds[-1:])
        mask = all_masks[i,:]

    return np.array(delta_inds)
def partition_masks(partition_tree):
    """ Return an array of all the masks possible while following the given partition tree.

    Returns (all_masks, inds_lists) where inds_lists[i] holds, for feature i,
    the paired (off, on) indexes into all_masks whose output differences give
    that feature's Owen value.
    """

    M = partition_tree.shape[0] + 1
    mask_matrix = make_masks(partition_tree)
    all_masks = []
    # (dtype=bool: the deprecated np.bool alias was removed in NumPy 1.24)
    m00 = np.zeros(M, dtype=bool)
    all_masks.append(m00)
    all_masks.append(~m00)
    inds_lists = [[[], []] for i in range(M)]
    _partition_masks_recurse(len(partition_tree)-1, m00, 0, 1, inds_lists, mask_matrix, partition_tree, M, all_masks)

    all_masks = np.array(all_masks)

    # we resort the clustering matrix to minimize the sequential difference between the masks
    # this minimizes the number of model evaluations we need to run when the background sometimes
    # matches the foreground. We seem to average about 1.5 feature changes per mask with this
    # approach. This is not as clean as the grey code ordering, but a perfect 1 feature change
    # ordering is not possible with a clustering tree
    order = delta_minimization_order(all_masks)
    inverse_order = np.arange(len(order))[np.argsort(order)]

    # remap the recorded mask indexes to their positions in the reordered array
    for inds_list0, inds_list1 in inds_lists:
        for i in range(len(inds_list0)):
            inds_list0[i] = inverse_order[inds_list0[i]]
            inds_list1[i] = inverse_order[inds_list1[i]]

    return all_masks[order], np.array([[np.array(on), np.array(off)] for on, off in inds_lists])
# TODO: this should be a jit function... which would require preallocating the inds_lists (sizes are 2**depth of that ind)
# TODO: we could also probable avoid making the masks at all and just record the deltas if we want...
def _partition_masks_recurse(index, m00, ind00, ind11, inds_lists, mask_matrix, partition_tree, M, all_masks):
    """ Recursively enumerate the masks implied by a partition tree.

    `index` is a node of partition_tree (negative values are leaf features,
    offset by -M); `m00` is the current all-off context mask and ind00/ind11
    index the off/on context masks inside all_masks. For each leaf, the
    (ind00, ind11) pair is appended to inds_lists so callers can difference
    the corresponding model outputs.
    """
    if index < 0:
        # leaf feature: record the off/on mask pair for this feature
        inds_lists[index + M][0].append(ind00)
        inds_lists[index + M][1].append(ind11)
        return

    # get our children indexes
    left_index = int(partition_tree[index,0] - M)
    right_index = int(partition_tree[index,1] - M)

    # build more refined masks
    m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix
    m10[:] += mask_matrix[left_index+M, :]
    m01 = m00.copy()
    m01[:] += mask_matrix[right_index+M, :]

    # record the new masks we made
    ind01 = len(all_masks)
    all_masks.append(m01)
    ind10 = len(all_masks)
    all_masks.append(m10)

    # recurse left and right with both 1 (True) and 0 (False) contexts
    # NOTE: the four calls deliberately cover every on/off combination of the
    # sibling subtrees; their order matters for the recorded index pairs.
    _partition_masks_recurse(left_index, m00, ind00, ind10, inds_lists, mask_matrix, partition_tree, M, all_masks)
    _partition_masks_recurse(right_index, m10, ind10, ind11, inds_lists, mask_matrix, partition_tree, M, all_masks)
    _partition_masks_recurse(left_index, m01, ind01, ind11, inds_lists, mask_matrix, partition_tree, M, all_masks)
    _partition_masks_recurse(right_index, m00, ind00, ind01, inds_lists, mask_matrix, partition_tree, M, all_masks)
def gray_code_masks(nbits):
    """ Produces an array of all binary patterns of size nbits in gray code order.

    Consecutive rows differ in exactly one bit, which lets callers update
    masked model inputs incrementally.

    This is based on code from: http://code.activestate.com/recipes/576592-gray-code-generatoriterator/
    """
    # dtype=bool: the deprecated np.bool alias was removed in NumPy 1.24
    out = np.zeros((2**nbits, nbits), dtype=bool)
    li = np.zeros(nbits, dtype=bool)
    for term in range(2, (1 << nbits) + 1):
        if term % 2 == 1: # odd step: flip the bit left of the rightmost set bit
            for i in range(-1, -nbits, -1):
                if li[i] == 1:
                    li[i-1] = li[i-1] ^ 1
                    break
        else: # even step: flip the last bit
            li[-1] = li[-1] ^ 1
        out[term-1, :] = li
    return out
def gray_code_indexes(nbits):
    """ Produces an array of which bits flip at which position.

    We assume the masks start at all zero and MaskedModel.delta_mask_noop_value
    means don't do a flip. This is a more efficient representation of the
    gray_code_masks version.
    """
    # dtype=int / dtype=bool: the deprecated np.int and np.bool aliases
    # were removed in NumPy 1.24
    out = np.ones(2**nbits, dtype=int) * MaskedModel.delta_mask_noop_value
    li = np.zeros(nbits, dtype=bool)
    for term in range((1 << nbits) - 1):
        if term % 2 == 1: # odd step: flip the bit left of the rightmost set bit
            for i in range(-1, -nbits, -1):
                if li[i] == 1:
                    li[i-1] = li[i-1] ^ 1
                    out[term+1] = nbits + (i-1)
                    break
        else: # even step: flip the last bit
            li[-1] = li[-1] ^ 1
            out[term+1] = nbits - 1
    return out
| 43.613115 | 154 | 0.649977 |
acdee6d77975397c5e3bde9f8cbe9547e9810be0 | 765 | py | Python | ensemble/util/calc_loss.py | topolphukhanh/kaggle-avazu | eda48cb74d2c38450ecc4af3eb2765b588b5b96e | [
"Apache-2.0"
] | 398 | 2015-02-25T15:13:41.000Z | 2019-04-08T09:58:42.000Z | ensemble/util/calc_loss.py | raincoatrun/kaggle-avazu | eda48cb74d2c38450ecc4af3eb2765b588b5b96e | [
"Apache-2.0"
] | 1 | 2017-01-02T10:57:35.000Z | 2017-01-02T10:57:35.000Z | ensemble/util/calc_loss.py | raincoatrun/kaggle-avazu | eda48cb74d2c38450ecc4af3eb2765b588b5b96e | [
"Apache-2.0"
] | 217 | 2015-02-25T15:19:17.000Z | 2019-03-29T09:06:57.000Z | #!/usr/bin/env python3
import argparse, csv, sys, pickle, math
from common import *
if len(sys.argv) == 1:
sys.argv.append('-h')
parser = argparse.ArgumentParser()
parser.add_argument('prd_path', type=str)
parser.add_argument('ans_path', type=str)
args = vars(parser.parse_args())
prd = read_prd(args['prd_path'])
ans = {}
for row in csv.DictReader(open(args['ans_path'])):
ans[row['id']] = float(row['click'])
if len(prd) < len(ans):
print('Warning: it is not a full prediction')
loss, total = 0.0, 0
for key in set(prd.keys()).intersection(ans.keys()):
if ans[key] == 1:
loss += math.log(prd[key])
else:
loss += math.log(1-prd[key])
total += 1
if total == 0:
print('nan')
else:
print(round(-loss/total, 5))
| 21.857143 | 52 | 0.630065 |
acdee7a881a2185a0dbc130b8edc7c9b1129b204 | 3,460 | py | Python | python/orient_1d.py | jht0664/Utility_python_gromacs | 4457b62e2f0252bcb38021d5deda0cfb932e3ed9 | [
"MIT"
] | 1 | 2022-01-02T11:27:59.000Z | 2022-01-02T11:27:59.000Z | python/orient_1d.py | jht0664/Utility_python_gromacs | 4457b62e2f0252bcb38021d5deda0cfb932e3ed9 | [
"MIT"
] | null | null | null | python/orient_1d.py | jht0664/Utility_python_gromacs | 4457b62e2f0252bcb38021d5deda0cfb932e3ed9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# ver 0.1 - make codes on 3/29/2018
import argparse
# Command-line interface: unit-cell, center-of-mass and end-to-end-vector
# inputs, plus binning controls for the 1D orientational-parameter profile.
parser = argparse.ArgumentParser(
	formatter_class=argparse.ArgumentDefaultsHelpFormatter,
	description='calculation orientational parameters using com')
## args
parser.add_argument('-icell', '--icell', default='unit_cell.npy', nargs='?',
	help='input unit cell dimension file')
parser.add_argument('-icom', '--icom', default='pol.com.npy', nargs='?',
	help='input COM file')
parser.add_argument('-ivec', '--ivec', default='pol.ree.vec.npy', nargs='?',
	help='input vector file')
parser.add_argument('-nmol', '--nmol', nargs='?', type=int,
	help='# molecules')
parser.add_argument('-axis', '--axis', default=2, nargs='?', type=int,
	help='axis for distribution')
parser.add_argument('-nbin', '--nbin', nargs='?', type=int,
	help='#bins for distribution on a given axis (should be matched with nbins when convolution alignment did)')
parser.add_argument('-o', '--output', default='pol', nargs='?',
	help='output prefix filename for oriental paramter files (.orient)')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
import numpy as np
from numpy import linalg as LA
import MDAnalysis as mda
from MDAnalysis.analysis import distances
from scipy.spatial.distance import euclidean
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
## timer
start_proc, start_prof = hjung.time.init()
args.output = args.output + '.orient'
## read files (hjung.io.read_simple is the project's flat-array loader)
unitcell = hjung.io.read_simple(args.icell,0,-1)
n_frames = len(unitcell)
com = hjung.io.read_simple(args.icom,0,-1)
# sanity check: COM data must hold 3 coordinates per molecule per frame
if n_frames != int(len(com)/args.nmol/3):
	raise ValueError("may be wrong n_frames of com data")
else:
	com = com.reshape(n_frames,args.nmol,3)
ree_vec = hjung.io.read_simple(args.ivec,0,-1)
if n_frames != int(len(ree_vec)/args.nmol/3):
	raise ValueError("may be wrong n_frames of ree_vec data")
else:
	ree_vec = ree_vec.reshape(-1,3)
# calc. com histograms along the chosen axis
unit_cells_1d = unitcell[:,args.axis]
com_1d = com[:,:,args.axis]
com_hist_1d_t, bin_1d_t = hjung.analyze.histo_t_1d_nbin(com_1d, unit_cells_1d, args.nbin)
# calc. orient.
# see following reference:
# Structural and thermodynamic properties of interfaces between coexisting phases in polymer blends: a Monte Carlo simulation
# Marcus Müller, Kurt Binder and Wilfried Oed, J. Chem. Soc., Faraday Trans., 1995, 91, 2369
# DOI: 10.1039/FT9959102369
# the variable "orient" is the same as eq.(28) in the paper above.
orient_abs = LA.norm(ree_vec,axis=1)**2
# squared end-to-end components: parallel to the axis, and the two lateral ones
ree_p = ree_vec[:,args.axis]**2
ree_l1 = ree_vec[:,(args.axis-1)%3]**2
ree_l2 = ree_vec[:,(args.axis-2)%3]**2
orient = (ree_p - 0.5*(ree_l1+ree_l2))/orient_abs
orient = orient.reshape(n_frames,args.nmol)
# calc. average orient. in bins (weighted by the COM histogram bins)
orient_histo_1d_t = hjung.analyze.histo_xy_t_1d_wbin(com_1d, orient, bin_1d_t)
# save raw orientational-parameter data as both text and .npy
np.savetxt(args.output, orient_histo_1d_t,
	header='orientational parameters' , fmt='%f', comments='# ')
np.save(args.output, orient_histo_1d_t)
print(" saved orient_histo files")
## timer
hjung.time.end_print(start_proc, start_prof)
acdee84e7f1ec9ebc15c2100044730f63a3c0807 | 8,910 | py | Python | tests/test_redis_session_interface.py | elendirx/sanic_session | 19ef4a47e1133ab4fdb8f65c8758cc6699067c71 | [
"BSD-3-Clause",
"MIT"
] | 1 | 2019-06-11T12:39:42.000Z | 2019-06-11T12:39:42.000Z | tests/test_redis_session_interface.py | elendirx/sanic_session | 19ef4a47e1133ab4fdb8f65c8758cc6699067c71 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | tests/test_redis_session_interface.py | elendirx/sanic_session | 19ef4a47e1133ab4fdb8f65c8758cc6699067c71 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | import time
import datetime
from sanic.response import text
from sanic_session.redis import RedisSessionInterface
import pytest
import uuid
import ujson
from unittest.mock import Mock
# Canned session id and cookie fixtures shared by every test in this module.
SID = "5235262626"
COOKIE_NAME = "cookie"
# Request cookies mapping the session cookie name to the canned session id.
COOKIES = {COOKIE_NAME: SID}
@pytest.fixture
def mock_dict():
    """Return a dict subclass whose instances accept attribute assignment,
    mimicking a Sanic request/response object (e.g. ``request.cookies``)."""
    class MockDict(dict):
        pass
    return MockDict
@pytest.fixture
def mock_redis():
    """Return a bare class standing in for a redis connection; tests attach
    coroutine mocks (``get``/``setex``/``delete``) to its instances."""
    class MockRedisConnection:
        pass
    return MockRedisConnection
def mock_coroutine(return_value=None):
    """Build a call-tracking mock whose invocation is awaitable.

    The returned Mock wraps a coroutine that ignores its arguments and
    always resolves to *return_value*, so tests can both ``await`` it and
    inspect ``call_count`` / ``call_args_list`` afterwards.
    """
    async def _canned(*_args, **_kwargs):
        # Arguments are recorded by the Mock wrapper, not used here.
        return return_value

    return Mock(wraps=_canned)
async def get_interface_and_request(mocker, redis_getter, data=None):
    """Build a RedisSessionInterface whose backing redis returns *data*,
    open a fake request against it, and return ``(interface, request)``.

    BUG FIX: the original called the pytest fixture functions ``mock_dict()``
    and ``mock_redis()`` directly, which pytest (>= 4) rejects with
    "Fixtures are not meant to be called directly"; plain local stand-ins
    are used instead.
    """
    class _MockDict(dict):
        # dict subclass so attributes (request.cookies) can be attached.
        pass

    class _MockRedisConnection:
        pass

    request = _MockDict()
    request.cookies = COOKIES
    data = data or {}
    redis_connection = _MockRedisConnection()
    redis_connection.get = mock_coroutine(ujson.dumps(data))
    # NOTE(review): the incoming ``redis_getter`` argument was always
    # discarded by the original implementation too; the parameter is kept
    # only for signature compatibility.
    redis_getter = mock_coroutine(redis_connection)
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME)
    await session_interface.open(request)
    return session_interface, request
@pytest.mark.asyncio
async def test_redis_should_create_new_sid_if_no_cookie(mocker, mock_redis, mock_dict):
    """A request carrying no session cookie gets a brand-new uuid4 SID."""
    request = mock_dict()
    request.cookies = {}

    conn = mock_redis()
    conn.get = mock_coroutine()
    fake_getter = mock_coroutine(conn)

    mocker.spy(uuid, "uuid4")
    interface = RedisSessionInterface(fake_getter)
    await interface.open(request)

    assert uuid.uuid4.call_count == 1, "should create a new SID with uuid"
    assert request["session"] == {}, "should return an empty dict as session"
@pytest.mark.asyncio
async def test_should_return_data_from_redis(mocker, mock_dict, mock_redis):
    """An existing session cookie loads the stored payload from redis."""
    request = mock_dict()
    request.cookies = COOKIES
    mocker.spy(uuid, "uuid4")

    stored = {"foo": "bar"}
    conn = mock_redis()
    conn.get = mock_coroutine(ujson.dumps(stored))
    fake_getter = mock_coroutine(conn)

    interface = RedisSessionInterface(fake_getter, cookie_name=COOKIE_NAME)
    session = await interface.open(request)

    assert uuid.uuid4.call_count == 0, "should not create a new SID"
    assert conn.get.call_count == 1, "should call on redis once"
    assert conn.get.call_args_list[0][0][0] == "session:{}".format(
        SID
    ), "should call redis with prefix + SID"
    assert session.get("foo") == "bar", "session data is pulled from redis"
@pytest.mark.asyncio
async def test_should_use_prefix_in_redis_key(mocker, mock_dict, mock_redis):
    """The configured key prefix must be used when reading from redis."""
    request = mock_dict()
    prefix = "differentprefix:"
    data = {"foo": "bar"}
    request.cookies = COOKIES
    # BUG FIX: instantiate the fixture-provided class (original assigned the
    # class itself, so the mock attributes were patched onto the class and
    # leaked into every other test using the fixture).
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine(ujson.dumps(data))
    redis_getter = mock_coroutine(redis_connection)
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME, prefix=prefix)
    await session_interface.open(request)
    assert redis_connection.get.call_args_list[0][0][0] == "{}{}".format(
        prefix, SID
    ), "should call redis with prefix + SID"
@pytest.mark.asyncio
async def test_should_use_return_empty_session_via_redis(mock_redis, mock_dict):
    """A missing redis entry yields an empty session dict."""
    request = mock_dict()
    prefix = "differentprefix:"
    request.cookies = COOKIES
    # BUG FIX: call the fixture-provided class (was assigning the class
    # itself, mutating it for all tests).
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME, prefix=prefix)
    session = await session_interface.open(request)
    assert session == {}
@pytest.mark.asyncio
async def test_should_attach_session_to_request(mock_redis, mock_dict):
    """open() must expose the loaded session on request['session']."""
    request = mock_dict()
    request.cookies = COOKIES
    # BUG FIX: instantiate the mock connection class (was the class itself).
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    # NOTE(review): redis_connection is also passed positionally here, unlike
    # every sibling test -- confirm against RedisSessionInterface's signature.
    session_interface = RedisSessionInterface(redis_getter, redis_connection, cookie_name=COOKIE_NAME)
    session = await session_interface.open(request)
    assert session == request["session"]
@pytest.mark.asyncio
async def test_should_delete_session_from_redis(mocker, mock_redis, mock_dict):
    """Saving an unmodified-empty session deletes the redis key and leaves
    the response cookies untouched."""
    request = mock_dict()
    response = mock_dict()
    request.cookies = COOKIES
    response.cookies = {}
    # BUG FIX: instantiate the mock connection class (was the class itself).
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine()
    redis_connection.delete = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME)
    await session_interface.open(request)
    await session_interface.save(request, response)
    assert redis_connection.delete.call_count == 1
    assert redis_connection.delete.call_args_list[0][0][0] == ["session:{}".format(SID)]
    assert response.cookies == {}, "should not change response cookies"
@pytest.mark.asyncio
async def test_should_expire_redis_cookies_if_modified(mock_dict, mock_redis):
    """Clearing a session must expire the client cookie on save."""
    request = mock_dict()
    response = text("foo")
    request.cookies = COOKIES
    # BUG FIX: instantiate the mock connection class (was the class itself).
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine()
    redis_connection.delete = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME)
    await session_interface.open(request)
    request["session"].clear()
    await session_interface.save(request, response)
    assert response.cookies[COOKIE_NAME]["max-age"] == 0
    assert response.cookies[COOKIE_NAME]["expires"] < datetime.datetime.utcnow()
@pytest.mark.asyncio
async def test_should_save_in_redis_for_time_specified(mock_dict, mock_redis):
    """A modified session is written back via SETEX with the default
    30-day (2592000 s) expiry."""
    request = mock_dict()
    request.cookies = COOKIES
    # BUG FIX: instantiate the mock connection class (was the class itself).
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine(ujson.dumps({"foo": "bar"}))
    redis_connection.setex = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    response = text("foo")
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME)
    await session_interface.open(request)
    request["session"]["foo"] = "baz"
    await session_interface.save(request, response)
    redis_connection.setex.assert_called_with("session:{}".format(SID), 2592000, ujson.dumps(request["session"]))
@pytest.mark.asyncio
async def test_should_reset_cookie_expiry(mocker, mock_dict, mock_redis):
    """Saving a modified session refreshes the cookie's expiry headers."""
    request = mock_dict()
    request.cookies = COOKIES
    # BUG FIX: instantiate the mock connection class (was the class itself).
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine(ujson.dumps({"foo": "bar"}))
    redis_connection.setex = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    response = text("foo")
    # Freeze time so the expiry computation is deterministic.
    mocker.patch("time.time")
    time.time.return_value = 1488576462.138493
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME)
    await session_interface.open(request)
    request["session"]["foo"] = "baz"
    await session_interface.save(request, response)
    assert response.cookies[COOKIE_NAME].value == SID
    assert response.cookies[COOKIE_NAME]["max-age"] == 2592000
    assert response.cookies[COOKIE_NAME]["expires"] < datetime.datetime.utcnow()
@pytest.mark.asyncio
async def test_sessioncookie_should_omit_request_headers(mocker, mock_dict, mock_redis):
    """With sessioncookie=True the cookie carries no expiry headers."""
    request = mock_dict()
    request.cookies = COOKIES
    # BUG FIX: request the mock_redis fixture as a parameter and instantiate
    # it; the original referenced the module-level fixture *function* and
    # patched attributes onto that function object, leaking state globally.
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine(ujson.dumps({"foo": "bar"}))
    redis_connection.delete = mock_coroutine()
    redis_connection.setex = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    response = text("foo")
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME, sessioncookie=True)
    await session_interface.open(request)
    await session_interface.save(request, response)
    assert response.cookies[COOKIE_NAME].value == SID
    assert "max-age" not in response.cookies[COOKIE_NAME]
    assert "expires" not in response.cookies[COOKIE_NAME]
@pytest.mark.asyncio
async def test_sessioncookie_delete_has_expiration_headers(mocker, mock_dict, mock_redis):
    """Even with sessioncookie=True, deleting a session must emit the
    expiry headers so the browser drops the cookie."""
    request = mock_dict()
    request.cookies = COOKIES
    # BUG FIX: request the mock_redis fixture as a parameter and instantiate
    # it; the original referenced the module-level fixture *function* and
    # patched attributes onto that function object, leaking state globally.
    redis_connection = mock_redis()
    redis_connection.get = mock_coroutine(ujson.dumps({"foo": "bar"}))
    redis_connection.delete = mock_coroutine()
    redis_connection.setex = mock_coroutine()
    redis_getter = mock_coroutine(redis_connection)
    response = text("foo")
    session_interface = RedisSessionInterface(redis_getter, cookie_name=COOKIE_NAME, sessioncookie=True)
    await session_interface.open(request)
    await session_interface.save(request, response)
    request["session"].clear()
    await session_interface.save(request, response)
    assert response.cookies[COOKIE_NAME]["max-age"] == 0
    assert response.cookies[COOKIE_NAME]["expires"] < datetime.datetime.utcnow()
acdee8fe690e541ee26f806c6c6a9554c4f340ab | 8,203 | py | Python | electrum_audax/gui/qt/masternode_list.py | theaudaxproject/electrum-audax | 614845e536a21cb593376bdde7df6163b651ab21 | [
"MIT"
] | null | null | null | electrum_audax/gui/qt/masternode_list.py | theaudaxproject/electrum-audax | 614845e536a21cb593376bdde7df6163b651ab21 | [
"MIT"
] | null | null | null | electrum_audax/gui/qt/masternode_list.py | theaudaxproject/electrum-audax | 614845e536a21cb593376bdde7df6163b651ab21 | [
"MIT"
] | 2 | 2019-10-08T06:52:54.000Z | 2019-10-24T03:33:20.000Z | #!/usr/bin/env python
#
# Electrum AUDAX - lightweight Audax client
# Copyright (C) 2019 The Audax Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QFont
from PyQt5.QtCore import (Qt, QSortFilterProxyModel, QModelIndex, QAbstractItemModel,
QVariant, QItemSelectionModel)
from PyQt5.QtWidgets import (QMenu, QHeaderView, QMessageBox)
from electrum_audax.i18n import _
from electrum_audax.util import OrderedDictWithIndex, age, format_time
from .util import MyTreeView, MONOSPACE_FONT, WaitingDialog
from decimal import Decimal
from datetime import datetime, timedelta
from electrum_audax.logging import Logger
import traceback
class MasternodeList(MyTreeView, Logger):
    """Tree view listing the wallet's configured masternodes.

    One row per masternode, showing its server-reported status, with a
    context menu to activate, delete, or debug an entry.  The ``manager``
    attribute (set externally before update() is called) supplies the
    configuration plus signing/broadcast operations.
    """

    class Columns(IntEnum):
        # Column indices of the underlying QStandardItemModel.
        ALIAS = 0
        ADDRESS = 1
        PROTOCOL_VERSION = 2
        STATUS = 3
        ACTIVE = 4
        LASTSEEN = 5
        COLLATERAL = 6

    # Header labels, keyed by column index (consumed by update_headers()).
    headers = {
        Columns.ALIAS: _('Alias'),
        Columns.ADDRESS: _('Address'),
        Columns.PROTOCOL_VERSION: _('Protocol'),
        Columns.STATUS: _('Status'),
        Columns.ACTIVE: _('Active'),
        Columns.LASTSEEN: _('Last Seen'),
        Columns.COLLATERAL: _('Collateral Tx'),
    }

    def __init__(self, parent):
        # Only the alias column is user-editable; edits land in on_edited().
        super().__init__(parent, self.create_menu,
                         stretch_column=self.Columns.ALIAS,
                         editable_columns=[self.Columns.ALIAS])
        Logger.__init__(self)
        # Masternode manager; assigned by the owning window before update().
        self.manager = None
        self.setModel(QStandardItemModel(self))
        self.setSortingEnabled(True)
        self.setColumnWidth(self.Columns.ALIAS, 180)
        self.header().setMinimumSectionSize(100)

    def create_menu(self, position):
        """Build and show the right-click context menu for the row at *position*."""
        idx = self.indexAt(position)
        if not idx.isValid():
            return
        alias = self.model().itemFromIndex(
            idx.sibling(idx.row(), self.Columns.ALIAS)).text()
        # The collateral column holds "txid:n"; keep only the txid part.
        transaction = self.model().itemFromIndex(idx.sibling(
            idx.row(), self.Columns.COLLATERAL)).text().split(':')[0]
        menu = QMenu()
        menu.addAction(_("Start Masternode"),
                       lambda: self.start_masternode(alias))
        menu.addAction(_("Copy Transaction"),
                       lambda: self.parent.app.clipboard().setText(transaction))
        menu.addSeparator()
        menu.addAction(_("Delete"), lambda: self.delete_masternode(alias))
        # NOTE(review): debugging helper exposed in the UI; consider removing.
        menu.addAction("Test", lambda: self.test_masternode(alias))
        menu.exec_(self.viewport().mapToGlobal(position))
        return

    def test_masternode(self, alias):
        """Debug helper: sign an announce for *alias* and dump its keys to stdout."""
        # f99f2822b5e19c488eef220d95d8c7214b44ec25779341ae38292d95873020fb
        self.manager.populate_masternode_output(alias)
        # Signs with a None password -- debug use only; presumably fails on
        # password-protected wallets (TODO confirm).
        self.manager.sign_announce(alias, None)
        mn = self.manager.get_masternode(alias)
        print(mn.private_key)
        print(mn.masternode_pubkey)
        print('01'+mn.serialize())

    def start_masternode(self, alias):
        """Sign and broadcast a masternode announce for *alias*.

        Prompts for the wallet password if needed, signs the announce, then
        broadcasts it on a background thread via WaitingDialog.
        """
        def broadcast_thread():
            # Runs off the UI thread inside WaitingDialog.
            return self.manager.send_announce(alias)

        def broadcast_done(result):
            mn = self.manager.get_masternode(alias)
            # TODO: Check broadcast status.
            print(mn.get_hash())
            # force masternode list reload.
            self.manager.send_subscriptions(True)
            QMessageBox.information(self, _('Success'), _(
                'Masternode activated successfully.'))

        def broadcast_error(err):
            # err is an (exc_type, exc_value, traceback) triple.
            self.logger.info(
                'Error sending Masternode Announce message: ' + str(err))
            # Print traceback information to error log.
            self.logger.info(''.join(traceback.format_tb(err[2])))
            self.logger.info(
                ''.join(traceback.format_exception_only(err[0], err[1])))

        pw = None
        if self.manager.wallet.has_password():
            pw = self.parent.password_dialog(
                msg=_('Please enter your password to activate masternode "%s".' % alias))
            if pw is None:
                # User cancelled the password prompt.
                return
        try:
            self.manager.populate_masternode_output(alias)
            self.manager.sign_announce(alias, pw)
        except Exception as e:
            # NOTE(review): the failure is reported but execution still falls
            # through to the broadcast below -- confirm this is intended.
            QMessageBox.information(self, _('Error'), str(e))

        self.logger.info('Sending Masternode Announce message...')
        WaitingDialog(self, _('Broadcasting masternode...'),
                      broadcast_thread, broadcast_done, broadcast_error)
        return

    def delete_masternode(self, alias):
        """Ask for confirmation, then remove *alias* from the configuration."""
        if QMessageBox.question(self, _('Delete'), _('Do you want to remove the masternode configuration for') + ' %s?' % alias,
                                QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes:
            self.manager.remove_masternode(alias)
            self.update()

    def update(self):
        """Rebuild the model from self.manager's masternode list."""
        self.model().clear()
        self.update_headers(self.__class__.headers)
        if not self.manager:
            return
        for idx, mn in enumerate(self.manager.masternodes):
            # Server-reported data for this masternode, keyed by collateral.
            mn_data = self.manager.masternode_data.get(mn.get_collateral_str())
            # Defaults for masternodes the server does not know about.
            status = _('MISSING')
            ip = ''
            protocol_version = 0
            if mn.addr.ip:
                ip = str(mn.addr)
            activetime_str = _('Unknown')
            lastseen_str = _('Unknown')
            if mn_data is not None:
                protocol_version = mn_data['version']
                status = mn_data['status']
                if status == "ACTIVE":
                    # Just announced; no uptime reported yet.
                    activetime_str = _("Pending Activation")
                else:
                    # 'activetime' is an uptime in seconds; render as an age.
                    activetime_str = age(
                        int((datetime.now() - timedelta(seconds=mn_data['activetime'])).timestamp()))
                lastseen_str = format_time(mn_data['lastseen'])
            labels = [mn.alias, ip, str(protocol_version), status, activetime_str, lastseen_str, mn.vin.get(
                'prevout_hash')+':'+str(mn.vin.get('prevout_n'))]
            items = [QStandardItem(e) for e in labels]
            for i, item in enumerate(items):
                item.setTextAlignment(Qt.AlignVCenter)
                item.setEditable(i in self.editable_columns)
            items[self.Columns.ALIAS].setData(0, Qt.UserRole)
            self.model().insertRow(idx, items)

        self.set_current_idx(0)
        h = self.header()
        h.setStretchLastSection(False)
        for col in self.Columns:
            # Stretch only the alias column; size the rest to their contents.
            sm = QHeaderView.Stretch if col == self.stretch_column else QHeaderView.ResizeToContents
            h.setSectionResizeMode(col, sm)

    def on_edited(self, idx, user_role, text):
        """Persist a user edit of the alias column for the matching masternode.

        *user_role* is part of the MyTreeView callback signature but unused here.
        """
        # The collateral "txid:n" string uniquely identifies the edited row.
        item = self.model().itemFromIndex(idx.sibling(
            idx.row(), self.Columns.COLLATERAL)).text()
        for i, mn in enumerate(self.manager.masternodes):
            if item == mn.vin.get('prevout_hash')+':'+str(mn.vin.get('prevout_n')):
                if mn.alias != text:
                    self.manager.masternodes[i].alias = text
                    self.manager.save()
                break
| 36.95045 | 128 | 0.626844 |
acdee928e3d88250ac77200fa724df07c1da5d48 | 8,935 | py | Python | lib/pyrestful/rest.py | zhkzyth/tornado-async-rest-api | 13180eee10f9db93b5f77d982636d63ae3771c27 | [
"MIT"
] | 21 | 2015-01-31T08:41:34.000Z | 2020-08-03T12:25:53.000Z | lib/pyrestful/rest.py | zhkzyth/tornado-async-rest-api | 13180eee10f9db93b5f77d982636d63ae3771c27 | [
"MIT"
] | null | null | null | lib/pyrestful/rest.py | zhkzyth/tornado-async-rest-api | 13180eee10f9db93b5f77d982636d63ae3771c27 | [
"MIT"
] | 8 | 2016-01-12T11:48:53.000Z | 2020-11-05T07:53:01.000Z | ##!/usr/bin/env python
# encoding: utf-8
#
# Copyright 2013 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.ioloop
import tornado.web
import xml.dom.minidom
import inspect
import re
import json
from pyrestful import mediatypes, types
class PyRestfulException(Exception):
    """Exception type raised for pyRestful configuration errors."""

    def __init__(self, message):
        # Stored verbatim; Exception.__init__ is deliberately not invoked,
        # matching the historical behaviour of this class.
        self.message = message

    def __str__(self):
        return repr(self.message)
def config(func,method,**kwparams):
    """ Decorator config function.

    Reads the decorator keyword arguments (_path, required; _produces,
    _consumes, _types, optional) and attaches the routing metadata to a
    thin wrapper around *func*: the HTTP method, the service path segments,
    the path/query parameter names with their target types, and the media
    types.  Raises PyRestfulException when _produces is not a supported
    media type.
    """
    path = None
    produces = None
    consumes = None
    types = None
    if len(kwparams):
        path = kwparams['_path']
        if '_produces' in kwparams:
            produces = kwparams['_produces']
        else:
            produces = mediatypes.APPLICATION_JSON
        if '_consumes' in kwparams:
            consumes = kwparams['_consumes']
        if '_types' in kwparams:
            types = kwparams['_types']
    def operation(*args,**kwargs):
        return func(*args,**kwargs)
    # BUG FIX: func.func_name is Python-2-only; func.__name__ works on both
    # Python 2 and 3.  The attribute name 'func_name' is kept because
    # RestHandler error messages read operation.func_name.
    operation.func_name = func.__name__
    # getargspec() was removed in Python 3.11; prefer getfullargspec() when
    # available (both expose the positional argument names as .args).
    _argspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
    operation._func_params = _argspec(func).args[1:] # drop the first arg (self for handler methods)
    operation._types = types or [str]*len(operation._func_params)
    operation._service_name = re.findall(r"(?<=/)\w+",path) # re.findall("(?<=/)\w+","/var/{hello}/happy") ===> ["var", "happy"]
    operation._service_params = re.findall(r"(?<={)\w+",path)
    operation._method = method #GET/POST/DELETE/PUT
    operation._produces = produces
    operation._consumes = consumes
    operation._query_params = re.findall(r"(?<=<)\w+",path)
    operation._path = path
    if not operation._produces in [mediatypes.APPLICATION_JSON,mediatypes.APPLICATION_XML,mediatypes.TEXT_XML]:
        raise PyRestfulException("The media type used do not exist : "+operation.func_name)
    return operation
def get(*params, **kwparams):
    """Decorator that configures a python function as a REST GET verb."""
    def _decorate(handler_func):
        return config(handler_func,'GET',**kwparams)

    return _decorate
def post(*params, **kwparams):
    """Decorator that configures a python function as a REST POST verb."""
    def _decorate(handler_func):
        return config(handler_func,'POST',**kwparams)

    return _decorate
def put(*params, **kwparams):
    """Decorator that configures a python function as a REST PUT verb."""
    def _decorate(handler_func):
        return config(handler_func,'PUT',**kwparams)

    return _decorate
def delete(*params, **kwparams):
    """Decorator that configures a python function as a REST DELETE verb."""
    def _decorate(handler_func):
        return config(handler_func,'DELETE',**kwparams)

    return _decorate
class RestHandler(tornado.web.RequestHandler):
    """Tornado handler dispatching HTTP verbs to methods decorated with
    @get/@post/@put/@delete (see the ``config`` decorator above).

    NOTE(review): the dispatch logic relies on Python 2 semantics where
    filter()/map() return lists (e.g. ``service_name == services_from_request``
    and ``len(services_and_params)``); under Python 3 these are lazy
    iterators and the comparisons would misbehave -- confirm the target
    interpreter before reuse.
    """
    def get(self):
        """ Executes get method """
        self._exe('GET')

    def post(self):
        """ Executes post method """
        self._exe('POST')

    def put(self):
        """ Executes put method"""
        self._exe('PUT')

    def delete(self):
        """ Executes delete method"""
        self._exe('DELETE')

    def _exe(self, method):
        """ Executes the python function for the Rest Service """
        request_path = self.request.path
        path = request_path.split('/')
        # Non-empty URL segments: service names plus path-parameter values.
        services_and_params = filter(lambda x: x!='',path)

        # Get all function names configured in the class RestHandler
        functions = filter(lambda op: hasattr(getattr(self,op),'_service_name') == True and inspect.ismethod(getattr(self,op)) == True, dir(self))
        # Get all http methods configured in the class RestHandler
        http_methods = map(lambda op: getattr(getattr(self,op),'_method'), functions)

        if method not in http_methods:
            raise tornado.web.HTTPError(405,'The service not have %s verb'%method)
        for operation in map(lambda op: getattr(self,op), functions):
            service_name = getattr(operation, "_service_name")
            service_params = getattr(operation, "_service_params")

            # If the _types is not specified, assumes str types for the params
            params_types = getattr(operation,"_types") or [str]*len(service_params)
            params_types = map(lambda x,y : y if x is None else x, params_types, [str]*len(service_params))

            # produces = getattr(operation,"_produces")
            services_from_request = filter(lambda x: x in path,service_name)
            # query_params = getattr(operation,"_query_params")

            # FIXME: parts of pyrestful were hacked here for compatibility
            # with motor's asynchronous call logic -- the handler no longer
            # writes the response itself; the operation is expected to
            # finish the request.
            if operation._method == self.request.method and service_name == services_from_request and len(service_params) + len(service_name) == len(services_and_params):
                try:
                    # Parameter mapping is crude: URL and query values are
                    # matched to the function's parameters purely by position.
                    params_values = self._find_params_value_of_url(service_name,request_path) + self._find_params_value_of_arguments(operation)
                    p_values = self._convert_params_values(params_values, params_types)

                    response = operation(*p_values)
                except Exception as detail:
                    raise tornado.web.HTTPError(500,"Internal Server Error : %s"%detail)

    def _find_params_value_of_url(self,services,url):
        """ Find the values of path params """
        # Every non-empty URL segment that is not a service-name segment is
        # treated as a path-parameter value.
        values_of_query = list()
        url_split = url.split("/")
        values = [item for item in url_split if item not in services and item != '']
        for v in values:
            if v != None:
                values_of_query.append(v)
        return values_of_query

    def _find_params_value_of_arguments(self, operation):
        """Collect query-string argument values for *operation* in declared
        parameter order; absent arguments become None."""
        values = []
        if len(self.request.arguments) > 0:
            service_params = operation._service_params
            func_params = operation._func_params
            # order matter??
            params = [item for item in func_params if item not in service_params]
            for p in params:
                if p in self.request.arguments.keys():
                    v = self.request.arguments[p]
                    values.append(v[0]) # FIXME what about if the value is a list
                else:
                    values.append(None)
        elif len(self.request.arguments) == 0 and len(operation._query_params) > 0:
            # No arguments supplied at all: pad with None for each query param.
            values = [None]*(len(operation._func_params) - len(operation._service_params))
        return values

    def _convert_params_values(self, values_list, params_types):
        """ Converts the values to the specifics types """
        values = list()
        i = 0
        for v in values_list:
            if v != None:
                values.append(types.convert(v,params_types[i]))
            else:
                values.append(v)
            i+=1
        return values

    @classmethod
    def get_services(self):
        """ Generates the resources (uri) to deploy the Rest Services """
        services = []
        for f in dir(self):
            o = getattr(self,f)
            if callable(o) and hasattr(o,'_service_name'):
                services.append(getattr(o,'_service_name'))
        return services

    @classmethod
    def get_paths(self):
        """ Generates the resources from path (uri) to deploy the Rest Services """
        paths = []
        for f in dir(self):
            o = getattr(self,f)
            if callable(o) and hasattr(o,'_path'):
                paths.append(getattr(o,'_path'))
        return paths
class RestService(tornado.web.Application):
    """ Class to create Rest services in tornado web server """
    # Shared resource object handed to every generated route.
    resource = None

    def __init__(self, rest_handlers, resource=None, handlers=None, default_host="", transforms=None, wsgi=False, **settings):
        """Build the Tornado application from decorated RestHandler classes.

        rest_handlers : RestHandler subclasses whose decorated paths become routes.
        resource      : optional shared object passed to each generated route.
        handlers      : extra plain Tornado (pattern, handler) routes to append.
        Remaining arguments are forwarded to tornado.web.Application.
        """
        restservices = []
        self.resource = resource
        for r in rest_handlers:
            svs = self._generateRestServices(r)
            restservices += svs
        if handlers != None:
            restservices += handlers
        super(RestService, self).__init__(restservices, default_host, transforms, wsgi, **settings)

    def _generateRestServices(self,rest):
        """Translate each decorated path on *rest* into a Tornado URL spec triple."""
        svs = []
        paths = rest.get_paths()
        # Hacks for tornado's routing rules: turn "{param}" placeholders into
        # ".*" wildcards and strip the "<query>" markup from the pattern.
        for p in paths:
            s = re.sub(r"(?<={)\w+}",".*",p).replace("{","")
            o = re.sub(r"(?<=<)\w+","",s).replace("<","").replace(">","").replace("&","").replace("?","")
            svs.append((o,rest,self.resource))
        return svs
| 37.700422 | 170 | 0.615445 |
acdee93e666364e6f6ef4f31d0c69d4bfc0a79cb | 3,525 | py | Python | migrations/versions/b46d7b9df393_initial_migration.py | Missy-Olivia/Blog-Perso | 70de6c6860cb5ae12627cb28ee590e5c24579cb9 | [
"MIT"
] | null | null | null | migrations/versions/b46d7b9df393_initial_migration.py | Missy-Olivia/Blog-Perso | 70de6c6860cb5ae12627cb28ee590e5c24579cb9 | [
"MIT"
] | null | null | null | migrations/versions/b46d7b9df393_initial_migration.py | Missy-Olivia/Blog-Perso | 70de6c6860cb5ae12627cb28ee590e5c24579cb9 | [
"MIT"
] | null | null | null | """Initial Migration
Revision ID: b46d7b9df393
Revises:
Create Date: 2020-12-14 22:45:09.889422
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b46d7b9df393'  # this migration's unique id
down_revision = None       # no parent: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: mail, users, blogs, comments, dislikes, likes.

    Tables are created parents-first so foreign keys resolve; downgrade()
    drops them in the reverse order.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Mailing-list subscribers (unique email, indexed name).
    op.create_table('mail',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_mail_email'), 'mail', ['email'], unique=True)
    op.create_index(op.f('ix_mail_name'), 'mail', ['name'], unique=False)
    # Application accounts.
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_path', sa.String(), nullable=True),
    sa.Column('password_secure', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
    # Blog posts, each owned by a user.
    # NOTE(review): picture length=225 looks like a typo for 255, but a
    # committed migration must stay as-is; fix (if desired) in a follow-up.
    op.create_table('blogs',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('content', sa.String(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('picture', sa.String(length=225), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Comments attached to a blog post.
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comment', sa.String(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('blog_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['blog_id'], ['blogs.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Per-user dislike records for a blog post.
    op.create_table('dislikes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('dislike', sa.Integer(), nullable=True),
    sa.Column('blog_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['blog_id'], ['blogs.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Per-user like records for a blog post.
    op.create_table('likes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('like', sa.Integer(), nullable=True),
    sa.Column('blog_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['blog_id'], ['blogs.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the initial schema, children first so foreign keys never dangle."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('likes')
    op.drop_table('dislikes')
    op.drop_table('comments')
    op.drop_table('blogs')
    # Indexes must be dropped before their table on some backends.
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_index(op.f('ix_mail_name'), table_name='mail')
    op.drop_index(op.f('ix_mail_email'), table_name='mail')
    op.drop_table('mail')
    # ### end Alembic commands ###
acdee9af46ef430dc2303217e63fa297134d372b | 12,136 | py | Python | src/mlm/models/__init__.py | vblagoje/mlm-scoring | 672729747432810f9bcb37149104124dd3cc4165 | [
"Apache-2.0"
] | null | null | null | src/mlm/models/__init__.py | vblagoje/mlm-scoring | 672729747432810f9bcb37149104124dd3cc4165 | [
"Apache-2.0"
] | null | null | null | src/mlm/models/__init__.py | vblagoje/mlm-scoring | 672729747432810f9bcb37149104124dd3cc4165 | [
"Apache-2.0"
] | null | null | null | import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Tuple
# MXNet-based
import mxnet as mx
from mxnet.gluon import Block
import gluonnlp as nlp
from gluonnlp.model import get_model as _get_model
# PyTorch-based
import torch
import transformers
from .gpt2 import gpt2_117m, gpt2_345m
from .bert import BERTRegression, AlbertForMaskedLMOptimized, BertForMaskedLMOptimized, DistilBertForMaskedLMOptimized
# get_model() is from:
# https://github.com/dmlc/gluon-nlp/blob/master/scripts/text_generation/model/__init__.py
def get_model(name: str, **kwargs) -> Tuple[Block, nlp.Vocab]:
    """Return a pre-defined model (and its vocabulary) by name.

    Extends the GluonNLP model API with the local GPT-2 variants
    ('gpt2_117m', 'gpt2_345m'); any other name is forwarded unchanged to
    gluonnlp.model.get_model with the same keyword arguments
    (dataset_name, vocab, pretrained, ctx, root, ...).

    Returns
    -------
    gluon.Block, gluonnlp.Vocab, (optional) gluonnlp.Vocab
    """
    gpt2_builders = {
        'gpt2_117m': gpt2_117m,
        'gpt2_345m': gpt2_345m,
    }
    key = name.lower()
    builder = gpt2_builders.get(key)
    if builder is None:
        # Not one of the local GPT-2 models; defer to GluonNLP's registry.
        return _get_model(key, **kwargs)
    return builder(**kwargs)
# Shortcodes for MXNet models
# These should not conflict w/ HuggingFace Transformer's shortcodes
SUPPORTED_MLMS = [
    'bert-base-en-uncased',
    'bert-base-en-cased',
    'roberta-base-en-cased',
    'bert-large-en-uncased',
    'bert-large-en-cased',
    'roberta-large-en-cased',
    'bert-base-en-uncased-owt',
    'bert-base-multi-uncased',
    'bert-base-multi-cased'
]
SUPPORTED_LMS = [
    'gpt2-117m-en-cased',
    'gpt2-345m-en-cased'
]
# Every MXNet shortcode accepted by get_pretrained(); names outside this
# list are treated as HuggingFace Transformers model identifiers.
SUPPORTED = SUPPORTED_MLMS + SUPPORTED_LMS
def get_pretrained(ctxs: List[mx.Context], name: str = 'bert-base-en-uncased', params_file: Optional[Path] = None, cased: bool = False, finetune: bool = False, regression: bool = False, freeze: int = 0, root: Optional[Path] = None) -> Tuple[Block, nlp.Vocab, nlp.data.BERTTokenizer]:
    """Load a pre-trained (masked) language model plus its vocab and tokenizer.

    Shortcodes listed in ``SUPPORTED`` (format:
    model-size-lang-cased/uncased[-dataset]) resolve to MXNet/GluonNLP
    models; any other name is treated as a HuggingFace (PyTorch) shortcode.

    Parameters
    ----------
    ctxs : contexts the parameters are loaded on.
    name : model shortcode, e.g. 'bert-base-en-uncased'.
    params_file : optional checkpoint overriding the published weights.
    cased : default casedness; overridden by the shortcode's cased/uncased part.
    finetune : expose the pooler instead of the MLM decoder (MXNet models only).
    regression : wrap the encoder in a BERTRegression head (MXNet BERT only).
    freeze : number of initial transformer layers to freeze; must be >= 0.
    root : optional override of GluonNLP's model cache directory.

    Returns
    -------
    (model, vocab, tokenizer); ``vocab`` is None for PyTorch models.
    """
    if name not in SUPPORTED:
        # Fall back to HuggingFace Transformers (PyTorch) shortcodes.
        # FIX: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Model '{}' not recognized as an MXNet model; treating as PyTorch model".format(name))
        model_fullname = name
        if model_fullname.startswith('albert-'):
            if params_file is None:
                model, loading_info = AlbertForMaskedLMOptimized.from_pretrained(model_fullname, output_loading_info=True)
            else:
                model, loading_info = AlbertForMaskedLMOptimized.from_pretrained(params_file, output_loading_info=True)
            tokenizer = transformers.AlbertTokenizer.from_pretrained(model_fullname)
            vocab = None
        elif model_fullname.startswith('bert-'):
            if params_file is None:
                model, loading_info = BertForMaskedLMOptimized.from_pretrained(model_fullname, output_loading_info=True)
            else:
                model, loading_info = BertForMaskedLMOptimized.from_pretrained(params_file, output_loading_info=True)
            tokenizer = transformers.BertTokenizer.from_pretrained(model_fullname)
            vocab = None
        elif model_fullname.startswith('distilbert-'):
            if params_file is None:
                model, loading_info = DistilBertForMaskedLMOptimized.from_pretrained(model_fullname, output_loading_info=True)
            else:
                model, loading_info = DistilBertForMaskedLMOptimized.from_pretrained(params_file, output_loading_info=True)
            tokenizer = transformers.DistilBertTokenizer.from_pretrained(model_fullname)
            vocab = None
        elif model_fullname.startswith('xlm-'):
            model, loading_info = transformers.XLMWithLMHeadModel.from_pretrained(model_fullname, output_loading_info=True)
            tokenizer = transformers.XLMTokenizer.from_pretrained(model_fullname)
            vocab = None
            # TODO: Not needed in transformers v3? Will vet.
            #
            # # TODO: The loading code in `transformers` assumes pred_layer is under transformers, so the LM head is not loaded properly. We load manually:
            # archive_file = transformers.XLMWithLMHeadModel.pretrained_model_archive_map[model_fullname]
            # resolved_archive_file = transformers.file_utils.cached_path(archive_file)
            # pretrained_state_dict = torch.load(resolved_archive_file, map_location='cpu')
            # new_state_dict = model.state_dict()
            # new_state_dict.update(
            #     {
            #         'pred_layer.proj.weight': pretrained_state_dict['pred_layer.proj.weight'],
            #         'pred_layer.proj.bias': pretrained_state_dict['pred_layer.proj.bias']
            #     }
            # )
            # model.load_state_dict(new_state_dict)
        else:
            raise ValueError("Model '{}' is not currently a supported PyTorch model".format(name))
    # Name format: model-size-lang-cased/uncased(-dataset / special characteristic)
    # e.g., 'bert-base-en-uncased-owt', 'gpt2-117m-en-cased'
    else:
        name_parts = name.split('-')
        model_name = name_parts[0]
        size = name_parts[1]
        lang = name_parts[2]
        if name_parts[3] == 'cased':
            cased = True
        elif name_parts[3] == 'uncased':
            cased = False
        dataset = name_parts[4] if len(name_parts) == 5 else None
        if freeze < 0:
            raise ValueError("# of initial layers to freeze must be non-negative")
        if params_file is not None and dataset is not None:
            logging.warning("Model parameters '{}' was provided, ignoring dataset suffix '{}'".format(params_file, dataset))
        # NOTE(review): the 'base_bertpr' exclusion is opaque; if size equals
        # 'base_bertpr' no branch below matches and `tokenizer` is never
        # bound -- confirm intent with the original author.
        if model_name == 'bert' and size != 'base_bertpr':
            if cased:
                dataset_suffix = '_cased'
            else:
                dataset_suffix = '_uncased'
            if size == 'base':
                model_fullname = 'bert_12_768_12'
            elif size == 'large':
                model_fullname = 'bert_24_1024_16'
            if lang == 'en':
                if dataset is None:
                    dataset_prefix = 'book_corpus_wiki_en'
                elif dataset == 'owt':
                    dataset_prefix = 'openwebtext_book_corpus_wiki_en'
            elif lang == 'multi':
                dataset_prefix = 'wiki_multilingual'
            # Get stock BERT with MLM outputs
            kwargs = {
                'dataset_name': dataset_prefix + dataset_suffix,
                'pretrained': True,
                'ctx': ctxs,
                'use_pooler': False,
                'use_decoder': False,
                'use_classifier': False
            }
            if finetune or regression:
                kwargs['use_pooler'] = True
            else:
                kwargs['use_decoder'] = True
            # Override GluonNLP's default location?
            if root is not None:
                kwargs['root'] = str(root)
            model, vocab = get_model(model_fullname, **kwargs)
            # Freeze initial layers if needed
            for i in range(freeze):
                model.encoder.transformer_cells[i].collect_params().setattr('grad_req', 'null')
            # Wrapper if appropriate
            if regression:
                # NOTE THIS:
                model = BERTRegression(model, dropout=0.1)
                model.regression.initialize(init=mx.init.Normal(1.0), ctx=ctxs)
            # MXNet warning message suggests this when softmaxing in float16
            # But float16 is buggy, so let's halve our inference speed for now :(
            # os.environ['MXNET_SAFE_ACCUMULATION'] = '1'
            # model.cast('float16')
            # Get tokenizer
            tokenizer = nlp.data.BERTTokenizer(vocab, lower=(not cased))
        elif model_name == 'roberta':
            if cased:
                dataset_suffix = '_cased'
            else:
                # BUG FIX: the exception was constructed but never raised;
                # without the raise, dataset_suffix stayed unbound and a
                # confusing NameError followed below.
                raise ValueError('Uncased not supported')
            if size == 'base':
                model_fullname = 'roberta_12_768_12'
            elif size == 'large':
                model_fullname = 'roberta_24_1024_16'
            if lang == 'en' and dataset is None:
                dataset_prefix = 'openwebtext_ccnews_stories_books'
            else:
                # BUG FIX: same missing `raise` as above (was a bare
                # ValueError expression with no effect).
                raise ValueError('Dataset not supported')
            # Get stock BERT with MLM outputs
            kwargs = {
                'dataset_name': dataset_prefix + dataset_suffix,
                'pretrained': True,
                'ctx': ctxs,
                'use_pooler': False,
                'use_decoder': False,
                'use_classifier': False
            }
            if finetune or regression:
                kwargs['use_pooler'] = True
            else:
                kwargs['use_decoder'] = True
            # Override GluonNLP's default location?
            if root is not None:
                kwargs['root'] = str(root)
            model, vocab = get_model(model_fullname, **kwargs)
            # Freeze initial layers if needed
            for i in range(freeze):
                model.encoder.transformer_cells[i].collect_params().setattr('grad_req', 'null')
            # Wrapper if appropriate
            if regression:
                # BUG FIX: `ValueError("Not yet tested")` was a bare
                # expression, so the admittedly-untested RoBERTa regression
                # wrapper ran anyway. Fail loudly instead.
                raise ValueError("Not yet tested")
            # Get tokenizer
            tokenizer = nlp.data.GPT2BPETokenizer()
            # TODO: Have the scorers condition on what the vocab and tokenizer class are
            vocab.cls_token = vocab.bos_token
            vocab.sep_token = vocab.eos_token
            tokenizer.convert_tokens_to_ids = vocab.to_indices
        elif model_name == 'gpt2':
            # GPT-2 is cased-only and supports none of the BERT-style options.
            assert cased
            assert not finetune
            assert not regression
            assert freeze == 0
            if size == '117m':
                model_fullname = 'gpt2_117m'
            elif size == '345m':
                model_fullname = 'gpt2_345m'
            # Get stock GPT-2
            kwargs = {
                'dataset_name': 'openai_webtext',
                'pretrained': True,
                'ctx': ctxs,
            }
            # Override GluonNLP's default location?
            if root is not None:
                kwargs['root'] = str(root)
            model, vocab = get_model(model_fullname, **kwargs)
            # Get tokenizer
            tokenizer = nlp.data.GPT2BPETokenizer()
            # To fit the assumptions of score block
            tokenizer.vocab = vocab
            vocab.cls_token = vocab.eos_token
            vocab.sep_token = vocab.eos_token
            tokenizer.convert_tokens_to_ids = vocab.to_indices
        # Optionally overwrite the published MXNet weights with a checkpoint.
        if params_file is not None:
            model.load_parameters(str(params_file),
                ctx=ctxs, allow_missing=True, ignore_extra=True, cast_dtype=True)
    return model, vocab, tokenizer
| 38.773163 | 283 | 0.606707 |
acdeea7a099b949c408a1afbbfdb8255d517a725 | 3,853 | py | Python | tests/test_types.py | callahad/synapse | 066068f03478753b7d838ae49e87d7a6cde80fd6 | [
"Apache-2.0"
] | 1 | 2021-03-29T22:47:31.000Z | 2021-03-29T22:47:31.000Z | tests/test_types.py | callahad/synapse | 066068f03478753b7d838ae49e87d7a6cde80fd6 | [
"Apache-2.0"
] | 1 | 2019-04-04T05:08:04.000Z | 2019-04-04T05:08:04.000Z | tests/test_types.py | callahad/synapse | 066068f03478753b7d838ae49e87d7a6cde80fd6 | [
"Apache-2.0"
] | 1 | 2021-09-27T14:45:52.000Z | 2021-09-27T14:45:52.000Z | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import SynapseError
from synapse.types import GroupID, RoomAlias, UserID, map_username_to_mxid_localpart
from tests import unittest
class UserIDTestCase(unittest.HomeserverTestCase):
    """Tests for parsing, building and comparing ``UserID`` objects."""

    # FIX: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
    # (the alias emits a DeprecationWarning on modern unittest).
    def test_parse(self):
        user = UserID.from_string("@1234abcd:test")
        self.assertEqual("1234abcd", user.localpart)
        self.assertEqual("test", user.domain)
        self.assertEqual(True, self.hs.is_mine(user))

    def test_pase_empty(self):
        # NOTE(review): method name has a typo ('pase'); kept unchanged so
        # any external references to the test id keep working.
        with self.assertRaises(SynapseError):
            UserID.from_string("")

    def test_build(self):
        user = UserID("5678efgh", "my.domain")
        self.assertEqual(user.to_string(), "@5678efgh:my.domain")

    def test_compare(self):
        userA = UserID.from_string("@userA:my.domain")
        userAagain = UserID.from_string("@userA:my.domain")
        userB = UserID.from_string("@userB:my.domain")
        self.assertTrue(userA == userAagain)
        self.assertTrue(userA != userB)
class RoomAliasTestCase(unittest.HomeserverTestCase):
    """Tests for parsing, building and validating ``RoomAlias`` objects."""

    # FIX: replaced the deprecated ``assertEquals`` alias with ``assertEqual``.
    def test_parse(self):
        room = RoomAlias.from_string("#channel:test")
        self.assertEqual("channel", room.localpart)
        self.assertEqual("test", room.domain)
        self.assertEqual(True, self.hs.is_mine(room))

    def test_build(self):
        room = RoomAlias("channel", "my.domain")
        self.assertEqual(room.to_string(), "#channel:my.domain")

    def test_validate(self):
        # A comma is not allowed in a room alias domain.
        id_string = "#test:domain,test"
        self.assertFalse(RoomAlias.is_valid(id_string))
class GroupIDTestCase(unittest.TestCase):
    """Tests for ``GroupID`` parsing and validation."""

    def test_parse(self):
        parsed = GroupID.from_string("+group/=_-.123:my.domain")
        self.assertEqual("group/=_-.123", parsed.localpart)
        self.assertEqual("my.domain", parsed.domain)

    def test_validate(self):
        # A wrong sigil, an empty localpart, and each disallowed character
        # must all be rejected with an M_INVALID_PARAM error.
        invalid_ids = ["$badsigil:domain", "+:empty"]
        invalid_ids.extend("+group" + ch + ":domain" for ch in "A%?æ£")
        for id_string in invalid_ids:
            try:
                GroupID.from_string(id_string)
            except SynapseError as exc:
                self.assertEqual(400, exc.code)
                self.assertEqual("M_INVALID_PARAM", exc.errcode)
            else:
                self.fail("Parsing '%s' should raise exception" % id_string)
class MapUsernameTestCase(unittest.TestCase):
    """Tests for ``map_username_to_mxid_localpart``."""

    def testPassThrough(self):
        # An already-valid localpart comes back unchanged.
        self.assertEqual(map_username_to_mxid_localpart("test1234"), "test1234")

    def testUpperCase(self):
        # Upper-case letters are lower-cased by default...
        self.assertEqual(map_username_to_mxid_localpart("tEST_1234"), "test_1234")
        # ...and underscore-escaped when case sensitivity is requested.
        self.assertEqual(
            map_username_to_mxid_localpart("tEST_1234", case_sensitive=True),
            "t_e_s_t__1234",
        )

    def testSymbols(self):
        # Disallowed symbols become '=xx' hex escapes.
        self.assertEqual(
            map_username_to_mxid_localpart("test=$?_1234"), "test=3d=24=3f_1234"
        )

    def testLeadingUnderscore(self):
        self.assertEqual(map_username_to_mxid_localpart("_test_1234"), "=5ftest_1234")

    def testNonAscii(self):
        # Both a unicode string and its utf-8 bytes are accepted.
        for username in ("têst", "têst".encode("utf-8")):
            self.assertEqual(map_username_to_mxid_localpart(username), "t=c3=aast")
| 35.027273 | 86 | 0.670646 |
acdeeb6b15db08a479fbeb84629ebc19812eaf90 | 1,175 | py | Python | envdsys/envcontacts/migrations/0099_auto_20210415_2238.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | 1 | 2021-11-06T19:22:53.000Z | 2021-11-06T19:22:53.000Z | envdsys/envcontacts/migrations/0099_auto_20210415_2238.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | 25 | 2019-06-18T20:40:36.000Z | 2021-07-23T20:56:48.000Z | envdsys/envcontacts/migrations/0099_auto_20210415_2238.py | NOAA-PMEL/envDataSystem | 4db4a3569d2329658799a3eef06ce36dd5c0597d | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-15 22:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envcontacts', '0098_auto_20210415_2236'),
]
operations = [
migrations.AlterField(
model_name='person',
name='email1_type',
field=models.CharField(choices=[('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='email2_type',
field=models.CharField(choices=[('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone1_type',
field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('M', 'Mobile'), ('O', 'Other')], default='M', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone2_type',
field=models.CharField(choices=[('H', 'Home'), ('W', 'Work'), ('M', 'Mobile'), ('O', 'Other')], default='M', max_length=1),
),
]
| 34.558824 | 135 | 0.530213 |
acdeec6ce8633021a038ab36f6048f331bf8ba20 | 2,916 | py | Python | root_gnn/src/datasets/wprimefiltered.py | jacoblyons98/root_gnn | 1bb2bd7a985e60f2afd34622b90abf87c8de2fa7 | [
"Apache-2.0"
] | 1 | 2022-01-24T04:47:06.000Z | 2022-01-24T04:47:06.000Z | root_gnn/src/datasets/wprimefiltered.py | Andris-Huang/root_gnn | cf4ff0be6800f19fca4d8365426bf031de2809c9 | [
"Apache-2.0"
] | null | null | null | root_gnn/src/datasets/wprimefiltered.py | Andris-Huang/root_gnn | cf4ff0be6800f19fca4d8365426bf031de2809c9 | [
"Apache-2.0"
] | null | null | null | import os
import tensorflow as tf
import numpy as np
from graph_nets import utils_tf
from root_gnn.src.datasets.base import DataSet
from root_gnn.src.datasets import graph
from root_gnn.utils import load_model
class WTaggerFilteredDataset(DataSet):
def __init__(self, *args, **kwargs):
self.edge_cut = 0.5
self.is_signal = False
super().__init__(*args, **kwargs)
def set_gnn_config(self, config):
self.model, self.num_mp, self.batch_size = load_model(config)
def signal(self, ss=True):
self.is_signal = ss
def read(self, filename):
filenames = tf.io.gfile.glob(filename)
dataset = tf.data.TFRecordDataset(filenames)
AUTO = tf.data.experimental.AUTOTUNE
dataset = dataset.map(graph.parse_tfrec_function, num_parallel_calls=AUTO)
total_evts = sum([1 for _ in dataset])
print("Total {:,} events".format(total_evts))
for data in dataset:
yield data
def make_graph(self, event, debug):
inputs_tr, _ = event
# apply the GNN model and filter out the edges with a score less than the threshold 0.5.
outputs_tr = self.model(inputs_tr, self.num_mp, is_training=False)
output_graph = outputs_tr[-1]
# calculate similar variables for GNN-based reconstruction
# method-one, place a threshold on edge score
edge_predict = np.squeeze(output_graph.edges.numpy())
edge_passed = edge_predict > self.edge_cut
nodes_sel = np.unique(np.concatenate([output_graph.receivers.numpy()[edge_passed],\
output_graph.senders.numpy()[edge_passed]], axis=0))
n_nodes = nodes_sel.shape[0]
n_edges = sum(edge_passed)
nodes = inputs_tr.nodes.numpy()[nodes_sel]
edges = inputs_tr.edges.numpy()[edge_passed]
node_dicts = {}
for idx, val in enumerate(nodes_sel):
node_dicts[val] = idx
senders = np.array([node_dicts[x] for x in inputs_tr.senders.numpy()[edge_passed]])
receivers = np.array([node_dicts[x] for x in inputs_tr.receivers.numpy()[edge_passed]])
input_datadict = {
"n_node": n_nodes,
"n_edge": n_edges,
"nodes": nodes,
"edges": edges,
"senders": senders,
"receivers": receivers,
"globals": np.array([n_nodes], dtype=np.float32)
}
target_datadict = {
"n_node": n_nodes,
"n_edge": n_edges,
"nodes": nodes,
"edges": edges,
"senders": senders,
"receivers": receivers,
"globals": np.array([float(self.is_signal)], dtype=np.float32)
}
input_graph = utils_tf.data_dicts_to_graphs_tuple([input_datadict])
target_graph = utils_tf.data_dicts_to_graphs_tuple([target_datadict])
return [(input_graph, target_graph)] | 35.560976 | 96 | 0.628944 |
acdeed06d76e16d91418262ac8b7f446177ac62b | 10,847 | py | Python | test/integration_tests/test_bids_download.py | poldracklab/bids-core | b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e | [
"MIT"
] | 1 | 2016-03-09T01:24:02.000Z | 2016-03-09T01:24:02.000Z | test/integration_tests/test_bids_download.py | poldracklab/bids-core | b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e | [
"MIT"
] | 15 | 2016-02-17T19:11:32.000Z | 2018-04-12T23:33:06.000Z | test/integration_tests/test_bids_download.py | poldracklab/bids-core | b87a1ef2d3e1c5a79a98c0f0ba82b1b2634bce0e | [
"MIT"
] | 4 | 2017-04-05T17:34:59.000Z | 2018-01-22T01:40:51.000Z | import requests
import json
import time
import logging
import tarfile
import os
from nose.tools import with_setup
# Module-level test fixtures: a stderr logger, the API endpoint, and a
# scratch object collecting ids of containers created during setup so
# teardown can delete them.
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
log.setLevel(logging.INFO)
base_url = 'http://localhost:8080/api'
test_data = type('',(object,),{'sessions': [], 'acquisitions': []})()
session = None
# Expected BIDS directory layout: every path that must appear in a
# downloaded archive (subjects x sessions x anat/func modalities).
file_list = [
    "bids_dataset/CHANGES",
    "bids_dataset/dataset_description.json",
    "bids_dataset/participants.tsv",
    "bids_dataset/README",
    "bids_dataset/task-livingnonlivingdecisionwithplainormirrorreversedtext_bold.json",
    "bids_dataset/sub-01/ses-pre/func/sub-01_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz",
    "bids_dataset/sub-01/ses-pre/func/sub-01_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_events.tsv",
    "bids_dataset/sub-01/ses-pre/func/sub-01_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_bold.nii.gz",
    "bids_dataset/sub-01/ses-pre/func/sub-01_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_events.tsv",
    "bids_dataset/sub-01/ses-pre/anat/sub-01_ses-pre_inplaneT2.nii.gz",
    "bids_dataset/sub-01/ses-pre/anat/sub-01_ses-pre_T1w.nii.gz",
    "bids_dataset/sub-01/ses-post/func/sub-01_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz",
    "bids_dataset/sub-01/ses-post/func/sub-01_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_events.tsv",
    "bids_dataset/sub-01/ses-post/func/sub-01_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_bold.nii.gz",
    "bids_dataset/sub-01/ses-post/func/sub-01_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_events.tsv",
    "bids_dataset/sub-01/ses-post/anat/sub-01_ses-post_inplaneT2.nii.gz",
    "bids_dataset/sub-01/ses-post/anat/sub-01_ses-post_T1w.nii.gz",
    "bids_dataset/sub-02/ses-pre/func/sub-02_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz",
    "bids_dataset/sub-02/ses-pre/func/sub-02_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_events.tsv",
    "bids_dataset/sub-02/ses-pre/func/sub-02_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_bold.nii.gz",
    "bids_dataset/sub-02/ses-pre/func/sub-02_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_events.tsv",
    "bids_dataset/sub-02/ses-pre/anat/sub-02_ses-pre_inplaneT2.nii.gz",
    "bids_dataset/sub-02/ses-pre/anat/sub-02_ses-pre_T1w.nii.gz",
    "bids_dataset/sub-02/ses-post/func/sub-02_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz",
    "bids_dataset/sub-02/ses-post/func/sub-02_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_events.tsv",
    "bids_dataset/sub-02/ses-post/func/sub-02_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_bold.nii.gz",
    "bids_dataset/sub-02/ses-post/func/sub-02_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_events.tsv",
    "bids_dataset/sub-02/ses-post/anat/sub-02_ses-post_inplaneT2.nii.gz",
    "bids_dataset/sub-02/ses-post/anat/sub-02_ses-post_T1w.nii.gz",
    "bids_dataset/sub-03/ses-pre/func/sub-03_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz",
    "bids_dataset/sub-03/ses-pre/func/sub-03_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_events.tsv",
    "bids_dataset/sub-03/ses-pre/func/sub-03_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_bold.nii.gz",
    "bids_dataset/sub-03/ses-pre/func/sub-03_ses-pre_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_events.tsv",
    "bids_dataset/sub-03/ses-pre/anat/sub-03_ses-pre_inplaneT2.nii.gz",
    "bids_dataset/sub-03/ses-pre/anat/sub-03_ses-pre_T1w.nii.gz",
    "bids_dataset/sub-03/ses-post/func/sub-03_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz",
    "bids_dataset/sub-03/ses-post/func/sub-03_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_events.tsv",
    "bids_dataset/sub-03/ses-post/func/sub-03_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_bold.nii.gz",
    "bids_dataset/sub-03/ses-post/func/sub-03_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-02_events.tsv",
    "bids_dataset/sub-03/ses-post/anat/sub-03_ses-post_inplaneT2.nii.gz",
    "bids_dataset/sub-03/ses-post/anat/sub-03_ses-post_T1w.nii.gz"
]
def file_list_to_tree(file_list):
    """Convert a flat list of '/'-separated paths into a nested tree.

    The single top-level directory is returned as
    ``{'name': <dir>, 'children': [...]}``; file leaves are
    ``{'filename': <name>}`` dicts. Children appear in first-seen order.
    """
    # Build an intermediate nested-dict tree: a leaf is marked by the
    # string 'file', an interior node by a dict of its children.
    tree = {}
    for path in file_list:
        parts = path.split('/')
        node = tree
        last_index = len(parts) - 1
        for index, part in enumerate(parts):
            if part not in node:
                # BUG FIX: the original used parts.index(part), which finds
                # the FIRST occurrence of the component; a final component
                # repeating an earlier one (e.g. 'a/b/a') was therefore
                # misclassified as a directory instead of a file.
                node[part] = {} if index < last_index else 'file'
            node = node[part]

    def _to_nodes(mapping):
        # Convert the nested dicts into the list-of-dicts representation.
        nodes = []
        for name, value in mapping.items():
            if value == 'file':
                nodes.append({'filename': name})
            else:
                nodes.append({'name': name, 'children': _to_nodes(value)})
        return nodes

    # The dataset is assumed to have exactly one root directory.
    return _to_nodes(tree)[0]
def setup_download():
    """Populate the API with a BIDS-shaped hierarchy before a download test.

    Builds group -> project -> subject/session containers -> acquisitions
    mirroring ``file_list`` and uploads a dummy file for every leaf. The
    ids of created containers are recorded on ``test_data`` so
    ``teardown_download`` can delete them afterwards.
    """
    global session
    session = requests.Session()
    # all the requests will be performed as root
    session.params = {
        'user': 'test@user.com',
        'root': True
    }
    # Convert file list to a tree
    dataset = file_list_to_tree(file_list)
    # Create a group (timestamped name to avoid collisions between runs)
    test_data.group_id = 'test_group_' + str(int(time.time()*1000))
    payload = {
        '_id': test_data.group_id
    }
    payload = json.dumps(payload)
    r = session.post(base_url + '/groups', data=payload)
    assert r.ok
    # Create a project named after the tree's root directory
    payload = {
        'group': test_data.group_id,
        'label': dataset['name'],
        'public': False
    }
    payload = json.dumps(payload)
    r = session.post(base_url + '/projects', data=payload)
    test_data.pid = json.loads(r.content)['_id']
    assert r.ok
    log.debug('pid = \'{}\''.format(test_data.pid))
    # Crawl file tree: top-level files attach to the project, directories
    # become subject -> session -> modality containers.
    for bidsSubject in dataset['children']:
        if 'filename' in bidsSubject:
            # Upload project files (placeholder content)
            files = {'file': (bidsSubject['filename'], 'some,data,to,send'),
                     'tags': ('', '["project"]')}
            r = session.post(base_url + '/projects/' + test_data.pid +'/files', files=files)
        elif 'children' in bidsSubject:
            # Create subject directories
            payload = {
                'project': test_data.pid,
                'label': bidsSubject['name'],
                'subject': {
                    'code': 'subject'
                }
            }
            payload = json.dumps(payload)
            r = session.post(base_url + '/sessions', data=payload)
            subjectId = json.loads(r.content)['_id']
            test_data.sessions.append(subjectId)
            for bidsSession in bidsSubject['children']:
                # Create session directories (linked to the subject via code)
                payload = {
                    'project': test_data.pid,
                    'label': bidsSession['name'],
                    'subject': {
                        'code': subjectId
                    }
                }
                payload = json.dumps(payload)
                r = session.post(base_url + '/sessions', data=payload)
                sessionId = json.loads(r.content)['_id']
                test_data.sessions.append(sessionId)
                for bidsModality in bidsSession['children']:
                    # Create modality directories (acquisitions)
                    payload = {
                        'session': sessionId,
                        'label': bidsModality['name'],
                    }
                    payload = json.dumps(payload)
                    r = session.post(base_url + '/acquisitions', data=payload)
                    modalityId = json.loads(r.content)['_id']
                    test_data.acquisitions.append(modalityId)
                    for bidsAcquisition in bidsModality['children']:
                        # Upload modality files (placeholder content)
                        files = {'file': (bidsAcquisition['filename'], 'some,data,to,send'),
                                 'tags': ('', '["acquisition"]')}
                        r = session.post(base_url + '/acquisitions/' + modalityId +'/files', files=files)
def teardown_download():
    """Delete every container created by setup_download and remove the tars."""
    success = True
    # Remove containers child-first so parents are empty when deleted.
    for acq_id in test_data.acquisitions:
        resp = session.delete(base_url + '/acquisitions/' + acq_id)
        success = success and resp.ok
    for sess_id in test_data.sessions:
        resp = session.delete(base_url + '/sessions/' + sess_id)
        success = success and resp.ok
    for suffix in ('/projects/' + test_data.pid, '/groups/' + test_data.group_id):
        resp = session.delete(base_url + suffix)
        success = success and resp.ok
    session.close()
    # remove tar files produced by download_dataset
    os.remove('test_download.tar')
    os.remove('test_download_symlinks.tar')
    if not success:
        log.error('error in the teardown. These containers may have not been removed.')
        log.error(str(test_data.__dict__))
def download_dataset(symlinks):
    """Request a BIDS-format batch download of the test project and save it.

    Writes the tar stream to 'test_download.tar', or
    'test_download_symlinks.tar' when ``symlinks`` is True.
    """
    # Retrieve a ticket for a batch download of the whole project
    payload = {
        'optional': False,
        'nodes': [
            {
                'level': 'project',
                '_id': test_data.pid
            }
        ]
    }
    payload = json.dumps(payload)
    r = session.post(base_url + '/download', data=payload, params={'format': 'bids'})
    assert r.ok
    # Perform the download using the returned ticket
    ticket = json.loads(r.content)['ticket']
    params = {'ticket': ticket}
    if symlinks:
        params['symlinks'] = True
    r = session.get(base_url + '/download', params=params)
    assert r.ok
    # Save the tar to a file if successful.
    # BUG FIX: the archive is binary, so open in 'wb' (text mode corrupts or
    # rejects bytes depending on platform/Python version); a context manager
    # guarantees the handle is closed even if the write fails.
    filename = 'test_download_symlinks.tar' if symlinks else 'test_download.tar'
    with open(filename, 'wb') as f:
        f.write(r.content)
def get_tar_list(filename):
    """Return the list of member paths inside the tar archive *filename*.

    FIX: the archive is opened via a context manager so the file handle is
    closed even when reading raises; ``getnames()`` replaces the manual
    member loop (TarInfo.path is an alias for TarInfo.name).
    """
    with tarfile.open(filename) as tar:
        return tar.getnames()
@with_setup(setup_download, teardown_download)
def test_download():
    """Both plain and symlinked BIDS downloads must contain exactly file_list."""
    download_dataset(False)
    download_dataset(True)
    # Sort the reference list in place, then compare each archive's sorted
    # member list against it.
    file_list.sort()
    for tar_name in ('test_download.tar', 'test_download_symlinks.tar'):
        entries = get_tar_list(tar_name)
        entries.sort()
        assert file_list == entries
| 42.704724 | 133 | 0.670047 |
acdeee6efd99c01e8910333b4bca5221f7e38fe9 | 1,977 | py | Python | web/web/settings_template.py | webisteme/punkmoney | 79253f8a37c80789e22c5c63eb6c88ccade61286 | [
"MIT"
] | 1 | 2018-10-01T11:41:57.000Z | 2018-10-01T11:41:57.000Z | web/web/settings_template.py | webisteme/punkmoney | 79253f8a37c80789e22c5c63eb6c88ccade61286 | [
"MIT"
] | null | null | null | web/web/settings_template.py | webisteme/punkmoney | 79253f8a37c80789e22c5c63eb6c88ccade61286 | [
"MIT"
] | null | null | null | """
Django settings for web project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g3m_9w1!z640!a=&%5_u&-4+ryecpnzjh29=&u*&uzxhfej26('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tracker'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'web.urls'
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| 23.535714 | 71 | 0.729388 |
acdeee8796f6b2979b2c172b3001ee4ea0cc104e | 8,503 | py | Python | tests/test_transaction.py | mascot6699/py-kin-base | b6075892900b67966365aba0c6b392a6d7cd6eab | [
"Apache-2.0"
] | 2 | 2019-06-07T20:06:39.000Z | 2020-09-02T00:41:36.000Z | tests/test_transaction.py | mascot6699/py-kin-base | b6075892900b67966365aba0c6b392a6d7cd6eab | [
"Apache-2.0"
] | 11 | 2019-02-11T09:33:03.000Z | 2020-11-03T17:45:28.000Z | tests/test_transaction.py | mascot6699/py-kin-base | b6075892900b67966365aba0c6b392a6d7cd6eab | [
"Apache-2.0"
] | 6 | 2019-03-19T18:53:03.000Z | 2021-09-29T03:00:44.000Z | # coding: utf-8
import pytest
from kin_base.memo import *
from kin_base.operation import *
from kin_base.transaction import Transaction
from kin_base.keypair import Keypair
from kin_base.transaction_envelope import TransactionEnvelope as Te
class TestTx:
    """Transaction construction/signing checked against fixed XDR vectors."""

    source = 'GDJVFDG5OCW5PYWHB64MGTHGFF57DRRJEDUEFDEL2SLNIOONHYJWHA3Z'
    seed = 'SAHPFH5CXKRMFDXEIHO6QATHJCX6PREBLCSFKYXTTCDDV6FJ3FXX4POT'
    dest = 'GCW24FUIFPC2767SOU4JI3JEAXIHYJFIJLH7GBZ2AVCBVP32SJAI53F5'

    def test_init_raise_redundant_argument(self):
        # Unknown keyword arguments must be rejected.
        pytest.raises(
            TypeError, Transaction, self.source, dummy=[], sequence=1)

    def test_init_raise_account_code_wrong(self):
        # A malformed source account address must be rejected.
        pytest.raises(Exception, Transaction, self.source + "1", sequence=1)

    def do(self, network, opts):
        # Build a single-operation (Inflation) transaction, sign it with
        # the fixture seed, and return the base64-encoded XDR envelope.
        transaction = Transaction(self.source, **opts)
        transaction.add_operation(Inflation())
        envelope = Te(transaction, network_id=network)
        envelope.sign(Keypair.from_seed(self.seed))
        encoded = envelope.xdr()
        print(encoded)
        return encoded

    def test_textMemo_ascii(self, setup):
        expected = b'AAAAANNSjN1wrdfixw+4w0zmKXvxxikg6EKMi9SW1DnNPhNjAAAAZAAAAAAAAAACAAAAAAAAAAEAAAAHdGVzdGluZwAAAAABAAAAAAAAAAkAAAAAAAAAAc0+E2MAAABAMQFOqFSB22TugUKMAyF+ReoaNe1eXUeuLgxbJ2fo/FqqSs13aszSTveEpOp+FXdYPWKnFREb6UO8lohSE5JaCQ=='
        actual = self.do(setup.network, {'sequence': 2, 'memo': TextMemo('testing')})
        assert expected == actual

    def test_textMemo_unicode(self, setup):
        expected = b'AAAAANNSjN1wrdfixw+4w0zmKXvxxikg6EKMi9SW1DnNPhNjAAAAZAAAAAAAAAACAAAAAAAAAAEAAAAMdMSTxaF0xKvFhsSjAAAAAQAAAAAAAAAJAAAAAAAAAAHNPhNjAAAAQPbTvBNXbVRC2yLA8BFVBB1IvgIlNykIn9heLQC709Mtq1OBOj222zrF0y07Hbe90iWtjAU98bGBQVSpf8GRUQk='
        actual = self.do(setup.network, {'sequence': 2, 'memo': TextMemo('tēštīņģ')})
        assert expected == actual

    def test_imprecise_fee(self, setup):
        # A non-integral fee must be rejected.
        with pytest.raises(NotValidParamError):
            self.do(setup.network, {'sequence': 2, 'memo': TextMemo('testing'), 'fee': 100.54})
class TestMultiOp:
    """Tests for transaction envelopes that carry several operations.

    Each test builds a transaction via ``make_envelope`` and compares the
    signed base64 XDR against a pre-computed expected blob for the test
    network ("Test SDF Network ; September 2015").
    """
    # Source/signing account for every envelope below.
    address = 'GDJVFDG5OCW5PYWHB64MGTHGFF57DRRJEDUEFDEL2SLNIOONHYJWHA3Z'
    seed = 'SAHPFH5CXKRMFDXEIHO6QATHJCX6PREBLCSFKYXTTCDDV6FJ3FXX4POT'
    # Fixed counterparty keypairs referenced by the tests.
    accounts = [
        {
            'address':
            'GCKMUHUBYSJNEIPMJ2ZHSXGSI7LLROFM5U43SWMRDV7J23HI63M7RW2D',
            'seed': 'SDKGBZFUZZEP3QKAFNLEINQ2MPD5QZJ35ZV7YNS6XCQ4NEHI6ND3ZMWC',
        },
        {
            'address':
            'GBG2TM6PGHAWRBVS37MBGOCQ7H7QQH7N2Y2WVUY7IMCEJ6MSF7LWQNIP',
            'seed': 'SAMM4N3BI447BUSTHPGO5NRHQY2J5QWECMPVHLXHZ3UKENU52UJ7MJLQ',
        },
        {
            'address':
            'GCQEAE6KDHPQMO3AJBRPSFV6FAPFYP27Q3EGE4PY4MZCTIV5RRA3KDBS',
            'seed': 'SDWJCTX6T3NJ6HEPDWFPMP33M2UDBPFKUCN7BIRFQYKXQTLO7NGDEVZE',
        },
    ]
    # Default amount (string form, as the SDK expects).
    amount = "20"
    def make_envelope(self, *args, **kwargs):
        """Build, sign and return the base64 XDR of a testnet envelope.

        ``*args`` are the operations to add, in order; ``**kwargs``
        override the default transaction options (fee defaults to 100
        stroops per operation).
        """
        opts = {'sequence': 2, 'fee': 100 * len(args)}
        for opt, value in kwargs.items():
            opts[opt] = value
        tx = Transaction(self.address, **opts)
        # NOTE(review): `count` is unused; a plain `for op in args` would do.
        for count, op in enumerate(args):
            tx.add_operation(op)
        envelope = Te(tx, network_id="Test SDF Network ; September 2015")
        signer = Keypair.from_seed(self.seed)
        envelope.sign(signer)
        envelope_b64 = envelope.xdr()
        print(envelope_b64)
        return envelope_b64
    def test_double_create_account(self):
        """Two CreateAccount operations in one envelope."""
        result = b'AAAAANNSjN1wrdfixw+4w0zmKXvxxikg6EKMi9SW1DnNPhNjAAAAyAAAAAAAAAACAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAlMoegcSS0iHsTrJ5XNJH1ri4rO05uVmRHX6dbOj22fgAAAAAAB6EgAAAAAAAAAAAAAAAAE2ps88xwWiGst/YEzhQ+f8IH+3WNWrTH0MERPmSL9doAAAAAAA9CQAAAAAAAAAAAc0+E2MAAABAnTBDYALsqKbtbmp51tHmwXUm2JNZrbQMQ0JvFKHGdPF3Ez7jgMMG/FTKUNB//g1AirayCz6ZCOWIT6wM91v2Aw=='
        assert (result == self.make_envelope(
            CreateAccount(
                destination=self.accounts[0]['address'],
                starting_balance=self.amount,
            ),
            CreateAccount(
                destination=self.accounts[1]['address'],
                starting_balance="40",
            ),
        ))
    def test_double_payment(self):
        """Two native-asset Payment operations in one envelope."""
        result = b'AAAAANNSjN1wrdfixw+4w0zmKXvxxikg6EKMi9SW1DnNPhNjAAAAyAAAAAAAAAACAAAAAAAAAAAAAAACAAAAAAAAAAEAAAAAlMoegcSS0iHsTrJ5XNJH1ri4rO05uVmRHX6dbOj22fgAAAAAAAAAAAAehIAAAAAAAAAAAQAAAABNqbPPMcFohrLf2BM4UPn/CB/t1jVq0x9DBET5ki/XaAAAAAAAAAAAAD0JAAAAAAAAAAABzT4TYwAAAEDvIvnBrh8kpVKwaoth1QfHQ5KcQdeBOEGPgOYYenqqZlQuro1mcKAAurp03j6r913klG6bU878h6SN/0GLW34E'
        assert (result == self.make_envelope(
            Payment(
                destination=self.accounts[0]['address'],
                asset=Asset.native(),
                amount=self.amount,
            ),
            Payment(
                destination=self.accounts[1]['address'],
                asset=Asset.native(),
                amount="40",
            ),
        ))
    def test_mix_1(self):
        """One envelope exercising nine different operation types."""
        result = b'AAAAANNSjN1wrdfixw+4w0zmKXvxxikg6EKMi9SW1DnNPhNjAAADhAAAAAAAAAACAAAAAAAAAAAAAAAJAAAAAAAAAAAAAAAAlMoegcSS0iHsTrJ5XNJH1ri4rO05uVmRHX6dbOj22fgAAAAAAB6EgAAAAAAAAAABAAAAAE2ps88xwWiGst/YEzhQ+f8IH+3WNWrTH0MERPmSL9doAAAAAAAAAAAAHoSAAAAAAAAAAAIAAAAAAAAAAAAehIAAAAAAoEATyhnfBjtgSGL5Fr4oHlw/X4bIYnH44zIpor2MQbUAAAAAAAAAAAAehIAAAAAAAAAAAAAAAAMAAAABYmVlcgAAAACUyh6BxJLSIexOsnlc0kfWuLis7Tm5WZEdfp1s6PbZ+AAAAAFiZWVyAAAAAE2ps88xwWiGst/YEzhQ+f8IH+3WNWrTH0MERPmSL9doAAAAAACYloAABMsvAAGGoAAAAAAAAAABAAAAAAAAAAQAAAABYmVlcgAAAABNqbPPMcFohrLf2BM4UPn/CB/t1jVq0x9DBET5ki/XaAAAAAFiZWVyAAAAAKBAE8oZ3wY7YEhi+Ra+KB5cP1+GyGJx+OMyKaK9jEG1AAAAAACYloAABMsvAAGGoAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAABYmVlcgAAAACUyh6BxJLSIexOsnlc0kfWuLis7Tm5WZEdfp1s6PbZ+H//////////AAAAAAAAAAcAAAAAlMoegcSS0iHsTrJ5XNJH1ri4rO05uVmRHX6dbOj22fgAAAABYmVlcgAAAAEAAAAAAAAACAAAAACUyh6BxJLSIexOsnlc0kfWuLis7Tm5WZEdfp1s6PbZ+AAAAAAAAAABzT4TYwAAAEDMCZXYi0ZDxQSbtLD2vL5f3Pil6yun1sYFqQHlLL+KNEiAUblloyiuARhOUkFMBr2XfF0ORRew/SAJjcFPmIUC'
        assert (result == self.make_envelope(
            CreateAccount(
                destination=self.accounts[0]['address'],
                starting_balance=self.amount,
            ),
            Payment(
                destination=self.accounts[1]['address'],
                asset=Asset.native(),
                amount=self.amount,
            ),
            PathPayment(
                destination=self.accounts[2]['address'],
                send_asset=Asset.native(),
                dest_asset=Asset.native(),
                send_max=self.amount,
                dest_amount=self.amount,
                path=[],
            ),
            ManageOffer(
                selling=Asset('beer', self.accounts[0]['address']),
                buying=Asset('beer', self.accounts[1]['address']),
                amount="100",
                price=3.14159,
                offer_id=1,
            ),
            CreatePassiveOffer(
                selling=Asset('beer', self.accounts[1]['address']),
                buying=Asset('beer', self.accounts[2]['address']),
                amount="100",
                price=3.14159,
            ), SetOptions(),
            ChangeTrust(asset=Asset('beer', self.accounts[0]['address']), ),
            AllowTrust(
                trustor=self.accounts[0]['address'],
                asset_code='beer',
                authorize=True,
            ), AccountMerge(destination=self.accounts[0]['address'], )))
    def test_mix_2(self):
        """Options/trust operations mixed with a non-native Payment."""
        result = b'AAAAANNSjN1wrdfixw+4w0zmKXvxxikg6EKMi9SW1DnNPhNjAAABkAAAAAAAAAACAAAAAAAAAAAAAAAEAAAAAAAAAAUAAAAAAAAAAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAABRVVSAAAAAADTUozdcK3X4scPuMNM5il78cYpIOhCjIvUltQ5zT4TYwAAWvMQekAAAAAAAAAAAAcAAAAA01KM3XCt1+LHD7jDTOYpe/HGKSDoQoyL1JbUOc0+E2MAAAABRVVSAAAAAAEAAAAAAAAAAQAAAACUyh6BxJLSIexOsnlc0kfWuLis7Tm5WZEdfp1s6PbZ+AAAAAFFVVIAAAAAANNSjN1wrdfixw+4w0zmKXvxxikg6EKMi9SW1DnNPhNjAABa8xB6QAAAAAAAAAAAAc0+E2MAAABA815/lOfza06zceMWD6YGwsS5D7QGz5x+50WeIqVVmuTxgnLe9jKxhLZDVigtRTYZvndpHdSQ/k7YY9i6zMOsAA=='
        assert (result == self.make_envelope(
            SetOptions(set_flags=1),
            ChangeTrust(asset=Asset('EUR', self.address), limit="1000000000"),
            AllowTrust(authorize=True, asset_code='EUR', trustor=self.address),
            Payment(
                destination=self.accounts[0]['address'],
                asset=Asset('EUR', self.address),
                amount="1000000000")))
| 50.313609 | 1,028 | 0.69105 |
acdeef3cb5d37881d925b915ee85421947abb6e2 | 105 | py | Python | PythonTeste/aula 8c.py | leonardocsrod/Python-Curso-em-Video | d61e4f1f0779050d176419de2cec0ef539e48cb3 | [
"MIT"
] | null | null | null | PythonTeste/aula 8c.py | leonardocsrod/Python-Curso-em-Video | d61e4f1f0779050d176419de2cec0ef539e48cb3 | [
"MIT"
] | null | null | null | PythonTeste/aula 8c.py | leonardocsrod/Python-Curso-em-Video | d61e4f1f0779050d176419de2cec0ef539e48cb3 | [
"MIT"
] | null | null | null | import random
# Demo: sample one uniform float in [0.0, 1.0) and one integer in
# [1, 10] (inclusive), then show both, one per line.
number = random.random()
numberint = random.randint(1, 10)
print(number, numberint, sep="\n")
| 15 | 33 | 0.761905 |
acdef02f004d974233f6c0077c28de1b297b0191 | 44,369 | py | Python | bin/noobscience.py | newtoallofthis123/python-dev-bin | b066b008c7be73d8d9a076333fdd7c93a8c4c997 | [
"MIT"
] | 3 | 2021-08-04T07:10:52.000Z | 2021-08-04T08:02:43.000Z | bin/noobscience.py | newtoallofthis123/python-dev-bin | b066b008c7be73d8d9a076333fdd7c93a8c4c997 | [
"MIT"
] | null | null | null | bin/noobscience.py | newtoallofthis123/python-dev-bin | b066b008c7be73d8d9a076333fdd7c93a8c4c997 | [
"MIT"
] | null | null | null | import time
import webbrowser
from tkinter import *
import tkinter.messagebox
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
from tkinter.filedialog import askdirectory
from turtle import *
''' Noob Science '''
class NSnote:
    """Main NoobScience launcher window.

    Everything below runs once at class-creation time and builds the
    shared root window, the menu bar and the background canvas.
    """
    gui = Tk()
    gui.configure(bg="black")
    # Default window size; __init__ may override via width=/height= kwargs.
    Width = 300
    Height = 300
    #TextArea = Text(gui)
    MenuBar = Menu(gui)
    # One drop-down per top-level menu category.
    Applications = Menu(MenuBar, tearoff=0, bg="white", fg="black")
    Fun = Menu(MenuBar, tearoff=0, bg="white", fg="black")
    Music = Menu(MenuBar, tearoff=0, bg="white", fg="black")
    Cli_Tools = Menu(MenuBar, tearoff=0, bg="white", fg="black")
    Web = Menu(MenuBar, tearoff=0, bg="white", fg="black")
    # Best-effort branding image: silently skip when the asset is missing
    # (the bare except is deliberate here).
    try:
        background = PhotoImage(file="Assets/noobscience.png")
        canvas = Canvas(gui, bg="black", width=800, height=480)
        canvas.pack(fill="both", expand=True, anchor="nw")
        canvas.create_image(380,245, image = background)
    except:
        pass
    #ScrollBar = Scrollbar(TextArea)
    #file = None
    def __init__(self,**kwargs):
        """Size and centre the launcher window and wire up all menus.

        Keyword args ``width``/``height`` override the class defaults.
        """
        # Window icon is optional: keep running if the asset is absent.
        try:
            self.gui.wm_iconbitmap("Assets/Gallery/icon.ico")
        except:
            pass
        # NOTE(review): `resizer` is never bound to any event, it assigns
        # a *local* `bg`, and Image.ANTIALIAS was removed in Pillow 10 —
        # this whole branch looks like abandoned resize support; confirm.
        try:
            from PIL import Image,ImageTk
            bg = ImageTk.PhotoImage(file="Assets/noobscience.png")
            def resizer(e):
                global bg1, resized_bg, new_bg
                bg1 = Image.open("Assets/noobscience.png")
                resized_bg = bg1.resize((e.width, e.height), Image.ANTIALIAS)
                bg = ImageTk.PhotoImage(file=resized_bg)
        except:
            pass
        try:
            self.gui.resizable(False,False)
        except:
            pass
        # Window size, overridable via kwargs.
        try:
            self.Width = kwargs['width']
        except KeyError:
            pass
        try:
            self.Height = kwargs['height']
        except KeyError:
            pass
        self.gui.title("NoobScience")
        screenWidth = self.gui.winfo_screenwidth()
        screenHeight = self.gui.winfo_screenheight()
        # Centre the window: horizontal offset...
        left = (screenWidth / 2) - (self.Width / 2)
        # ...and vertical offset.
        top = (screenHeight / 2) - (self.Height /2)
        # Apply "WxH+left+top" geometry.
        self.gui.geometry('%dx%d+%d+%d' % (self.Width,self.Height,left, top))
        self.gui.grid_rowconfigure(0, weight=1)
        self.gui.grid_columnconfigure(0, weight=1)
        # Add controls (widget)
        #self.TextArea.grid(sticky = N + E + S + W)
        # Applications menu: one entry per bundled mini-app.
        self.Applications.add_command(label="Wiki",command=self.wiki)
        self.Applications.add_command(label="Calendar",command=self.calendar)
        self.Applications.add_command(label="Url Shorterner",command=self.shorturl)
        self.Applications.add_command(label="Song Player",command=self.song)
        self.Applications.add_command(label="Clock",command=self.clock)
        self.Applications.add_command(label="Analog Clock",command=self.analog_clock)
        self.Applications.add_command(label="Qr Code Generator",command=self.qr)
        self.Applications.add_command(label="Note",command=self.note)
        self.Applications.add_command(label="Calculator",command=self.cal)
        self.Applications.add_command(label="Password Generator",command=self.pwdgen)
        self.Applications.add_separator()
        self.Applications.add_command(label="Exit",command=self.quit1)
        self.MenuBar.add_cascade(label="Applications",menu=self.Applications)
        # Fun menu: games and jokes.
        self.Fun.add_command(label="Tech Joke",command=self.joke)
        self.Fun.add_command(label="RockPaperScissor",command=self.rps)
        self.Fun.add_command(label="Snakes",command=self.snakes)
        self.Fun.add_command(label="Ping Pong",command=self.pong)
        self.Fun.add_command(label="Tic Tac Toe",command=self.tictactoe)
        self.Fun.add_command(label="Flappy Bird",command=self.flappy_bird)
        self.MenuBar.add_cascade(label="Fun",menu=self.Fun)
        # Music menu: simple pygame-backed playback controls.
        self.Music.add_command(label="Play Relaxing Music",command=self.play)
        self.Music.add_command(label="Pause",command=self.pause)
        self.Music.add_command(label="Stop",command=self.stop)
        self.MenuBar.add_cascade(label="Music",menu=self.Music)
        '''self.Web.add_command(label="Source",command=self.source)
        self.Web.add_command(label="Projects",command=self.projects)
        self.Web.add_command(label="Website",command=self.website)
        self.Web.add_command(label="Github",command=self.github)
        self.Web.add_command(label="Youtube",command=self.youtube)
        self.Web.add_command(label="Blog",command=self.blog)
        self.MenuBar.add_cascade(label="Resources",menu=self.Web)'''
        # Cli-Tools menu: wrappers around command-line style helpers.
        self.Cli_Tools.add_command(label="Alaram",command=self.alarm)
        self.Cli_Tools.add_command(label="Wikid",command=self.wikid)
        self.Cli_Tools.add_command(label="noob",command=self.noob)
        #self.Cli_Tools.add_command(label="Password Generator",command=self.pwd)
        self.Cli_Tools.add_command(label="Google Search",command=self.google)
        self.MenuBar.add_cascade(label="Cli-Tools", menu=self.Cli_Tools)
        self.Cli_Tools.add_command(label="Open Links",command=self.links)
        self.Cli_Tools.add_command(label="Youtube Downloader",command=self.yt_dl)
        self.gui.config(menu=self.MenuBar)
        #self.ScrollBar.pack(side=RIGHT,fill=Y)
        # Scrollbar will adjust automatically according to the content
        #self.ScrollBar.config(command=self.TextArea.yview)
        #self.TextArea.config(yscrollcommand=self.ScrollBar.set)
def wiki(self):
import wikipedia
from tkinter.messagebox import showinfo
wiki = Tk()
wiki.title('WIKIPEDIA SEARCH')
wiki.iconbitmap("Assets/Gallery/icon.ico")
wiki.geometry('200x70')
def wikid() :
search = entry.get()
wikisum = wikipedia.summary(search)
showinfo(search,wikisum)
label = Label(wiki,text="Wikipedia Search :")
label.grid(row=0,column=0)
entry = Entry(wiki)
entry.grid(row=1,column=0)
button = Button(wiki,text="Search",command=wikid)
button.grid(row=1,column=1,padx=10)
wiki.mainloop()
wiki.quit()
def calendar(self):
import calendar
from datetime import datetime
gui = Tk()
gui.geometry("400x300")
gui.title("NS-Calendar")
gui.iconbitmap("Assets/Gallery/icon.ico")
gui.configure(bg="#4A3B52")
gui.resizable(False,False)
def cal():
month_int = int(month.get())
year_int = int(year.get())
cal = calendar.month(year_int, month_int)
textfield.delete(0.0, END)
textfield.insert(INSERT, cal)
label1 = Label(gui, text="Month:", bg="#C5FFB8", font=("Arial", 18),)
label1.grid(row=0, column=5)
label2 = Label(gui, text="Year:", bg="#C5FFB8", font=("Arial", 18),)
label2.grid(row=0, column=6)
month = Spinbox(gui, from_=1, to=12, width=8, bg="#FA8574")
month.grid(row=1, column=5, padx=5)
year = Spinbox(gui, from_=2000, to=2100, width=10, bg="#FA8574")
year.grid(row=1, column=6, padx=10)
button = Button(gui, text="Get The Calendar", command=cal, fg="black", bg="#FDA600")
button.grid(row=1, column=7, padx=10)
textfield = Text(gui, width=20, height=10, fg="black", bg="#FFC400")
textfield.grid(row=2, columnspan=12, padx=5, pady=5)
gui.mainloop()
    def shorturl(self):
        """URL-shortener window backed by pyshorteners' git.io backend.

        NOTE(review): GitHub retired the git.io service, so `processor`
        will fail at runtime; a different pyshorteners backend is needed.
        """
        import pyperclip
        import pyshorteners
        import tkinter as tk
        def processor():
            # Shorten whatever is in the input field and show the result.
            shorten = pyshorteners.Shortener()
            shortening = shorten.gitio.short(url.get())
            short_url.set(shortening)
        def copy():
            # Put the shortened URL on the system clipboard.
            pyperclip.copy( short_url.get())
        # Runs only when the module itself is the entry script.
        if __name__=="__main__":
            gui = tk.Tk()
            gui.title("IJ_URL")
            gui.geometry("600x600")
            gui.configure(bg="black")
            gui.iconbitmap("Assets/Gallery/icon.ico")
            url = StringVar()
            short_url= StringVar()
            # NOTE(review): `.pack()` returns None, so these label/entry/
            # button names all hold None; harmless since they are unused.
            label = tk.Label(
                gui,
                text = "IJ URL Shortner",
                font=("Arial", 36),
                bg="black",
                fg="red",
                ).pack(fill=tk.X, pady=2)
            entry = tk.Entry(
                gui,
                textvariable=url, width =100).pack(pady=5)
            button = tk.Button(
                gui,
                text = "Give URL",
                font=("Arial", 18),
                bg="yellow",
                fg="black",
                command =processor,).pack(pady=2)
            label = tk.Label(
                gui,
                text = "Shorten URL using git.io",
                font=("Arial", 36),
                bg="black",
                fg="red",
                ).pack(fill=tk.X, pady=2)
            entry = tk.Entry(
                gui,
                textvariable=short_url, width =100).pack(pady=5)
            button = tk.Button(
                gui,
                text = "Copy URL",
                font=("Arial", 18),
                bg="yellow",
                fg="black",
                command =copy,).pack(pady=2)
            gui.mainloop()
def song(self):
import os
from tkinter.filedialog import askdirectory
# pygame is used in this program, you can install it by using pip install pygame
import pygame
# We will be using mtagrn.id3 also
from mutagen.id3 import ID3
# We will be using tkinter to build the gui
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
import tkinter.messagebox
# We will use the web Browser to open the web browser
import webbrowser
# Building the GUI
gui = Tk()
gui.minsize(546,580)
gui.configure(bg="#002240")
gui.title("NSPlayer")
gui.geometry('500x500')
gui.iconbitmap("Assets/Gallery/icon.ico")
gui.resizable(False,False)
listofsongs = []
realnames = []
# Defining a variable to hold song name
v = StringVar()
songlabel = Label(gui,textvariable=v,width=72, bg="#002240", fg="white", font=('Cascadia Code', 14))
# Song Indexing
index = 0
# Function to choose directory
def directorychooser():
directory = askdirectory()
os.chdir(directory)
# Types of files to import, change after comment to add more type of audio files
for files in os.listdir(directory):
if files.endswith(".mp3"):
# Files
realdir = os.path.realpath(files)
audio = ID3(realdir)
realnames.append(audio['TIT2'].text[0])
listofsongs.append(files)
# Intiating pygame mixer
pygame.mixer.init()
pygame.mixer.music.load(listofsongs[0])
#pygame.mixer.music.play()
# Calling upon the directorychooser function
directorychooser()
# To update song labels
def updatelabel():
global index
global songname
v.set(realnames[index])
#return songname
# To get Next Song
def nextsong(event):
global index
index += 1
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.play()
updatelabel()
# To get Previous Song
def prevsong(event):
global index
index -= 1
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.play()
updatelabel()
# To stop the song
def stopsong(event):
pygame.mixer.music.stop()
v.set("")
#return songname
# To Play Song in order
def playsong(event):
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.play()
v.set("")
updatelabel()
# To pause Song
def pausesong(event):
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.pause()
v.set("")
updatelabel()
# To unpause Song
def unpausesong(event):
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.unpause()
v.set("")
updatelabel()
# To rewind the Song
def rewindsong(event):
pygame.mixer.music.load(listofsongs[index])
pygame.mixer.music.rewind()
v.set("")
updatelabel()
# To open a pop up window containing info
def NS_info(event):
tkinter.messagebox.showinfo('About NoobScience', NS_text)
webbrowser.open('https://newtoallofthis123.github.io/About')
# To ask if the user wants to visit the NSPlayer Website
def NS_title():
result = tkinter.messagebox.askquestion('Fork', 'Do you want to go the NSPlayer Website?')
if result=='yes':
webbrowser.open('https://newtoallofthis123.github.io/NSPlayer')
else:
print("Okay")
# The Text Pop up box info
NS_text = "This Project is built by NoobScience using python and pygame. This is a beginner-friendly project and you can use this to learn pygame as well. This project is registered under MIT lisence (copy right NoobScience 2021), which makes it open-source. You are free to use it however you wish. Check out the code at my repo: https://github.com/newtoallofthis123 , Any issues, be sure to tell me at https://github.com/newtoallofthis123/issues , Check out the website at https://newtoallofthis123.github.io/NSPlayer, To troubleshoot any problems, check out the documentation at the website. Be sure to get pygame from 'pip install pygame'"
#return songname
# The Title Button
titleButton = Button(gui,text='NSPlayer', bg="#FFE600", fg="#002240", font=("Cascadia Code", 20), command = NS_title)
titleButton.pack(padx=3, pady=12)
# The song list
listbox = Listbox(gui, bg="#002240", fg="white", width="76", font=("Cascadia Code", 12),)
listbox.pack(padx=5, pady=10)
#listofsongs.reverse()
realnames.reverse()
for items in realnames:
listbox.insert(0,items)
realnames.reverse()
#listofsongs.reverse()
# Play Button
playbutton = Button(gui,text='|> Play Music', font=("Cascadia Code", 12), width=20, bg="#00FFC0")
playbutton.pack()
playbutton.place(x=34, y=330)
# Pause Button
pausebutton = Button(gui,text='|: Pause Music', font=("Cascadia Code", 12), width=20, bg="#00FFC0")
pausebutton.pack()
pausebutton.place(x=34, y=380)
# Unpause Button
unpausebutton = Button(gui,text=':: Unpause Music', font=("Cascadia Code", 12), width=20, bg="#00FFC0")
unpausebutton.pack()
unpausebutton.place(x=300, y=380)
# Rewind Button
rewindbutton = Button(gui,text='0 Rewind Music', font=("Cascadia Code", 12), width=20, bg="#00FFC0")
rewindbutton.pack()
rewindbutton.place(x=34, y=480)
# Next Song Button
nextbutton = Button(gui,text = '--> Next Song', font=("Cascadia Code", 12), width=20, bg="#00FFC0")
nextbutton.pack()
nextbutton.place(x=300, y=430)
# Previous Song Button
previousbutton = Button(gui,text = '<-- Previous Song', font=("Cascadia Code", 12), width=20, bg="#00FFC0")
previousbutton.pack()
previousbutton.place(x=34, y=430)
# Stop Song Button
stopbutton = Button(gui,text='|| Stop Music', font=("Cascadia Code", 12), width=20, bg="#00FFC0")
stopbutton.pack()
stopbutton.place(x=300, y=330)
# Rewind Button
version = Label(gui,text='v.0.1', font=("Cascadia Code", 8), width=20, bg="#00FFC0")
version.pack()
version.place(x=410, y=68)
# Info button
infobutton = Button(gui,text = "By NoobScience", font=("Cascadia Code", 12), width=20, bg="#00FFC0", fg="#002240")
infobutton.pack(padx=1, pady=1)
infobutton.place(x=300, y=480)
# Now Playing text
label = Label(gui, text = "------Now Playing------", font=("Cascadia Code", 12), width=25)
label.pack(pady=10)
label.place(x=144, y=524)
# Defining Button Commands
playbutton.bind("<Button-1>",playsong)
nextbutton.bind("<Button-1>",nextsong)
previousbutton.bind("<Button-1>",prevsong)
stopbutton.bind("<Button-1>",stopsong)
infobutton.bind("<Button-1>",NS_info)
pausebutton.bind("<Button-1>",pausesong)
unpausebutton.bind("<Button-1>",unpausesong)
# Song playing Name
songlabel.pack()
songlabel.place(x=-124, y=550)
gui.mainloop()
def clock(self):
from time import strftime
gui = Tk()
gui.title('Digital clock Widget by NoobScience')
gui.configure(bg='#002240')
gui.resizable(False,False)
gui.iconbitmap("Assets/Gallery/icon.ico")
#photo = PhotoImage(file = "/clock.png")
#gui.iconphoto(False, photo)
def clocktime():
tick = strftime('%H:%M:%S %p')
label.config(text =tick)
label.after(1000, clocktime)
label = Label(gui, font =('sans', 80), background = '#002240', foreground = '#FFC900')
label.pack(anchor= 'center')
clocktime()
mainloop
def test(self):
return
    def analog_clock(self):
        """Draw an analog turtle-graphics clock (adapted from the CPython
        turtledemo clock; helper names are German: laenge=length,
        spitze=tip, wochentag=weekday, datum=date)."""
        from datetime import datetime
        def jump(distanz, winkel=0):
            # Move `distanz` at heading offset `winkel` without drawing.
            penup()
            right(winkel)
            forward(distanz)
            left(winkel)
            pendown()
        def hand(laenge, spitze):
            # Trace one clock hand: a shaft plus a triangular tip.
            fd(laenge*1.15)
            rt(90)
            fd(spitze/2.0)
            lt(120)
            fd(spitze)
            lt(120)
            fd(spitze)
            lt(120)
            fd(spitze/2.0)
        def make_hand_shape(name, laenge, spitze):
            # Record the hand outline as a registered turtle shape.
            reset()
            jump(-laenge*0.15)
            begin_poly()
            hand(laenge, spitze)
            end_poly()
            hand_form = get_poly()
            register_shape(name, hand_form)
        def clockface(radius):
            # 60 ticks: long strokes on the hours, dots on the minutes.
            reset()
            pensize(7)
            for i in range(60):
                jump(radius)
                if i % 5 == 0:
                    fd(25)
                    jump(-radius-25)
                else:
                    dot(3)
                    jump(-radius)
                rt(6)
        def setup():
            # Build the three hands and the writer turtle (module globals
            # so that tick() can reach them between timer callbacks).
            global second_hand, minute_hand, hour_hand, writer
            mode("logo")
            make_hand_shape("second_hand", 125, 25)
            make_hand_shape("minute_hand", 130, 25)
            make_hand_shape("hour_hand", 90, 25)
            clockface(160)
            second_hand = Turtle()
            second_hand.shape("second_hand")
            second_hand.color("gray20", "gray80")
            minute_hand = Turtle()
            minute_hand.shape("minute_hand")
            minute_hand.color("blue1", "red1")
            hour_hand = Turtle()
            hour_hand.shape("hour_hand")
            hour_hand.color("blue3", "red3")
            for hand in second_hand, minute_hand, hour_hand:
                hand.resizemode("user")
                hand.shapesize(1, 1, 3)
                hand.speed(0)
            ht()
            writer = Turtle()
            #writer.mode("logo")
            writer.ht()
            writer.pu()
            writer.bk(85)
        def wochentag(t):
            # English weekday name for datetime t.
            wochentag = ["Monday", "Tuesday", "Wednesday",
                "Thursday", "Friday", "Saturday", "Sunday"]
            return wochentag[t.weekday()]
        def datum(z):
            # "Mon. D YYYY" style date string for datetime z.
            monat = ["Jan.", "Feb.", "Mar.", "Apr.", "May", "June",
                "July", "Aug.", "Sep.", "Oct.", "Nov.", "Dec."]
            j = z.year
            m = monat[z.month - 1]
            t = z.day
            return "%s %d %d" % (m, t, j)
        def tick():
            # Redraw hands and labels; reschedules itself every 100 ms.
            t = datetime.today()
            sekunde = t.second + t.microsecond*0.000001
            minute = t.minute + sekunde/60.0
            stunde = t.hour + minute/60.0
            try:
                tracer(False) # Terminator can occur here
                writer.clear()
                writer.home()
                writer.forward(65)
                writer.write(wochentag(t),
                    align="center", font=("Courier", 14, "bold"))
                writer.back(150)
                writer.write(datum(t),
                    align="center", font=("Courier", 14, "bold"))
                writer.forward(85)
                tracer(True)
                second_hand.setheading(6*sekunde) # or here
                minute_hand.setheading(6*minute)
                hour_hand.setheading(30*stunde)
                tracer(True)
                ontimer(tick, 100)
            except Terminator:
                pass # turtledemo user pressed STOP
        def main():
            tracer(False)
            setup()
            tracer(True)
            tick()
            return "EVENTLOOP"
        # NOTE(review): this guard tests the *module's* __name__, so the
        # clock only starts when noobscience.py itself is the entry script.
        if __name__ == "__main__":
            mode("logo")
            msg = main()
            print(msg)
            mainloop()
    def qr(self):
        """QR-code generator window (pyqrcode + Pillow, both third-party)."""
        import pyqrcode
        import tkinter as tk
        #from tkinter import *
        from PIL import Image, ImageTk
        from tkinter import filedialog
        from tkinter.messagebox import showinfo
        from tkinter.messagebox import askquestion
        import os
        def qr_processor():
            # Render the entered data as <imgname>.png in the CWD.
            image = pyqrcode.create(data1.get())
            img_name = str(imgname.get()) + ".png"
            image.png((img_name), scale=10)
        def open_code():
            # NOTE(review): dead code — never bound to any button, and
            # broken as written: it assigns `file1` but then reads the
            # undefined name `file`, and `tkinter.showerror` does not
            # exist (it is tkinter.messagebox.showerror).
            try:
                file1= filedialog.askopenfilename(initialdir="/", title="Select Png files Only", )
                img1 = ImageTk.PhotoImage(Image.open(file))
                my_label = Label(image=img1).pack()
                os.system('"%s"' %file)
            except:
                tkinter.showerror("No file found", "Create the file first.")
        def open_img():
            # Open the generated PNG with the OS default viewer.
            # NOTE(review): same nonexistent tkinter.showerror as above.
            try:
                name = str(imgname.get()) + ".png"
                os.system('"%s"' %name)
            except:
                tkinter.showerror("No file found", "Create the file first.")
        # Runs only when noobscience.py itself is the entry script.
        if __name__=="__main__":
            gui = tk.Tk(className = "NS QR")
            gui.title("NS QR Code Generator")
            gui.iconbitmap("Assets/Gallery/icon.ico")
            gui.geometry("600x600")
            gui.configure(bg="black")
            data1 = StringVar()
            imgname = StringVar()
            # NOTE(review): `.pack()` returns None, so these label/entry/
            # button names all hold None; harmless since they are unused.
            label = tk.Label(
                gui,
                text = "Qr-Code Generator",
                font = ("Arial", 28),
                bg = "#40FAE4",
                fg = "black",
                ).pack(pady=2)
            label = tk.Label(
                gui,
                text = "",
                font = ("Arial", 12),
                bg = "black",
                fg = "black",
                ).pack(pady=2)
            label = tk.Label(
                gui,
                text = "Enter Data",
                font = ("Arial", 18),
                bg = "yellow",
                fg = "black",
                ).pack(pady=2)
            entry = tk.Entry(
                gui,
                font=("Cascadia", 16),
                bg="#002240",
                fg="white",
                textvariable=data1, width =30).pack()
            label = tk.Label(
                gui,
                text = "QR-Code Name",
                font=("Arial", 18),
                bg="#FA4062",
                fg="black",
                ).pack(pady=2)
            entry = tk.Entry(
                gui,
                font=("Cascadia", 16),
                bg="#002240",
                fg="white",
                textvariable=imgname, width =30,
                ).pack()
            label = tk.Label(
                gui,
                text = "",
                font = ("Arial", 16),
                bg = "black",
                fg = "black",
                ).pack(pady=2)
            button = tk.Button(
                gui,
                text = "Generate QR-Code",
                font=("Arial", 18),
                bg="#23FD71",
                fg="black",
                command =qr_processor,).pack(pady=2)
            label = tk.Label(
                gui,
                text = "",
                font = ("Arial", 8),
                bg = "black",
                fg = "black",
                ).pack(pady=2)
            #canvas= Canvas(gui, width= 150, height= 150)
            #canvas.pack()
            #img= ImageTk.PhotoImage(Image.open("ns.png"))
            #canvas.create_image(10,10,anchor=NW,image=img)
            label = tk.Label(
                gui,
                text = "You will find the QR-Code in the directory you stored this program",
                font = ("Arial", 12),
                bg = "black",
                fg = "#C5FFB8",
                ).pack(fill=tk.X, pady=2)
            button = tk.Button(
                gui,
                text = "Open QR-Code",
                font = ("Arial", 12),
                bg = "#C5FFB8",
                fg = "black",
                command = open_img
                ).pack(pady=2)
            label = tk.Label(
                gui,
                text = "",
                font = ("Arial", 56),
                bg = "black",
                fg = "black",
                ).pack(pady=2)
            label = tk.Label(
                gui,
                text = "Made by NoobScience",
                font = ("Arial", 16),
                bg = "black",
                fg = "#C5FFB8",
                ).pack(fill=tk.X, pady=2)
            gui.mainloop()
def note(self):
class NSnote:
gui = Tk()
gui.configure(bg="black")
Width = 300
Height = 300
TextArea = Text(gui, font=("Cascadia", 18), bg="black", fg="white",)
MenuBar = Menu(gui)
FileMenu = Menu(MenuBar, tearoff=0, bg="white", fg="black")
EditMenu = Menu(MenuBar, tearoff=0, bg="white", fg="black")
HelpMenu = Menu(MenuBar, tearoff=0, bg="white", fg="black")
ScrollBar = Scrollbar(TextArea)
file = None
def __init__(self,**kwargs):
try:
self.gui.wm_iconbitmap("Assets/Gallery/icon.ico")
except:
pass
# Win Size
try:
self.Width = kwargs['width']
except KeyError:
pass
try:
self.Height = kwargs['height']
except KeyError:
pass
self.gui.title("Untitled - NSnote")
screenWidth = self.gui.winfo_screenwidth()
screenHeight = self.gui.winfo_screenheight()
# For left-alling
left = (screenWidth / 2) - (self.Width / 2)
# For right-allign
top = (screenHeight / 2) - (self.Height /2)
# For top and bottom
self.gui.geometry('%dx%d+%d+%d' % (self.Width,self.Height,left, top))
self.gui.grid_rowconfigure(0, weight=1)
self.gui.grid_columnconfigure(0, weight=1)
# Add controls (widget)
self.TextArea.grid(sticky = N + E + S + W)
# Controls
self.FileMenu.add_command(label="New",command=self.newFile)
self.FileMenu.add_command(label="Open",command=self.openFile)
self.FileMenu.add_command(label="Save",command=self.saveFile)
self.FileMenu.add_separator()
self.FileMenu.add_command(label="Exit",command=self.quitApplication)
self.MenuBar.add_cascade(label="File",menu=self.FileMenu)
self.EditMenu.add_command(label="Cut",command=self.cut)
self.EditMenu.add_command(label="Copy",command=self.copy)
self.EditMenu.add_command(label="Paste",command=self.paste)
self.MenuBar.add_cascade(label="Edit",menu=self.EditMenu)
self.HelpMenu.add_command(label="About NSnote",command=self.showAbout)
self.HelpMenu.add_command(label="NoobScience Website",command=self.website)
self.HelpMenu.add_command(label="NSnote Website",command=self.NSnoteWeb)
self.HelpMenu.add_command(label="Source",command=self.source)
self.HelpMenu.add_command(label="Check out my other Projects",command=self.Projects)
self.MenuBar.add_cascade(label="Resources", menu=self.HelpMenu)
self.HelpMenu.add_command(label="NoobScience",command=self.NoobScience)
self.gui.config(menu=self.MenuBar)
self.ScrollBar.pack(side=RIGHT,fill=Y)
# Scrollbar will adjust automatically according to the content
self.ScrollBar.config(command=self.TextArea.yview)
self.TextArea.config(yscrollcommand=self.ScrollBar.set)
def quitApplication(self):
self.gui.destroy()
# exit()
def showAbout(self):
showinfo("NSnote","NoobScience")
def openFile(self):
self.file = askopenfilename(defaultextension=".txt", filetypes=[("All Files","*.*"),("Text Documents","*.txt")])
if self.file == "":
self.file = None
else:
self.gui.title(os.path.basename(self.file) + " - NSnote")
self.TextArea.delete(1.0,END)
file = open(self.file,"r")
self.TextArea.insert(1.0,file.read())
file.close()
def newFile(self):
self.gui.title("Untitled - NSnote")
self.file = None
self.TextArea.delete(1.0,END)
def saveFile(self):
if self.file == None:
# Save as new file
self.file = asksaveasfilename(initialfile='Untitled.txt',defaultextension=".txt",filetypes=[("All Files","*.*"), ("Text Documents","*.txt")])
if self.file == "":
self.file = None
else:
file = open(self.file,"w")
file.write(self.TextArea.get(1.0,END))
file.close()
self.gui.title(os.path.basename(self.file) + " - NSnote")
else:
file = open(self.file,"w")
file.write(self.thisTextArea.get(1.0,END))
file.close()
def cut(self):
self.TextArea.event_generate("<<Cut>>")
def copy(self):
self.TextArea.event_generate("<<Copy>>")
def paste(self):
self.TextArea.event_generate("<<Paste>>")
def website(self):
result = tkinter.messagebox.askquestion('Website', 'Do you want to vist NoobScience Website?')
if result == 'yes':
webbrowser.open('https://newtoallofthis123.github.io/About')
else:
print('okay, it is https://newtoallofthis123.github.io/About by the way')
def NSnoteWeb(self):
result = tkinter.messagebox.askquestion('NSnote Website', 'Do you want to vist the NSnote Website?')
if result == 'yes':
webbrowser.open('https://newtoallofthis123.github.io/NSnote')
else:
print('okay, it is https;//newtoallofthis123.github.io/NSnote by the wat')
def source(self):
result = tkinter.messagebox.askquestion('NSnote Source', 'Do you want to fork or look at tthe NSnote Website?')
if result == 'yes':
webbrowser.open('https://github.com/newtoallofthis123/NSnote')
else:
print('okay, it is https://github.com/newtoallofthis123/NSnote, by the way')
def Projects(self):
result = tkinter.messagebox.askquestion('Projects', 'Do you want to check out some of my other projects?')
if result == 'yes':
webbrowser.open('https://github.com/newtoallofthis123')
else:
print('okay, it is https://github.com/newtoallofthis123, by the way')
def NoobScience(self):
showinfo("About NoobScience", word_About)
def run(self):
self.gui.mainloop()
word_About = str("I am currently learning to program for my IT journey and to practise, I make simple projects like this that are quite uable and light weight. Check me out at https://newtoallofthis123.github.io/About")
print("NSnote has started with the theme, if you don't see it, press alt+tab and check all the tabs")
note = NSnote(width=800,height=500)
note.run()
print("A Project by NoobScience, check me out at https://newtoallofthis123.github.io/About")
def cal(self):
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
result = tkinter.messagebox.askquestion('Calculator', 'Do you want to open the calculator')
if result=='yes':
import cal
else:
tkinter.messagebox.showinfo('Okay', 'Calculator not opened')
def joke(self):
import pyjokes
gui = Tk()
gui.title("Joke")
gui.geometry('800x90')
gui.config(bg="#002240")
gui.iconbitmap("Assets/Gallery/icon.ico")
gui.resizable(True, False)
j = pyjokes.get_joke()
joke = StringVar(gui, value = j)
label = Label(gui, text = "Joke",font=("Cascadia", 24), fg="white", bg="#002240").pack()
entry = Entry(gui, textvariable = joke, width=120, font=("Cascadia", 12), fg="white", bg="#002240").pack(fill=X)
label = Label(gui, text = "By NoobScience",font=("Cascadia", 16), fg="white", bg="#002240").pack()
gui.mainloop()
    def rps(self):
        """Rock/Paper/Scissor game against random computer picks.

        NOTE(review): every round pack()s fresh labels, so results stack
        down the window instead of replacing the previous round.
        """
        import random
        from tkinter import ttk
        import time
        gui = Tk()
        gui.title("Rock Paper Scissor")
        gui.iconbitmap("Assets/Gallery/icon.ico")
        gui.geometry("400x400")
        gui.config(bg="#002240")
        #choice = StringVar(gui)
        #choice.set("Rock Paper or Scissor")
        def user_op(e):
            # Map the combobox text to 1/2/3 (Rock/Paper/Scissor).
            if op.get() == "Rock":
                user = 1
            if op.get() == "Paper":
                user = 2
            if op.get() == "Scissor":
                user = 3
            user_text = "You Choose " + op.get() + ""
            user_label = Label(gui, text=user_text, fg="black", bg="#FFEC47", font=("Cascadia", 16)).pack()
            label = Label(gui, text="", fg="black", bg="#002240", font=("Cascadia", 4)).pack()
            #label = Label(gui, text="Computer is procressing...", fg="white", bg="#002240", font=("Cascadia", 14)).pack()
            # Computer pick uses the same 1/2/3 encoding.
            comp_op = int(random.randint(1, 3))
            if comp_op == 1:
                com = "Rock"
            if comp_op == 2:
                com = "Paper"
            if comp_op == 3:
                com = "Scissor"
            comp_text = "The Computer choose " + com + ""
            comp_label = Label(gui, text=comp_text, fg="black", bg="#75EBFF", font=("Cascadia", 16)).pack()
            label = Label(gui, text="", fg="black", bg="#002240", font=("Cascadia", 4)).pack()
            #label = Label(gui, text="Processing Options...", fg="white", bg="#002240", font=("Cascadia", 14)).pack()
            # Outcome: tie, then the three user-winning pairs, then losses.
            if user == comp_op:
                user_label = Label(gui, text="It's a Tie,try again", fg="black", bg="#23FF8F", font=("Cascadia", 12)).pack()
                label = Label(gui, text="----------------------------", fg="white", bg="#002240", font=("Cascadia", 14)).pack()
            if user == 1 and comp_op == 3 or user == 3 and comp_op == 2 or user == 2 and comp_op == 1:
                user_label = Label(gui, text="You Won.Play Again", fg="black", bg="#23FF8F", font=("Cascadia", 16)).pack()
                label = Label(gui, text="----------------------------", fg="white", bg="#002240", font=("Cascadia", 14)).pack()
            if user == 3 and comp_op == 1 or user == 2 and comp_op == 3 or user == 1 and comp_op == 2:
                user_label = Label(gui, text="You Lost. try Again", fg="black", bg="#23FF8F", font=("Cascadia", 16)).pack()
                label = Label(gui, text="----------------------------", fg="white", bg="#002240", font=("Cascadia", 14)).pack()
        op_list = ["Rock", "Paper", "Scissor",]
        label = Label(gui, text="Rock Paper Scissor", fg="white", bg="#002240", font=("Cascadia", 18)).pack()
        #label = Label(gui, text="choose an option below", fg="white", bg="#002240", font=("Cascadia", 14)).pack()
        op = ttk.Combobox(gui, value=op_list, font=("Cascadia", 16))
        op.pack()
        #op.current(3)
        # A selection in the combobox plays one round.
        op.bind("<<ComboboxSelected>>", user_op)
        label = Label(gui, text="", fg="black", bg="#002240", font=("Cascadia", 4)).pack()
        label = Label(gui, text="by NoobScience", fg="white", bg="#002240", font=("Cascadia", 14)).pack()
        label = Label(gui, text="", fg="black", bg="#002240", font=("Cascadia", 4)).pack()
        def processor():
            # NOTE(review): unused placeholder.
            return
        gui.mainloop()
def play(self):
import pygame
pygame.mixer.init()
pygame.mixer.music.load("D:\Songs\kelsea ballarini\Kelsea Ballerini - Dibs (Official Music Video).mp3")
pygame.mixer.music.play()
def pause(self):
import pygame
pygame.mixer.init()
pygame.mixer.music.load("D:\Songs\kelsea ballarini\Kelsea Ballerini - Dibs (Official Music Video).mp3")
pygame.mixer.music.pause()
def stop(self):
import pygame
pygame.mixer.init()
pygame.mixer.music.load("D:\Songs\kelsea ballarini\Kelsea Ballerini - Dibs (Official Music Video).mp3")
pygame.mixer.music.stop()
def actress(self):
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
result = tkinter.messagebox.askquestion('Actress gallery', 'Do you want to open Actresses')
if result=='yes':
import actress
else:
tkinter.messagebox.showinfo('Okay', 'Actress gallery not opened')
def snakes(self):
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
result = tkinter.messagebox.askquestion('Snakes', 'Do you want to open the snakes game')
if result=='yes':
import snake
else:
tkinter.messagebox.showinfo('Okay', 'Snakes not opened')
def pong(self):
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
result = tkinter.messagebox.askquestion('Ping Pong', 'Do you want to open Ping Pong Game?')
if result=='yes':
import pong
else:
tkinter.messagebox.showinfo('Okay', 'Ping Pong not opened')
def alarm(self):
import subprocess
subprocess.call([r'alarm.bat'])
def wikid(self):
import subprocess
subprocess.call([r'wikid.bat'])
def noob(self):
import subprocess
subprocess.call([r'noob.bat'])
def pwd(self):
return
def google(self):
import subprocess
subprocess.call([r'google.bat'])
def links(self):
import subprocess
subprocess.call([r'links.bat'])
    def pwdgen(self):
        """Open a small Tk window that generates random passwords.

        Blocks in its own mainloop until the window is closed.
        """
        import random
        import string
        gui = Tk()
        gui.title("Password Generator")
        gui.resizable(False, False)
        try:
            gui.iconbitmap("Assets/Gallery/icon.ico")
        except:
            # Icon is cosmetic; ignore a missing asset.
            pass
        gui.config(bg = "black")
        # Spinbox selects the desired password length (1-160).
        # NOTE(review): random.sample cannot repeat characters, so lengths
        # above len(ascii_letters + digits + punctuation) = 94 will raise —
        # confirm whether the 160 upper bound is intended.
        length = Spinbox(gui, from_=1, to=160, width=8, bg="#B700FF", fg="black", font=("Cascadia", 8))
        length.grid(row=1, column=5, padx=1)
        def password():
            # Build the candidate alphabet and sample without replacement.
            # NOTE(review): the `random` module is not cryptographically
            # secure; `secrets` would be preferable for real passwords.
            length_int = int(length.get())
            lower = string.ascii_lowercase
            upper = string.ascii_uppercase
            digits = string.digits
            symbols = string.punctuation
            whole = lower + upper + digits + symbols
            pwd1 = random.sample(whole,length_int)
            password = "".join(pwd1)
            # A fresh Text widget is created on every click; the previous one
            # stays gridded underneath it.
            textfield = Text(gui, width=20, height=1,bg="#01FF7D", fg="black", font=("Cascadia", 12))
            textfield.grid(row=2, columnspan=12, padx=1, pady=1)
            textfield.delete(0.0, END)
            textfield.insert(INSERT, password)
        button = Button(gui, text="Generate Password", command=password, bg="#F9FF47", fg="black", font=("Cascadia", 8))
        button.grid(row=1, column=7, padx=10)
        gui.mainloop()
def quit1(self):
self.gui.quit
def tictactoe(self):
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
result = tkinter.messagebox.askquestion('Tic Tac Toe', 'Do you want to open Tic Tac Toe Game?')
if result=='yes':
import tictactoe
else:
tkinter.messagebox.showinfo('Okay', 'Tic Tac Toe not opened')
def flappy_bird(self):
from tkinter.messagebox import showinfo
from tkinter.messagebox import askquestion
result = tkinter.messagebox.askquestion('Flappy Bird', 'Do you want to open Flappy Bird Game?')
if result=='yes':
import flappy_bird
else:
tkinter.messagebox.showinfo('Okay', 'Flappy Bird not opened')
def yt_dl(self):
import subprocess
subprocess.call([r'yt-dl.bat'])
def run(self):
#self.gui.bind('<Configure>', resizer)
self.gui.mainloop()
word_About = str("I am currently learning to program for my IT journey and to practise, I make simple projects like this that are quite uable and light weight. Check me out at https://newtoallofthis123.github.io/About")
print("NSnote has started with the theme, if you don't see it, press alt+tab and check all the tabs")
note = NSnote(width=800,height=500)
note.run()
print("A Project by NoobScience, check me out at https://newtoallofthis123.github.io/About")
| 37.097826 | 651 | 0.524601 |
acdef119403489294b4d5d4029b6da85887a5b93 | 2,626 | py | Python | mythril/analysis/security.py | QuanZhang-William/mythril-classic | f72387f749a142d8a4e6d1586365d6e9e96a2fed | [
"MIT"
] | 6 | 2021-02-13T05:03:32.000Z | 2021-09-19T14:57:58.000Z | mythril/analysis/security.py | cryptobarbossa/mythril-classic | 5dd544d301238db2bc536d7cee69b96e9a15e9c4 | [
"MIT"
] | null | null | null | mythril/analysis/security.py | cryptobarbossa/mythril-classic | 5dd544d301238db2bc536d7cee69b96e9a15e9c4 | [
"MIT"
] | 2 | 2020-05-26T15:03:20.000Z | 2021-07-29T09:09:05.000Z | from collections import defaultdict
from ethereum.opcodes import opcodes
from mythril.analysis import modules
import pkgutil
import importlib.util
import logging
OPCODE_LIST = [c[0] for _, c in opcodes.items()]
def reset_callback_modules():
modules = get_detection_modules("callback")
for module in modules:
module.detector._issues = []
def get_detection_module_hooks():
hook_dict = defaultdict(list)
_modules = get_detection_modules(entrypoint="callback")
for module in _modules:
for op_code in map(lambda x: x.upper(), module.detector.hooks):
if op_code in OPCODE_LIST:
hook_dict[op_code].append(module.detector.execute)
elif op_code.endswith("*"):
to_register = filter(lambda x: x.startswith(op_code[:-1]), OPCODE_LIST)
for actual_hook in to_register:
hook_dict[actual_hook].append(module.detector.execute)
else:
logging.error(
"Encountered invalid hook opcode %s in module %s",
op_code,
module.detector.name,
)
return dict(hook_dict)
def get_detection_modules(entrypoint, include_modules=()):
include_modules = list(include_modules)
_modules = []
if not include_modules:
for loader, module_name, _ in pkgutil.walk_packages(modules.__path__):
if module_name != "base":
module = importlib.import_module(
"mythril.analysis.modules." + module_name
)
if module.detector.entrypoint == entrypoint:
_modules.append(module)
else:
for module_name in include_modules:
module = importlib.import_module("mythril.analysis.modules." + module_name)
if module.__name__ != "base" and module.detector.entrypoint == entrypoint:
_modules.append(module)
logging.info("Found %s detection modules", len(_modules))
return _modules
def fire_lasers(statespace, module_names=()):
logging.info("Starting analysis")
issues = []
for module in get_detection_modules(
entrypoint="post", include_modules=module_names
):
logging.info("Executing " + module.detector.name)
issues += module.detector.execute(statespace)
for module in get_detection_modules(
entrypoint="callback", include_modules=module_names
):
logging.debug("Retrieving results for " + module.detector.name)
issues += module.detector.issues
reset_callback_modules()
return issues
| 33.240506 | 87 | 0.642422 |
acdef18784a1db241fa737b57e8187fef40abd29 | 26,062 | py | Python | backend/src/incidents/services.py | aijdissanayake/request-management | a88a2ce35a7a1a98630ffd14c1a31a5173b662c8 | [
"MIT"
] | null | null | null | backend/src/incidents/services.py | aijdissanayake/request-management | a88a2ce35a7a1a98630ffd14c1a31a5173b662c8 | [
"MIT"
] | 1 | 2022-02-28T01:46:21.000Z | 2022-02-28T01:46:21.000Z | backend/src/incidents/services.py | mohamednizar/request-management | a88a2ce35a7a1a98630ffd14c1a31a5173b662c8 | [
"MIT"
] | 1 | 2020-05-04T07:11:42.000Z | 2020-05-04T07:11:42.000Z | import os
import requests
from .models import (
Incident,
IncidentStatus,
StatusType,
SeverityType,
Reporter,
IncidentComment,
IncidentPoliceReport,
VerifyWorkflow,
EscalateExternalWorkflow,
CompleteActionWorkflow,
RequestAdviceWorkflow,
ProvideAdviceWorkflow,
AssignUserWorkflow,
EscalateWorkflow,
CloseWorkflow,
InvalidateWorkflow,
ReopenWorkflow
)
from django.contrib.auth.models import User, Group, Permission
from ..events import services as event_services
from ..events.models import Event
from ..file_upload.models import File
from ..custom_auth.models import Division, UserLevel
from django.db import connection
from .exceptions import WorkflowException, IncidentException
import pandas as pd
from django.http import HttpResponse
from xhtml2pdf import pisa
import json
from rest_framework.renderers import StaticHTMLRenderer
from django.db.models import Q
from .permissions import *
def is_valid_incident(incident_id: str) -> bool:
try:
incident = Incident.objects.get(id=incident_id)
return True
except Exception as e:
return False
def validateRecaptcha(response: str) -> bool:
params = {
'secret': os.environ.get('RECAPTCHA_SECRET_KEY'),
'response': response
}
validationResponse = requests.post(
'https://www.google.com/recaptcha/api/siteverify',
params
)
return validationResponse.json()['success']
def get_incident_by_id(incident_id: str) -> Incident:
try:
incident = Incident.objects.get(id=incident_id)
if incident is None:
raise IncidentException("Invalid incident id")
except:
raise IncidentException("Invalid incident id")
return incident
def get_user_by_id(user_id: str) -> User:
try:
user = User.objects.get(id=user_id)
if user is None:
raise IncidentException("Invalid user id")
except:
raise IncidentException("Invalid user id")
return user
def get_group_by_id(group_id: str) -> User:
    """Return the Group with the given id.

    (Return annotation kept as declared; the object returned is a Group.)

    Raises
    ------
    IncidentException
        If no group matches (or the id is malformed).
    """
    try:
        # Renamed the misleading local (was ``user``); dead None-check
        # removed and bare ``except:`` narrowed.
        return Group.objects.get(id=group_id)
    except Exception as exc:
        raise IncidentException("Invalid group id") from exc
def get_reporter_by_id(reporter_id: str) -> Reporter:
    """Return the Reporter with the given id, or None when the lookup fails.

    Fix: the return annotation previously said ``Incident`` although the
    function returns a Reporter.
    """
    try:
        return Reporter.objects.get(id=reporter_id)
    except Exception:
        return None
def get_comments_by_incident(incident: Incident) -> IncidentComment:
    """Return the comment attached to *incident*, or None when lookup fails.

    NOTE(review): ``.get()`` raises MultipleObjectsReturned when several
    comments exist, which this silently turns into None — confirm whether a
    ``filter()`` was intended.
    """
    try:
        return IncidentComment.objects.get(incident=incident)
    except Exception:
        return None
def get_user_group(user: User):
    """Return the first auth Group of *user*; raise when the user has none."""
    groups = user.groups.all()
    if not groups:
        raise WorkflowException("No group for current assignee")
    return groups[0]
def get_user_orgnaization(user: User):
    """Return the organization Group behind the user's first group.

    (The name keeps its historical misspelling for caller compatibility.)
    """
    group = get_user_group(user)
    return Group.objects.get(id=group.organization_id)
def get_guest_user():
    """Return the shared "guest" account used for anonymous submissions.

    Raises
    ------
    IncidentException
        If the guest account does not exist.
    """
    try:
        return User.objects.get(username="guest")
    except Exception as exc:
        # Bare ``except:`` narrowed so KeyboardInterrupt/SystemExit pass through.
        raise IncidentException("No guest user available") from exc
def user_level_has_permission(user_level: UserLevel, permission: Permission):
    """Return True when the role tied to *user_level* grants *permission*."""
    granted = Permission.objects.filter(group=user_level.role)
    return permission in granted
def get_user_from_level(user_level: UserLevel, division: Division) -> User:
    """Pick the least-loaded active user at *user_level* within *division*.

    Counts currently assigned incidents per matching user and returns the
    user with the fewest assignments; returns None when no user matches.

    Fixes over the previous version:
    * The WHERE clause was placed before the JOINs, which is invalid SQL.
    * Values were interpolated into the SQL string; now passed as bound
      parameters (prevents SQL injection).
    * Debug ``print`` calls removed.
    """
    sql = """
        SELECT usr.id, COUNT(incident.id) AS incident_count
        FROM auth_user AS usr
        LEFT JOIN incidents_incident AS incident ON incident.assignee_id = usr.id
        INNER JOIN custom_auth_profile AS prf ON prf.user_id = usr.id
        INNER JOIN custom_auth_userlevel AS ulvl ON prf.level_id = ulvl.id
        INNER JOIN auth_group AS grp ON ulvl.role_id = grp.id
        INNER JOIN custom_auth_division AS udiv ON prf.division_id = udiv.id
        WHERE usr.is_active = true AND ulvl.code = %s AND udiv.code = %s
        GROUP BY usr.id
        ORDER BY incident_count ASC
    """
    with connection.cursor() as cursor:
        cursor.execute(sql, [user_level.code, division.code])
        row = cursor.fetchone()
    if row is None:
        return None
    try:
        return User.objects.get(id=row[0])
    except Exception:
        return None
def find_candidate_from_division(current_division: Division, current_level: UserLevel, required_permission: Permission=None):
    """Walk up the user-level hierarchy of *current_division* for an assignee.

    Starting from the parent of *current_level*, return the first user found
    whose level (optionally) holds *required_permission*; None when the
    hierarchy is exhausted.
    """
    level = current_level.parent
    while level is not None:
        if required_permission is None or user_level_has_permission(
            level, required_permission
        ):
            candidate = get_user_from_level(level, current_division)
            if candidate is not None:
                return candidate
        # Either the level lacked the permission or had no available user:
        # keep traversing upwards.
        level = level.parent
    return None
def find_escalation_candidate(current_user: User) -> User:
    """Find an escalation assignee within the user's own organization.

    Tries the user's own division first (e.g. Cordinator -> Manager); if
    that hierarchy is exhausted, falls back to the organization's HQ
    division starting at the same level.
    """
    division = current_user.profile.division
    level = current_user.profile.level
    candidate = find_candidate_from_division(division, level)
    if candidate is not None:
        return candidate
    # Own division exhausted: search the organization's HQ division.
    hq_division = Division.objects.get(Q(is_hq=True) &
                                       Q(organization=current_user.profile.organization))
    if hq_division is None:
        raise WorkflowException("Organization Hierarchy Configure Error - No HQ defined")
    # Start from the same level as before (i.e. the current user's parent
    # level) within HQ.
    candidate = find_candidate_from_division(hq_division, level)
    if candidate is None:
        raise WorkflowException("Can't find escalation candidate for current user")
    return candidate
def find_incident_assignee(current_user: User):
    """Pick the user a newly created incident should be assigned to.

    Rules (as implemented):
    * Guest (public) submissions are routed into the default division.
    * A logged-in user with the manage-incident permission keeps it.
    * Otherwise, users of the default organization get a normal escalation;
      users of any other organization (e.g. Police) are routed to the
      default division's hierarchy starting from the guest level.

    Raises WorkflowException when no assignee can be found.
    """
    assignee = None
    required_permission = Permission.objects.get(codename=CAN_MANAGE_INCIDENT)
    default_division = Division.objects.get(is_default_division=True)
    # first if a public user case
    if current_user.username == "guest":
        # guest is a user level under EC organization
        # ideally we can check if the parent of the current user has
        # permissions
        assignee = find_candidate_from_division(default_division, current_user.profile.level)
    else:
        # this is a logged in user
        # first check if the current user has the permission to manage an incident
        if user_level_has_permission(current_user.profile.level, required_permission):
            # if so assign it to self
            return current_user
        else:
            # if not, first check if the current user is from EC -> or default org
            if current_user.profile.organization == default_division.organization:
                # then we can do a escalation on the assignment
                assignee = find_escalation_candidate(current_user)
            else:
                # then this should be Police or anything else
                # then we ONLY try to assign this to someone at the EC HQ
                # ie: default division HQ
                guest_user = get_guest_user()
                assignee = find_candidate_from_division(default_division, guest_user.profile.level)
    if assignee is None:
        raise WorkflowException("Error in finding assignee")
    return assignee
def create_reporter():
    """Construct a fresh, unsaved Reporter instance."""
    return Reporter()
def create_incident_postscript(incident: Incident, user: User) -> Incident:
    """Finish creating an incident: reporter, assignee, event and NEW status.

    Called after the incident row exists.  Saves the incident twice: first
    with the creator as a provisional assignee (so the creation event has a
    complete record), then again once the real assignee is resolved.
    Returns the saved incident.
    """
    if user is None:
        # Public submission (no auth token): attribute it to the shared
        # guest account.
        user = get_guest_user()
    reporter = Reporter()
    reporter.save()
    incident.created_by = user
    # Keep an explicitly supplied reporter; otherwise attach the fresh one.
    if(not incident.reporter):
        incident.reporter = reporter
    # incident.reporter = reporter
    incident.assignee = user
    incident.save()
    event_services.create_incident_event(user, incident)
    # Resolve the real assignee from the org/level hierarchy.
    assignee = find_incident_assignee(user)
    incident.assignee = assignee
    # TODO: for police users, set the linked individuals property
    # Cross-organization assignment: keep the creator linked so they retain
    # visibility of the incident.
    if user.profile.organization != assignee.profile.organization:
        incident.linked_individuals.add(user)
    incident.save()
    status = IncidentStatus(current_status=StatusType.NEW,
                            incident=incident, approved=True)
    status.save()
    return incident
def update_incident_postscript(incident: Incident, user: User, revision: str) -> None:
    """Record an incident-updated event carrying the revision payload."""
    event_services.update_incident_event(user, incident, revision)
def update_incident_status(
    incident: Incident, user: User, status_type_str: str
) -> tuple:
    """Apply (or queue) a status change for *incident*.

    Returns a ("success"|"error", message) pair.  Behavior depends on the
    caller's permissions: ``can_request_status_change`` only records an
    unapproved status and locks the incident (hasPendingStatusChange = "T"),
    while ``can_change_status`` applies the change immediately.

    NOTE(review): when the user holds neither permission the function falls
    through and still reports success without changing anything — confirm
    whether that is intended.
    """
    # "T"/"F" string flags (not booleans) mirror the model field's storage.
    if incident.hasPendingStatusChange == "T":
        return ("error", "Incident status is locked for pending changes")
    try:
        # check for valid status type
        status_type = StatusType[status_type_str]
    except:
        return ("error", "Invalid status type")
    if user.has_perm("incidents.can_request_status_change"):
        # if user can't directly change the status
        # only a pending change is added
        status = IncidentStatus(
            current_status=status_type,
            previous_status=incident.current_status,
            incident=incident,
            approved=False,
        )
        status.save()
        incident.hasPendingStatusChange = "T"
        incident.save()
        event_services.update_incident_status_event(
            user, incident, status, False)
    elif user.has_perm("incidents.can_change_status"):
        status = IncidentStatus(
            current_status=status_type,
            previous_status=incident.current_status,
            incident=incident,
            approved=True,
        )
        status.save()
        incident.hasPendingStatusChange = "F"
        incident.save()
        event_services.update_incident_status_event(
            user, incident, status, True)
    return ("success", "Status updated")
def create_incident_comment_postscript(
    incident: Incident, user: User, comment: IncidentComment
) -> None:
    """Emit the matching timeline event for a newly added comment.

    Outcome comments produce an outcome event; everything else produces a
    plain comment event.
    """
    if not comment.is_outcome:
        event_services.create_comment_event(user, incident, comment)
    else:
        event_services.create_outcome_event(user, incident, comment)
def get_incidents_by_status(status_type_str: str) -> Incident:
    """Lazily yield incidents whose current status matches; None on failure.

    The status comparison happens in Python (current_status is computed per
    incident), so this returns a generator rather than a queryset.
    """
    try:
        everything = Incident.objects.all()
        return (inc for inc in everything
                if inc.current_status == status_type_str)
    except Exception:
        return None
def get_incidents_before_date(date: str) -> Incident:
    """Incidents created on or before *date*; None when the query fails."""
    try:
        return Incident.objects.filter(created_date__lte=date)
    except Exception:
        return None
def incident_escalate(user: User, incident: Incident, escalate_dir: str = "UP", comment=None, response_time=None):
    """Escalate *incident* to the next candidate up the user hierarchy.

    Only the current assignee may escalate, and only once the incident has
    left the NEW / REOPENED / ACTION_PENDING / ADVICE_REQESTED states.
    (``escalate_dir`` is kept for interface compatibility; the current
    implementation always escalates via find_escalation_candidate.)
    """
    if incident.assignee != user:
        raise WorkflowException("Only current incident assignee can escalate the incident")
    blocked_states = (
        StatusType.NEW.name,
        StatusType.REOPENED.name,
        StatusType.ACTION_PENDING.name,
        StatusType.ADVICE_REQESTED.name,
    )
    if incident.current_status in blocked_states:
        raise WorkflowException("Incident cannot be escalated at this Status")
    new_assignee = find_escalation_candidate(user)
    incident.assignee = new_assignee
    incident.save()
    # Record the escalation in the workflow log and the event timeline.
    workflow = EscalateWorkflow(
        incident=incident,
        actioned_user=user,
        comment=comment,
        response_time=response_time,
        assignee=new_assignee,
    )
    workflow.save()
    event_services.update_workflow_event(user, incident, workflow)
def incident_change_assignee(user: User, incident: Incident, assignee: User):
    """Reassign *incident* to *assignee*, recording workflow and event."""
    workflow = AssignUserWorkflow(
        incident=incident, actioned_user=user, assignee=assignee
    )
    workflow.save()
    incident.assignee = assignee
    incident.save()
    event_services.update_workflow_event(user, incident, workflow)
def incident_close(user: User, incident: Incident, details: str):
    """Close *incident*, recording a CloseWorkflow with the closure details.

    Preconditions: no pending advice requests, no pending external actions,
    and at least one outcome comment.
    """
    outcome_count = IncidentComment.objects.filter(
        incident=incident, is_outcome=True).count()
    if incident.current_status == StatusType.ADVICE_REQESTED.name:
        raise WorkflowException(
            "All pending advices must be resolved first")
    if incident.current_status == StatusType.ACTION_PENDING.name:
        raise WorkflowException(
            "All pending actions needs to be resolved first")
    if outcome_count == 0:
        raise WorkflowException(
            "Incident need at least 1 resolution outcome before closing")
    IncidentStatus(
        current_status=StatusType.CLOSED,
        previous_status=incident.current_status,
        incident=incident,
    ).save()
    # ``details`` carries the closure form fields keyed by name.
    workflow = CloseWorkflow(
        incident=incident,
        actioned_user=user,
        assignees=details["assignee"],
        entities=details["entities"],
        departments=details["departments"],
        individuals=details["individuals"],
        comment=details["remark"],
    )
    workflow.save()
    event_services.update_workflow_event(user, incident, workflow)
def incident_escalate_external_action(user: User, incident: Incident, entity: object, comment: str):
    """Escalate an incident to an internal user or external entity for action.

    Records an EscalateExternalWorkflow, links internal users to the
    incident for visibility, and moves the incident to ACTION_PENDING.
    (Removed the unused ``evt_description`` local.)
    """
    is_internal_user = entity["isInternalUser"]
    workflow = EscalateExternalWorkflow(
        is_internal_user=is_internal_user,
        incident=incident,
        actioned_user=user,
        comment=comment
    )
    if is_internal_user:
        escalated_user = get_user_by_id(entity["name"])
        incident.linked_individuals.add(escalated_user)
        incident.save()
        workflow.escalated_user = escalated_user
    else:
        # External entities are recorded as free-form strings.
        workflow.escalated_entity_other = entity["type"]
        workflow.escalated_user_other = entity["name"]
    workflow.save()
    status = IncidentStatus(
        current_status=StatusType.ACTION_PENDING,
        previous_status=incident.current_status,
        incident=incident,
        approved=True
    )
    status.save()
    event_services.update_workflow_event(user, incident, workflow)
def incident_complete_external_action(user: User, incident: Incident, comment: str, start_event: Event):
    """Complete a previously escalated external action on *incident*.

    ``start_event`` must be the event that recorded the original
    EscalateExternalWorkflow; a CompleteActionWorkflow is linked back to it.
    When no pending external actions remain, the incident moves to
    ACTION_TAKEN.
    """
    initiated_workflow = start_event.refered_model
    # complete workflow
    workflow = CompleteActionWorkflow(
        incident=incident,
        actioned_user=user,
        comment=comment,
        initiated_workflow=initiated_workflow
    )
    workflow.save()
    # complete previous workflow
    initiated_workflow.is_action_completed = True
    # TODO: find why django do this
    # (the nullable boolean sometimes arrives as None; normalize to False
    # before saving)
    if initiated_workflow.is_internal_user == None:
        initiated_workflow.is_internal_user = False
    initiated_workflow.save()
    # check if there are any more pending actions
    pending_actions = EscalateExternalWorkflow.objects.filter(Q(incident=incident) & Q(is_action_completed=False))
    if pending_actions.count() == 0:
        # All external actions resolved: advance the incident status.
        status = IncidentStatus(
            current_status=StatusType.ACTION_TAKEN,
            previous_status=incident.current_status,
            incident=incident,
            approved=True
        )
        status.save()
    event_services.update_linked_workflow_event(user, incident, workflow, start_event)
def incident_request_advice(user: User, incident: Incident, assignee: User, comment: str):
    """Ask *assignee* for advice on *incident*; status becomes ADVICE_REQESTED.

    Only one advice request may be outstanding at a time.
    """
    if incident.current_status == StatusType.ADVICE_REQESTED.name:
        raise WorkflowException("Incident already has a pending advice request")
    workflow = RequestAdviceWorkflow(
        incident=incident,
        actioned_user=user,
        comment=comment,
        assigned_user=assignee,
    )
    workflow.save()
    IncidentStatus(
        current_status=StatusType.ADVICE_REQESTED,
        previous_status=incident.current_status,
        incident=incident,
        approved=True,
    ).save()
    # Link the advisor so the incident appears in their view.
    incident.linked_individuals.add(assignee)
    incident.save()
    event_services.update_workflow_event(user, incident, workflow)
def incident_provide_advice(user: User, incident: Incident, advice: str, start_event: Event):
    """Record advice from *user* answering an earlier advice request.

    ``start_event`` must be the event of the originating
    RequestAdviceWorkflow; it is marked answered and the incident moves to
    ADVICE_PROVIDED.
    """
    # Fix: the membership test must be scoped to THIS incident; the previous
    # query matched if the user was linked to *any* incident.
    if not Incident.objects.filter(
            id=incident.id, linked_individuals__id=user.id).exists():
        raise WorkflowException("User not linked to the given incident")
    if incident.current_status != StatusType.ADVICE_REQESTED.name:
        raise WorkflowException("Incident does not have pending advice requests")
    initiated_workflow = start_event.refered_model
    workflow = ProvideAdviceWorkflow(
        incident=incident,
        actioned_user=user,
        comment=advice,
        initiated_workflow=initiated_workflow
    )
    workflow.save()
    # Mark the originating request as answered.
    initiated_workflow.is_advice_provided = True
    initiated_workflow.save()
    status = IncidentStatus(
        current_status=StatusType.ADVICE_PROVIDED,
        previous_status=incident.current_status,
        incident=incident,
        approved=True
    )
    status.save()
    # The advisor no longer needs visibility of the incident.
    incident.linked_individuals.remove(user.id)
    event_services.update_linked_workflow_event(user, incident, workflow, start_event)
def incident_verify(user: User, incident: Incident, comment: str, proof: bool):
    """Mark a NEW/REOPENED incident as VERIFIED by its assignee.

    When *proof* is True the incident's ``proof`` flag is also set.
    """
    if incident.current_status not in (StatusType.NEW.name,
                                       StatusType.REOPENED.name):
        raise WorkflowException("Can only verify unverified incidents")
    if incident.assignee != user:
        raise WorkflowException("Only assignee can verify the incident")
    workflow = VerifyWorkflow(
        incident=incident,
        actioned_user=user,
        comment=comment,
        has_proof=proof,
    )
    workflow.save()
    IncidentStatus(
        current_status=StatusType.VERIFIED,
        previous_status=incident.current_status,
        incident=incident,
        approved=True,
    ).save()
    if proof:
        incident.proof = True
        incident.save()
    event_services.update_workflow_event(user, incident, workflow)
def incident_invalidate(user: User, incident: Incident, comment: str):
    """Move a NEW/REOPENED incident to INVALIDATED with a reason."""
    if incident.current_status not in (StatusType.NEW.name,
                                       StatusType.REOPENED.name):
        raise WorkflowException("Only NEW or REOPENED incidents can be invalidated")
    workflow = InvalidateWorkflow(
        incident=incident,
        actioned_user=user,
        comment=comment,
    )
    workflow.save()
    IncidentStatus(
        previous_status=incident.current_status,
        current_status=StatusType.INVALIDATED,
        incident=incident,
        approved=True,
    ).save()
    event_services.update_workflow_event(user, incident, workflow)
def incident_reopen(user: User, incident: Incident, comment: str):
    """Reopen a CLOSED incident, recording a ReopenWorkflow.

    Raises
    ------
    WorkflowException
        If the incident is not currently CLOSED.
    """
    if incident.current_status != StatusType.CLOSED.name:
        # Fixed copy/paste error: the message previously said "invalidated".
        raise WorkflowException("Only CLOSED incidents can be reopened")
    workflow = ReopenWorkflow(
        incident=incident,
        actioned_user=user,
        comment=comment
    )
    workflow.save()
    status = IncidentStatus(
        previous_status=incident.current_status,
        current_status=StatusType.REOPENED,
        incident=incident,
        approved=True
    )
    status.save()
    event_services.update_workflow_event(user, incident, workflow)
def get_police_report_by_incident(incident: Incident):
    """Return the police report linked to *incident*, or None when absent.

    (Bare ``except:`` narrowed to Exception; dead commented-out raise paths
    removed.)
    """
    try:
        return IncidentPoliceReport.objects.get(incident=incident)
    except Exception:
        return None
def get_incidents_to_escalate():
    """Return (incident_id, current_status, created_date) rows whose latest
    status makes them eligible for auto-escalation.

    The inner query finds each incident's most recent status row; the outer
    filter excludes CLOSED / ACTION_PENDING / NEW / ADVICE_REQESTED.

    NOTE(review): the time filter is ``created_date > NOW() - interval 120
    minute`` — i.e. statuses *newer* than two hours.  For an escalation job
    one would expect the opposite direction (older than the SLA window);
    confirm the intended comparison before relying on this.
    """
    sql = """
            SELECT b.incident_id, b.current_status, b.created_date
            FROM incidents_incidentstatus b
            INNER JOIN (
                SELECT i.incident_id, max(i.created_date) cdate
                FROM incidents_incidentstatus i
                GROUP BY i.incident_id
            ) c
            ON c.incident_id = b.incident_id AND c.cdate = b.created_date
            WHERE b.`created_date` > NOW() - interval 120 minute AND
                b.`current_status` <> 'CLOSED' AND
                b.`current_status` <> 'ACTION_PENDING' AND
                b.`current_status` <> 'NEW' AND
                b.`current_status` <> 'ADVICE_REQESTED'
        """
    with connection.cursor() as cursor:
        cursor.execute(sql)
        incidents = cursor.fetchall()
        return incidents
def auto_escalate_incidents():
    """Escalate every incident flagged by get_incidents_to_escalate().

    Returns the raw rows that were processed.
    """
    incident_details = get_incidents_to_escalate()
    for detail in incident_details:
        incident = get_incident_by_id(detail[0])
        incident_escalate(incident.assignee, incident)
    return incident_details
def attach_media(user: User, incident: Incident, uploaded_file: File):
    """Record a timeline event noting that a file was attached."""
    event_services.media_attached_event(user, incident, uploaded_file)
def get_fitlered_incidents_report(incidents: Incident, output_format: str):
    """Export *incidents* as a downloadable CSV or printable HTML response.

    (Function name keeps its historical misspelling for caller
    compatibility.  Removed an unused HttpResponse local and the dead
    commented-out PDF code path.)

    Raises
    ------
    IncidentException
        If *output_format* is neither "csv" nor "html".
    """
    dataframe = pd.DataFrame(list(incidents.values(
        "refId", "title", "description", "current_status",
        "current_severity", "response_time", "category")))
    dataframe.columns = ["Ref ID", "Title", "Description", "Status",
                         "Severity", "Response Time", "Category"]
    if output_format == "csv":
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=incidents.csv'
        dataframe.to_csv(path_or_buf=response, sep=';', float_format='%.2f',
                         index=False, decimal=",")
        return response
    if output_format == "html":
        output = write_to_html_file(dataframe, "Incidents").encode('utf-8')
        return HttpResponse(output)
    # if it's an unrecognized format, raise exception
    raise IncidentException("Unrecognized export format '%s'" % output_format)
def write_to_html_file(df, title=''):
    '''
    Render *df* as a self-contained, print-friendly HTML page.

    Returns the full HTML document as a string: a fixed header with inline
    CSS (and a print button hidden by @media print), an <h2> title, and the
    dataframe rendered via DataFrame.to_html (escape disabled, "wide" class).
    '''
    result = '''
<html>
<head>
<meta charset="UTF-8">
<style>
    @media print
    {
    button
    {
        display: none !important;
    }
    }
    h2 {
        text-align: center;
    }
    table {
        margin-left: auto;
        margin-right: auto;
    }
    table, th, td {
        border: 1px solid black;
        border-collapse: collapse;
    }
    th, td {
        padding: 5px;
        text-align: center;
        font-size: 90%;
    }
    table tbody tr:hover {
        background-color: #dddddd;
    }
    .wide {
        width: 90%;
    }
</style>
</head>
<body>
    <button onclick="window.print();return false;"> Print </button>
    '''
    result += '<h2> %s </h2>\n' % title
    # escape=False keeps any embedded HTML in cells intact (trusted data only).
    result += df.to_html(classes='wide', escape=False)
    result += '''
</body>
</html>
'''
    return result
def get_incident_by_reporter_unique_id(unique_id):
    """Resolve an incident from a reporter's public unique id.

    Raises
    ------
    IncidentException
        If no reporter or no incident matches.
    """
    try:
        # .get() raises rather than returning None, so the previous explicit
        # None-check was dead code; bare ``except:`` narrowed to Exception.
        reporter = Reporter.objects.get(unique_id=unique_id)
        return Incident.objects.get(reporter=reporter)
    except Exception as exc:
        raise IncidentException("Invalid unique id") from exc
acdef194153a8aa28eb6415e06c16015b51c0216 | 489 | py | Python | app/webauthn/forms.py | onyxcherry/OnyxcherryOTP | 7ce5d224e5c2694048d567d0f60e2f2f22560a37 | [
"MIT"
] | 1 | 2021-06-26T22:22:55.000Z | 2021-06-26T22:22:55.000Z | app/webauthn/forms.py | onyxcherry/OnyxcherryOTP | 7ce5d224e5c2694048d567d0f60e2f2f22560a37 | [
"MIT"
] | null | null | null | app/webauthn/forms.py | onyxcherry/OnyxcherryOTP | 7ce5d224e5c2694048d567d0f60e2f2f22560a37 | [
"MIT"
] | 2 | 2021-02-21T15:59:40.000Z | 2021-02-21T19:05:57.000Z | from flask_babel import lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import HiddenField, StringField, SubmitField
from wtforms.validators import DataRequired, Length
class NameKey(FlaskForm):
    """Form for naming (or renaming) a registered WebAuthn credential."""

    # Opaque credential identifier, round-tripped through a hidden input.
    credential_id = HiddenField()
    # Human-readable label, required, at most 64 characters.
    key_name = StringField(
        _l("Name: "), validators=[DataRequired(), Length(max=64)]
    )
    submit = SubmitField(_l("Submit"))
class DeleteKey(FlaskForm):
    """Form confirming deletion of a WebAuthn credential by its id."""

    # Opaque credential identifier, round-tripped through a hidden input.
    credential_id = HiddenField()
    submit = SubmitField(_l("Submit"))
| 27.166667 | 65 | 0.736196 |
acdef1a350d275d8f55f19e7c80fd7de9dd918ad | 136,157 | py | Python | mne/report/report.py | lokinou/mne-python | f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b | [
"BSD-3-Clause"
] | null | null | null | mne/report/report.py | lokinou/mne-python | f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b | [
"BSD-3-Clause"
] | null | null | null | mne/report/report.py | lokinou/mne-python | f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b | [
"BSD-3-Clause"
] | null | null | null | """Generate self-contained HTML reports from MNE objects."""
# Authors: Alex Gramfort <alexandre.gramfort@inria.fr>
# Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD-3-Clause
import io
import dataclasses
from dataclasses import dataclass
from typing import Tuple
from collections.abc import Sequence
import base64
from io import BytesIO, StringIO
import contextlib
import os
import os.path as op
from pathlib import Path
import fnmatch
import re
from shutil import copyfile
import time
import warnings
import webbrowser
import numpy as np
from .. import __version__ as MNE_VERSION
from ..fixes import _compare_version
from .. import (read_evokeds, read_events, read_cov,
read_source_estimate, read_trans, sys_info,
Evoked, SourceEstimate, Covariance, Info, Transform)
from ..channels import _get_ch_type
from ..defaults import _handle_default
from ..io import read_raw, read_info, BaseRaw
from ..io._read_raw import supported as extension_reader_map
from ..io.pick import _DATA_CH_TYPES_SPLIT
from ..proj import read_proj
from .._freesurfer import _reorient_image, _mri_orientation
from ..utils import (logger, verbose, get_subjects_dir, warn, _ensure_int,
fill_doc, _check_option, _validate_type, _safe_input,
_path_like, use_log_level, _check_fname, _VerboseDep,
_check_ch_locs, _import_h5io_funcs)
from ..viz import (plot_events, plot_alignment, plot_cov, plot_projs_topomap,
plot_compare_evokeds, set_3d_view, get_3d_backend,
Figure3D, use_browser_backend)
from ..viz.misc import _plot_mri_contours, _get_bem_plotting_surfaces
from ..viz.utils import _ndarray_to_fig, tight_layout
from ..forward import read_forward_solution, Forward
from ..epochs import read_epochs, BaseEpochs
from ..preprocessing.ica import read_ica
from .. import dig_mri_distances
from ..minimum_norm import read_inverse_operator, InverseOperator
from ..parallel import parallel_func, check_n_jobs
_BEM_VIEWS = ('axial', 'sagittal', 'coronal')


# For raw files, we want to support different suffixes + extensions for all
# supported file formats
SUPPORTED_READ_RAW_EXTENSIONS = tuple(extension_reader_map.keys())
RAW_EXTENSIONS = []
for ext in SUPPORTED_READ_RAW_EXTENSIONS:
    RAW_EXTENSIONS.append(f'raw{ext}')
    if ext not in ('.bdf', '.edf', '.set', '.vhdr'):  # EEG-only formats
        RAW_EXTENSIONS.append(f'meg{ext}')
    RAW_EXTENSIONS.append(f'eeg{ext}')
    RAW_EXTENSIONS.append(f'ieeg{ext}')
    RAW_EXTENSIONS.append(f'nirs{ext}')

# Processed data will always be in (gzipped) FIFF format
VALID_EXTENSIONS = ('sss.fif', 'sss.fif.gz',
                    'eve.fif', 'eve.fif.gz',
                    'cov.fif', 'cov.fif.gz',
                    # bugfix: was 'prof.fif.gz' (typo), which prevented
                    # gzipped projector files from being recognized
                    'proj.fif', 'proj.fif.gz',
                    'trans.fif', 'trans.fif.gz',
                    'fwd.fif', 'fwd.fif.gz',
                    'epo.fif', 'epo.fif.gz',
                    'inv.fif', 'inv.fif.gz',
                    'ave.fif', 'ave.fif.gz',
                    'T1.mgz') + tuple(RAW_EXTENSIONS)
del RAW_EXTENSIONS

# Order in which content sections appear when sorting a report.
CONTENT_ORDER = (
    'raw',
    'events',
    'epochs',
    'ssp-projectors',
    'evoked',
    'covariance',
    'coregistration',
    'bem',
    'forward-solution',
    'inverse-operator',
    'source-estimate'
)

html_include_dir = Path(__file__).parent / 'js_and_css'
template_dir = Path(__file__).parent / 'templates'
JAVASCRIPT = (html_include_dir / 'report.js').read_text(encoding='utf-8')
CSS = (html_include_dir / 'report.sass').read_text(encoding='utf-8')

# Limits applied to embedded images to keep report file sizes manageable.
MAX_IMG_RES = 100  # in dots per inch
MAX_IMG_WIDTH = 850  # in pixels
def _get_ch_types(inst):
    """Return the known data channel types present in ``inst``, in order."""
    return [candidate for candidate in _DATA_CH_TYPES_SPLIT
            if candidate in inst]
###############################################################################
# HTML generation
def _html_header_element(*, lang, include, js, css, title, tags, mne_logo_img):
    """Render the report header (head section, logo, nav) via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('header.html.jinja')
    return template.render(
        lang=lang, include=include, js=js, css=css, title=title, tags=tags,
        mne_logo_img=mne_logo_img
    )
def _html_footer_element(*, mne_version, date):
    """Render the report footer (MNE version and date) via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('footer.html.jinja')
    return template.render(mne_version=mne_version, date=date)
def _html_toc_element(*, content_elements):
    """Render the table of contents via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('toc.html.jinja')
    return template.render(content_elements=content_elements)
def _html_raw_element(*, id, repr, psd, butterfly, ssp_projs, title, tags):
    """Render the HTML section for a Raw instance via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('raw.html.jinja')
    return template.render(
        id=id, repr=repr, psd=psd, butterfly=butterfly, ssp_projs=ssp_projs,
        tags=tags, title=title
    )
def _html_epochs_element(*, id, repr, metadata, erp_imgs, drop_log, psd,
                         ssp_projs, title, tags):
    """Render the HTML section for an Epochs instance via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('epochs.html.jinja')
    return template.render(
        id=id, repr=repr, metadata=metadata, erp_imgs=erp_imgs,
        drop_log=drop_log, psd=psd, ssp_projs=ssp_projs, tags=tags,
        title=title
    )
def _html_evoked_element(*, id, joint, slider, gfp, whitened, ssp_projs, title,
                         tags):
    """Render the HTML section for an Evoked instance via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('evoked.html.jinja')
    return template.render(
        id=id, joint=joint, slider=slider, gfp=gfp, whitened=whitened,
        ssp_projs=ssp_projs, tags=tags, title=title
    )
def _html_cov_element(*, id, matrix, svd, title, tags):
    """Render the HTML section for a covariance matrix via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('cov.html.jinja')
    return template.render(
        id=id, matrix=matrix, svd=svd, tags=tags, title=title
    )
def _html_forward_sol_element(*, id, repr, sensitivity_maps, title, tags):
    """Render the HTML section for a forward solution via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('forward.html.jinja')
    return template.render(
        id=id, repr=repr, sensitivity_maps=sensitivity_maps, tags=tags,
        title=title
    )
def _html_inverse_operator_element(*, id, repr, source_space, title, tags):
    """Render the HTML section for an inverse operator via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('inverse.html.jinja')
    return template.render(
        id=id, repr=repr, source_space=source_space, tags=tags, title=title
    )
def _html_ica_element(*, id, repr, overlay, ecg, eog, ecg_scores, eog_scores,
                      properties, topographies, title, tags):
    """Render the HTML section for an ICA decomposition via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('ica.html.jinja')
    return template.render(
        id=id, repr=repr, overlay=overlay, ecg=ecg, eog=eog,
        ecg_scores=ecg_scores, eog_scores=eog_scores, properties=properties,
        topographies=topographies, tags=tags, title=title
    )
def _html_slider_element(*, id, images, captions, start_idx, image_format,
                         title, tags, klass=''):
    """Render an image slider (carousel) via Jinja."""
    from ..html_templates import report_templates_env
    # The template expects strings; replace missing captions with ''.
    safe_captions = ['' if caption is None else caption
                     for caption in captions]
    template = report_templates_env.get_template('slider.html.jinja')
    return template.render(
        id=id, images=images, captions=safe_captions, tags=tags, title=title,
        start_idx=start_idx, image_format=image_format, klass=klass
    )
def _html_image_element(*, id, img, image_format, caption, show, div_klass,
                        img_klass, title, tags):
    """Render a single embedded image with optional caption via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('image.html.jinja')
    return template.render(
        id=id, img=img, caption=caption, tags=tags, title=title,
        image_format=image_format, div_klass=div_klass, img_klass=img_klass,
        show=show
    )
def _html_code_element(*, id, code, language, title, tags):
    """Render a syntax-highlighted code listing via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('code.html.jinja')
    return template.render(
        id=id, code=code, language=language, title=title, tags=tags
    )
def _html_element(*, id, div_klass, html, title, tags):
    """Render a generic block of user-supplied HTML via Jinja."""
    from ..html_templates import report_templates_env
    template = report_templates_env.get_template('html.html.jinja')
    return template.render(
        id=id, div_klass=div_klass, html=html, title=title, tags=tags
    )
@dataclass
class _ContentElement:
    """One content entry of a report."""
    name: str  # the entry's title (used e.g. in the TOC)
    dom_id: str  # unique id of the corresponding HTML element
    tags: Tuple[str]  # tags attached to this element
    html: str  # the rendered HTML for this element
def _check_tags(tags) -> Tuple[str]:
# Must be iterable, but not a string
if isinstance(tags, str):
tags = (tags,)
elif isinstance(tags, (Sequence, np.ndarray)):
tags = tuple(tags)
else:
raise TypeError(
f'tags must be a string (without spaces or special characters) or '
f'an array-like object of such strings, but got {type(tags)} '
f'instead: {tags}'
)
# Check for invalid dtypes
bad_tags = [tag for tag in tags
if not isinstance(tag, str)]
if bad_tags:
raise TypeError(
f'All tags must be strings without spaces or special characters, '
f'but got the following instead: '
f'{", ".join([str(tag) for tag in bad_tags])}'
)
# Check for invalid characters
invalid_chars = (' ', '"', '\n') # we'll probably find more :-)
bad_tags = []
for tag in tags:
for invalid_char in invalid_chars:
if invalid_char in tag:
bad_tags.append(tag)
break
if bad_tags:
raise ValueError(
f'The following tags contained invalid characters: '
f'{", ".join(repr(tag) for tag in bad_tags)}'
)
return tags
###############################################################################
# PLOTTING FUNCTIONS
def _constrain_fig_resolution(fig, *, max_width, max_res):
"""Limit the resolution (DPI) of a figure.
Parameters
----------
fig : matplotlib.figure.Figure
The figure whose DPI to adjust.
max_width : int
The max. allowed width, in pixels.
max_res : int
The max. allowed resolution, in DPI.
Returns
-------
Nothing, alters the figure's properties in-place.
"""
dpi = min(max_res, max_width / fig.get_size_inches()[0])
fig.set_dpi(dpi)
def _fig_to_img(fig, *, image_format='png', own_figure=True):
    """Serialize a figure to an embeddable image string.

    ``fig`` may be an ndarray (raw image data), a Matplotlib ``Figure``, or a
    3D (PyVista) figure; arrays and 3D figures are first converted into a
    Matplotlib figure. Returns the SVG markup as ``str`` when
    ``image_format='svg'``, otherwise a base64-encoded string of the binary
    image. If ``own_figure`` is True (or the figure had to be created here),
    the figure is closed after saving.
    """
    # fig can be ndarray, mpl Figure, PyVista Figure
    import matplotlib.pyplot as plt
    from matplotlib.figure import Figure
    _validate_type(fig, (np.ndarray, Figure, Figure3D), 'fig')
    if isinstance(fig, np.ndarray):
        # In this case, we are creating the fig, so we might as well
        # auto-close in all cases
        fig = _ndarray_to_fig(fig)
        if own_figure:
            _constrain_fig_resolution(
                fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
            )
        own_figure = True  # close the figure we just created
    elif not isinstance(fig, Figure):
        # 3D figure: take a screenshot and wrap it in a Matplotlib figure.
        from ..viz.backends.renderer import backend, MNE_3D_BACKEND_TESTING
        backend._check_3d_figure(figure=fig)
        if not MNE_3D_BACKEND_TESTING:
            img = backend._take_3d_screenshot(figure=fig)
        else:  # Testing mode
            img = np.zeros((2, 2, 3))
        if own_figure:
            backend._close_3d_figure(figure=fig)
        fig = _ndarray_to_fig(img)
        if own_figure:
            _constrain_fig_resolution(
                fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
            )
        own_figure = True  # close the fig we just created
    output = BytesIO()
    logger.debug(
        f'Saving figure with dimension {fig.get_size_inches()} inches with '
        f'{fig.get_dpi()} dpi'
    )
    # Suppress the tight_layout incompatibility warning emitted by some axes.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            action='ignore',
            message='.*Axes that are not compatible with tight_layout.*',
            category=UserWarning
        )
        fig.savefig(output, format=image_format, dpi=fig.get_dpi())
    if own_figure:
        plt.close(fig)
    output = output.getvalue()
    # SVG is already text; binary formats are base64-encoded for embedding.
    return (output.decode('utf-8') if image_format == 'svg' else
            base64.b64encode(output).decode('ascii'))
def _scale_mpl_figure(fig, scale):
    """Magic scaling helper.

    Keeps font size and artist sizes constant
    0.5 : current font - 4pt
    2.0 : current font + 4pt

    This is a heuristic but it seems to work for most cases.
    """
    scale = float(scale)
    # Scale both canvas size and DPI so artist sizes stay constant.
    fig.set_size_inches(fig.get_size_inches() * scale)
    fig.set_dpi(fig.get_dpi() * scale)
    import matplotlib as mpl
    # Fonts are adjusted in the opposite direction: grow by scale**2 points
    # when enlarging, shrink by (1/scale)**2 points when reducing.
    if scale >= 1:
        sfactor = scale ** 2
    else:
        sfactor = -((1. / scale) ** 2)
    for text in fig.findobj(mpl.text.Text):
        fs = text.get_fontsize()
        new_size = fs + sfactor
        if new_size <= 0:
            raise ValueError('could not rescale matplotlib fonts, consider '
                             'increasing "scale"')
        text.set_fontsize(new_size)
    fig.canvas.draw()
def _get_bem_contour_figs_as_arrays(
    *, sl, n_jobs, mri_fname, surfaces, orientation, src, show,
    show_orientation, width
):
    """Render BEM surface contours on MRI slices.

    The slices in ``sl`` are split across ``n_jobs`` workers, each calling
    ``_plot_mri_contours``; the per-worker results are concatenated back
    into a single flat list.

    Returns
    -------
    list of array
        A list of NumPy arrays that represent the generated Matplotlib
        figures.
    """
    # Matplotlib <3.2 doesn't work nicely with process-based parallelization
    from matplotlib import __version__ as MPL_VERSION
    if _compare_version(MPL_VERSION, '>=', '3.2'):
        prefer = 'processes'
    else:
        prefer = 'threads'

    # Never spawn more workers than there are slices to render.
    use_jobs = min(n_jobs, max(1, len(sl)))
    parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs,
                                       prefer=prefer)
    outs = parallel(
        p_fun(
            slices=s, mri_fname=mri_fname, surfaces=surfaces,
            orientation=orientation, src=src, show=show,
            show_orientation=show_orientation, width=width,
            slices_as_subplots=False
        )
        for s in np.array_split(sl, use_jobs)
    )
    # Flatten the per-worker lists into one list of image arrays.
    out = list()
    for o in outs:
        out.extend(o)
    return out
def _iterate_trans_views(function, alpha, **kwargs):
    """Auxiliary function to iterate over views in trans fig.

    Creates a 3D figure, renders the coregistration with a dense head
    surface at the given ``alpha`` (falling back to the sparse 'head'
    surface if the dense one is unavailable), and returns the result of
    ``_itv``. The figure is always closed afterwards.
    """
    from ..viz import create_3d_figure
    from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING
    # TODO: Eventually maybe we should expose the size option?
    size = (80, 80) if MNE_3D_BACKEND_TESTING else (800, 800)
    fig = create_3d_figure(size, bgcolor=(0.5, 0.5, 0.5))
    from ..viz.backends.renderer import backend
    try:
        try:
            return _itv(
                function, fig, surfaces={'head-dense': alpha}, **kwargs
            )
        except IOError:
            # Dense head surface not available for this subject.
            return _itv(function, fig, surfaces={'head': alpha}, **kwargs)
    finally:
        backend._close_3d_figure(fig)
def _itv(function, fig, **kwargs):
    """Render six standard views of a coregistration figure into one image.

    Calls ``function`` (e.g. ``plot_alignment``) on ``fig``, screenshots the
    scene from six canonical views, tiles them into a 2x3 mosaic, and
    returns the base64-encoded PNG together with a caption reporting the
    average digitized-point-to-head distance.
    """
    from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING, backend
    from ..viz._brain.view import views_dicts
    function(fig=fig, **kwargs)

    views = (
        'frontal', 'lateral', 'medial',
        'axial', 'rostral', 'coronal'
    )

    images = []
    for view in views:
        if not MNE_3D_BACKEND_TESTING:
            set_3d_view(fig, **views_dicts['both'][view])
            backend._check_3d_figure(fig)
            im = backend._take_3d_screenshot(figure=fig)
        else:  # Testing mode
            im = np.zeros((2, 2, 3))
        images.append(im)

    # Tile the six screenshots into a 2-row, 3-column mosaic.
    images = np.concatenate(
        [np.concatenate(images[:3], axis=1),
         np.concatenate(images[3:], axis=1)],
        axis=0)

    try:
        dists = dig_mri_distances(info=kwargs['info'],
                                  trans=kwargs['trans'],
                                  subject=kwargs['subject'],
                                  subjects_dir=kwargs['subjects_dir'],
                                  on_defects='ignore')
        caption = (f'Average distance from {len(dists)} digitized points to '
                   f'head: {1e3 * np.mean(dists):.2f} mm')
    except BaseException as e:
        # Distance computation is best-effort; fall back to a generic caption.
        caption = 'Distances could not be calculated from digitized points'
        warn(f'{caption}: {e}')
    img = _fig_to_img(images, image_format='png')

    return img, caption
def _plot_ica_properties_as_arrays(*, ica, inst, picks, n_jobs):
    """Parallelize ICA component properties plotting, and return arrays.

    Returns
    -------
    outs : list of array
        The properties plots as NumPy arrays.
    """
    import matplotlib.pyplot as plt

    if picks is None:
        # Default: one properties plot per fitted component.
        picks = list(range(ica.n_components_))

    def _plot_one_ica_property(*, ica, inst, pick):
        # Render one component's properties figure and convert it into an
        # image array; the figure is closed to free memory in workers.
        figs = ica.plot_properties(inst=inst, picks=pick, show=False)
        assert len(figs) == 1
        fig = figs[0]
        _constrain_fig_resolution(
            fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
        )
        with io.BytesIO() as buff:
            fig.savefig(
                buff,
                format='png',
                pad_inches=0,
            )
            buff.seek(0)
            fig_array = plt.imread(buff, format='png')

        plt.close(fig)
        return fig_array

    # Never spawn more workers than there are components to plot.
    use_jobs = min(n_jobs, max(1, len(picks)))
    parallel, p_fun, _ = parallel_func(
        func=_plot_one_ica_property,
        n_jobs=use_jobs
    )
    outs = parallel(
        p_fun(
            ica=ica, inst=inst, pick=pick
        ) for pick in picks
    )
    return outs
###############################################################################
# TOC FUNCTIONS
def _endswith(fname, suffixes):
    """Aux function to test if file name includes the specified suffixes.

    Parameters
    ----------
    fname : str
        The file name to test.
    suffixes : str | iterable of str
        One or more suffixes (e.g. ``'raw'``, ``'epo'``) to look for,
        combined with every supported raw-file extension.

    Returns
    -------
    bool
        True if ``fname`` ends with any ``<sep><suffix><ext>`` combination,
        where ``<sep>`` is ``-`` or ``_``.
    """
    if isinstance(suffixes, str):
        suffixes = [suffixes]
    for suffix in suffixes:
        for ext in SUPPORTED_READ_RAW_EXTENSIONS:
            # Accept both '-suffix' and '_suffix' separators. (The original
            # tuple listed each pattern twice; the duplicates were redundant.)
            if fname.endswith((f'-{suffix}{ext}', f'_{suffix}{ext}')):
                return True
    return False
def open_report(fname, **params):
    """Read a saved report or, if it doesn't exist yet, create a new one.

    The returned report can be used as a context manager, in which case any
    changes to the report are saved when exiting the context block.

    Parameters
    ----------
    fname : str
        The file containing the report, stored in the HDF5 format. If the file
        does not exist yet, a new report is created that will be saved to the
        specified file.
    **params : kwargs
        When creating a new report, any named parameters other than ``fname``
        are passed to the ``__init__`` function of the `Report` object. When
        reading an existing report, the parameters are checked with the
        loaded report and an exception is raised when they don't match.

    Returns
    -------
    report : instance of Report
        The report.
    """
    fname = _check_fname(fname=fname, overwrite='read', must_exist=False)
    if not op.exists(fname):
        # No saved report yet: start a fresh one.
        report = Report(**params)
    else:
        # Load the stored state and verify it agrees with the requested
        # parameters before restoring the report from it.
        read_hdf5, _ = _import_h5io_funcs()
        state = read_hdf5(fname, title='mnepython')
        for param, value in params.items():
            if param not in state:
                raise ValueError('The loaded report has no attribute %s' %
                                 param)
            if value != state[param]:
                raise ValueError("Attribute '%s' of loaded report does not "
                                 "match the given parameter." % param)
        report = Report()
        report.__setstate__(state)
    # Keep track of the filename in case the Report object is used as a
    # context manager.
    report.fname = fname
    return report
###############################################################################
# HTML scan renderer
# MNE logo, base64-encoded so it can be embedded directly in the HTML.
mne_logo_path = Path(__file__).parents[1] / 'icons' / 'mne_icon-cropped.png'
mne_logo = base64.b64encode(mne_logo_path.read_bytes()).decode('ascii')
def _check_scale(scale):
"""Ensure valid scale value is passed."""
if np.isscalar(scale) and scale <= 0:
raise ValueError('scale must be positive, not %s' % scale)
def _check_image_format(rep, image_format):
"""Ensure fmt is valid."""
if rep is None or image_format is not None:
_check_option('image_format', image_format,
allowed_values=('png', 'svg', 'gif'))
else:
image_format = rep.image_format
return image_format
@fill_doc
class Report(_VerboseDep):
r"""Object for rendering HTML.
Parameters
----------
info_fname : None | str
Name of the file containing the info dictionary.
%(subjects_dir)s
subject : str | None
Subject name.
title : str
Title of the report.
cov_fname : None | str
Name of the file containing the noise covariance.
%(baseline_report)s
Defaults to ``None``, i.e. no baseline correction.
image_format : 'png' | 'svg' | 'gif'
Default image format to use (default is ``'png'``).
``'svg'`` uses vector graphics, so fidelity is higher but can increase
file size and browser image rendering time as well.
.. versionadded:: 0.15
raw_psd : bool | dict
If True, include PSD plots for raw files. Can be False (default) to
omit, True to plot, or a dict to pass as ``kwargs`` to
:meth:`mne.io.Raw.plot_psd`.
.. versionadded:: 0.17
projs : bool
Whether to include topographic plots of SSP projectors, if present in
the data. Defaults to ``False``.
.. versionadded:: 0.21
%(verbose)s
Attributes
----------
info_fname : None | str
Name of the file containing the info dictionary.
%(subjects_dir)s
subject : str | None
Subject name.
title : str
Title of the report.
cov_fname : None | str
Name of the file containing the noise covariance.
%(baseline_report)s
Defaults to ``None``, i.e. no baseline correction.
image_format : str
Default image format to use.
.. versionadded:: 0.15
raw_psd : bool | dict
If True, include PSD plots for raw files. Can be False (default) to
omit, True to plot, or a dict to pass as ``kwargs`` to
:meth:`mne.io.Raw.plot_psd`.
.. versionadded:: 0.17
projs : bool
Whether to include topographic plots of SSP projectors, if present in
the data. Defaults to ``False``.
.. versionadded:: 0.21
%(verbose)s
html : list of str
Contains items of html-page.
include : list of str
Dictionary containing elements included in head.
fnames : list of str
List of file names rendered.
sections : list of str
List of sections.
lang : str
language setting for the HTML file.
Notes
-----
See :ref:`tut-report` for an introduction to using ``mne.Report``.
.. versionadded:: 0.8.0
"""
    @verbose
    def __init__(self, info_fname=None, subjects_dir=None,
                 subject=None, title=None, cov_fname=None, baseline=None,
                 image_format='png', raw_psd=False, projs=False, *,
                 verbose=None):
        # Store file names as plain strings (or None) for serializability.
        self.info_fname = str(info_fname) if info_fname is not None else None
        self.cov_fname = str(cov_fname) if cov_fname is not None else None
        self.baseline = baseline
        if subjects_dir is not None:
            subjects_dir = get_subjects_dir(subjects_dir)
        self.subjects_dir = subjects_dir
        self.subject = subject
        self.title = title
        # Validate the default image format up front.
        self.image_format = _check_image_format(None, image_format)
        self.projs = projs

        self._dom_id = 0  # counter used to generate unique HTML element ids
        self._content = []  # the ordered list of _ContentElement entries
        self.include = []  # extra content for the HTML <head> (CSS/JS)
        self.lang = 'en-us'  # language setting for the HTML file

        if not isinstance(raw_psd, bool) and not isinstance(raw_psd, dict):
            raise TypeError('raw_psd must be bool or dict, got %s'
                            % (type(raw_psd),))
        self.raw_psd = raw_psd
        self._init_render()  # Initialize the renderer

        self.fname = None  # The name of the saved report
        self.data_path = None
def __repr__(self):
"""Print useful info about report."""
s = f'<Report | {len(self._content)} items'
if self.title is not None:
s += f' | {self.title}'
content_element_names = [element.name for element in self._content]
if len(content_element_names) > 4:
first_entries = '\n'.join(content_element_names[:2])
last_entries = '\n'.join(content_element_names[-2:])
s += f'\n{first_entries}'
s += '\n ...\n'
s += last_entries
elif len(content_element_names) > 0:
entries = '\n'.join(content_element_names)
s += f'\n{entries}'
s += '\n>'
return s
    def __len__(self):
        """Return the number of files processed by the report.

        Returns
        -------
        n_files : int
            The number of files processed.
        """
        # One content element is added per processed file/object.
        return len(self._content)
    @staticmethod
    def _get_state_params():
        """Return the attribute names (de)serialized to/from HDF5."""
        # Which attributes to store in and read from HDF5 files
        return (
            'baseline', 'cov_fname', 'include', '_content', 'image_format',
            'info_fname', '_dom_id', 'raw_psd', 'projs',
            'subjects_dir', 'subject', 'title', 'data_path', 'lang',
            'fname'
        )
    def _get_dom_id(self):
        """Return a fresh, unique DOM element id (``'global<N>'``)."""
        # Monotonically increasing counter guarantees uniqueness per report.
        self._dom_id += 1
        return f'global{self._dom_id}'
def _validate_topomap_kwargs(self, topomap_kwargs):
_validate_type(topomap_kwargs, (dict, None), 'topomap_kwargs')
topomap_kwargs = dict() if topomap_kwargs is None else topomap_kwargs
return topomap_kwargs
def _validate_input(self, items, captions, tag, comments=None):
"""Validate input."""
if not isinstance(items, (list, tuple)):
items = [items]
if not isinstance(captions, (list, tuple)):
captions = [captions]
if not isinstance(comments, (list, tuple)) and comments is not None:
comments = [comments]
if comments is not None and len(comments) != len(items):
raise ValueError(
f'Number of "comments" and report items must be equal, '
f'or comments should be None; got '
f'{len(comments)} and {len(items)}'
)
elif captions is not None and len(captions) != len(items):
raise ValueError(
f'Number of "captions" and report items must be equal; '
f'got {len(captions)} and {len(items)}'
)
return items, captions, comments
    @property
    def html(self):
        """The rendered HTML of each content element, in order."""
        return [element.html for element in self._content]
@property
def tags(self):
"""All tags currently used in the report."""
tags = []
for c in self._content:
tags.extend(c.tags)
tags = tuple(sorted(set(tags)))
return tags
    def add_custom_css(self, css):
        """Add custom CSS to the report.

        Parameters
        ----------
        css : str
            Style definitions to add to the report. The content of this string
            will be embedded between HTML ``<style>`` and ``</style>`` tags.

        Notes
        -----
        .. versionadded:: 0.23
        """
        style = f'\n<style type="text/css">\n{css}\n</style>'
        # NOTE(review): ``self.include`` is initialized as a list, so ``+=``
        # with a string extends it character by character; presumably the
        # characters are later joined back into a string when rendering the
        # header — confirm against ``_html_header_element``.
        self.include += style
    def add_custom_js(self, js):
        """Add custom JavaScript to the report.

        Parameters
        ----------
        js : str
            JavaScript code to add to the report. The content of this string
            will be embedded between HTML ``<script>`` and ``</script>`` tags.

        Notes
        -----
        .. versionadded:: 0.23
        """
        script = f'\n<script type="text/javascript">\n{js}\n</script>'
        # NOTE(review): ``self.include`` is a list, so ``+=`` with a string
        # extends it character by character; presumably joined back into a
        # string when the header is rendered — confirm.
        self.include += script
    @fill_doc
    def add_epochs(
        self, epochs, title, *, psd=True, projs=None, topomap_kwargs=None,
        drop_log_ignore=('IGNORED',), tags=('epochs',), replace=False
    ):
        """Add `~mne.Epochs` to the report.

        Parameters
        ----------
        epochs : path-like | instance of Epochs
            The epochs to add to the report.
        title : str
            The title to add.
        psd : bool | float
            If a float, the duration of data to use for creation of PSD plots,
            in seconds. PSD will be calculated on as many epochs as required to
            cover at least this duration. Epochs will be picked across the
            entire time range in equally-spaced distance.

            .. note::
              In rare edge cases, we may not be able to create a grid of
              equally-spaced epochs that cover the entire requested time range.
              In these situations, a warning will be emitted, informing you
              about the duration that's actually being used.

            If ``True``, add PSD plots based on all ``epochs``. If ``False``,
            do not add PSD plots.
        %(projs_report)s
        %(topomap_kwargs)s
        drop_log_ignore : array-like of str
            The drop reasons to ignore when creating the drop log bar plot.
            All epochs for which a drop reason listed here appears in
            ``epochs.drop_log`` will be excluded from the drop log plot.
        %(tags_report)s
        %(replace_report)s

        Notes
        -----
        .. versionadded:: 0.24.0
        """
        tags = _check_tags(tags)
        # Fall back to the report-wide SSP-projector setting if unspecified.
        add_projs = self.projs if projs is None else projs
        htmls = self._render_epochs(
            epochs=epochs,
            psd=psd,
            add_projs=add_projs,
            topomap_kwargs=topomap_kwargs,
            drop_log_ignore=drop_log_ignore,
            tags=tags,
            image_format=self.image_format,
        )
        (repr_html, metadata_html, erp_imgs_html, drop_log_html, psd_html,
         ssp_projs_html) = htmls

        # Assemble the rendered sub-sections into one HTML element.
        dom_id = self._get_dom_id()
        html = _html_epochs_element(
            repr=repr_html,
            metadata=metadata_html,
            erp_imgs=erp_imgs_html,
            drop_log=drop_log_html,
            psd=psd_html,
            ssp_projs=ssp_projs_html,
            tags=tags,
            title=title,
            id=dom_id,
        )
        self._add_or_replace(
            name=title,
            dom_id=dom_id,
            tags=tags,
            html=html,
            replace=replace
        )
    @fill_doc
    def add_evokeds(self, evokeds, *, titles=None, noise_cov=None, projs=None,
                    n_time_points=None, tags=('evoked',), replace=False,
                    topomap_kwargs=None, n_jobs=1):
        """Add `~mne.Evoked` objects to the report.

        Parameters
        ----------
        evokeds : path-like | instance of Evoked | list of Evoked
            The evoked data to add to the report. Multiple `~mne.Evoked`
            objects – as returned from `mne.read_evokeds` – can be passed as
            a list.
        titles : str | list of str | None
            The titles corresponding to the evoked data. If ``None``, the
            content of ``evoked.comment`` from each evoked will be used as
            title.
        noise_cov : path-like | instance of Covariance | None
            A noise covariance matrix. If provided, will be used to whiten
            the ``evokeds``. If ``None``, will fall back to the ``cov_fname``
            provided upon report creation.
        %(projs_report)s
        n_time_points : int | None
            The number of equidistant time points to render. If ``None``,
            will render each `~mne.Evoked` at 21 time points, unless the data
            contains fewer time points, in which case all will be rendered.
        %(tags_report)s
        %(replace_report)s
        %(topomap_kwargs)s
        %(n_jobs)s

        Notes
        -----
        .. versionadded:: 0.24.0
        """
        # Normalize the input to a list of Evoked, reading from disk if a
        # path was passed.
        if isinstance(evokeds, Evoked):
            evokeds = [evokeds]
        elif isinstance(evokeds, list):
            pass
        else:
            evoked_fname = evokeds
            logger.debug(f'Evoked: Reading {evoked_fname}')
            evokeds = read_evokeds(evoked_fname, verbose=False)

        # Apply the report-wide baseline, if one was configured.
        if self.baseline is not None:
            evokeds = [e.copy().apply_baseline(self.baseline)
                       for e in evokeds]

        if titles is None:
            titles = [e.comment for e in evokeds]
        elif isinstance(titles, str):
            titles = [titles]

        if len(evokeds) != len(titles):
            raise ValueError(
                f'Number of evoked objects ({len(evokeds)}) must '
                f'match number of captions ({len(titles)})'
            )

        # Fall back to the report-wide noise covariance, reading it from
        # disk if necessary.
        if noise_cov is None:
            noise_cov = self.cov_fname
        if noise_cov is not None and not isinstance(noise_cov, Covariance):
            noise_cov = read_cov(fname=noise_cov)
        tags = _check_tags(tags)

        add_projs = self.projs if projs is None else projs

        # One content element per evoked.
        for evoked, title in zip(evokeds, titles):
            evoked_htmls = self._render_evoked(
                evoked=evoked,
                noise_cov=noise_cov,
                image_format=self.image_format,
                add_projs=add_projs,
                n_time_points=n_time_points,
                tags=tags,
                topomap_kwargs=topomap_kwargs,
                n_jobs=n_jobs
            )

            (joint_html, slider_html, gfp_html, whitened_html,
             ssp_projs_html) = evoked_htmls

            dom_id = self._get_dom_id()
            html = _html_evoked_element(
                id=dom_id,
                joint=joint_html,
                slider=slider_html,
                gfp=gfp_html,
                whitened=whitened_html,
                ssp_projs=ssp_projs_html,
                title=title,
                tags=tags
            )
            self._add_or_replace(
                dom_id=dom_id,
                name=title,
                tags=tags,
                html=html,
                replace=replace
            )
    @fill_doc
    def add_raw(
        self, raw, title, *, psd=None, projs=None, butterfly=True,
        scalings=None, tags=('raw',), replace=False, topomap_kwargs=None
    ):
        """Add `~mne.io.Raw` objects to the report.

        Parameters
        ----------
        raw : path-like | instance of Raw
            The data to add to the report.
        title : str
            The title corresponding to the ``raw`` object.
        psd : bool | None
            Whether to add PSD plots. Overrides the ``raw_psd`` parameter
            passed when initializing the `~mne.Report`. If ``None``, use
            ``raw_psd`` from `~mne.Report` creation.
        %(projs_report)s
        butterfly : bool | int
            Whether to add butterfly plots of the data. Can be useful to
            spot problematic channels. If ``True``, 10 equally-spaced 1-second
            segments will be plotted. If an integer, specifies the number of
            1-second segments to plot. Larger numbers may take a considerable
            amount of time if the data contains many sensors. You can disable
            butterfly plots altogether by passing ``False``.
        %(scalings)s
        %(tags_report)s
        %(replace_report)s
        %(topomap_kwargs)s

        Notes
        -----
        .. versionadded:: 0.24.0
        """
        tags = _check_tags(tags)
        # Resolve the PSD setting: an explicit argument overrides the
        # report-wide ``raw_psd``; a dict carries plot_psd() kwargs.
        if psd is None:
            add_psd = dict() if self.raw_psd is True else self.raw_psd
        elif psd is True:
            add_psd = dict()
        else:
            add_psd = False

        add_projs = self.projs if projs is None else projs

        htmls = self._render_raw(
            raw=raw,
            add_psd=add_psd,
            add_projs=add_projs,
            butterfly=butterfly,
            butterfly_scalings=scalings,
            image_format=self.image_format,
            tags=tags,
            topomap_kwargs=topomap_kwargs,
        )
        repr_html, psd_img_html, butterfly_imgs_html, ssp_proj_img_html = htmls
        dom_id = self._get_dom_id()
        html = _html_raw_element(
            repr=repr_html,
            psd=psd_img_html,
            butterfly=butterfly_imgs_html,
            ssp_projs=ssp_proj_img_html,
            tags=tags,
            title=title,
            id=dom_id,
        )
        self._add_or_replace(
            dom_id=dom_id,
            name=title,
            tags=tags,
            html=html,
            replace=replace
        )
    @fill_doc
    def add_stc(self, stc, title, *, subject=None, subjects_dir=None,
                n_time_points=None, tags=('source-estimate',), replace=False,
                stc_plot_kwargs=None):
        """Add a `~mne.SourceEstimate` (STC) to the report.

        Parameters
        ----------
        stc : path-like | instance of SourceEstimate
            The `~mne.SourceEstimate` to add to the report.
        title : str
            The title to add.
        subject : str | None
            The name of the FreeSurfer subject the STC belongs to. The name is
            not stored with the STC data and therefore needs to be specified.
            If ``None``, will use the value of ``subject`` passed on report
            creation.
        subjects_dir : path-like | None
            The FreeSurfer ``SUBJECTS_DIR``.
        n_time_points : int | None
            The number of equidistant time points to render. If ``None``,
            will render ``stc`` at 51 time points, unless the data
            contains fewer time points, in which case all will be rendered.
        %(tags_report)s
        %(replace_report)s
        %(stc_plot_kwargs_report)s

        Notes
        -----
        .. versionadded:: 0.24.0
        """
        tags = _check_tags(tags)
        # Rendering (and id generation) is delegated to the helper.
        html, dom_id = self._render_stc(
            stc=stc,
            title=title,
            tags=tags,
            image_format=self.image_format,
            subject=subject,
            subjects_dir=subjects_dir,
            n_time_points=n_time_points,
            stc_plot_kwargs=stc_plot_kwargs
        )
        self._add_or_replace(
            dom_id=dom_id,
            name=title,
            tags=tags,
            html=html,
            replace=replace
        )
    @fill_doc
    def add_forward(self, forward, title, *, subject=None, subjects_dir=None,
                    tags=('forward-solution',), replace=False):
        """Add a forward solution.

        Parameters
        ----------
        forward : instance of Forward | path-like
            The forward solution to add to the report.
        title : str
            The title corresponding to forward solution.
        subject : str | None
            The name of the FreeSurfer subject ``forward`` belongs to. If
            provided, the sensitivity maps of the forward solution will
            be visualized. If ``None``, will use the value of ``subject``
            passed on report creation. If supplied, also pass ``subjects_dir``.
        subjects_dir : path-like | None
            The FreeSurfer ``SUBJECTS_DIR``.
        %(tags_report)s
        %(replace_report)s

        Notes
        -----
        .. versionadded:: 0.24.0
        """
        tags = _check_tags(tags)

        # Rendering (and id generation) is delegated to the helper.
        html, dom_id = self._render_forward(
            forward=forward, subject=subject, subjects_dir=subjects_dir,
            title=title, image_format=self.image_format, tags=tags
        )
        self._add_or_replace(
            dom_id=dom_id,
            name=title,
            tags=tags,
            html=html,
            replace=replace
        )
    @fill_doc
    def add_inverse_operator(self, inverse_operator, title, *, subject=None,
                             subjects_dir=None, trans=None,
                             tags=('inverse-operator',), replace=False):
        """Add an inverse operator.

        Parameters
        ----------
        inverse_operator : instance of InverseOperator | path-like
            The inverse operator to add to the report.
        title : str
            The title corresponding to the inverse operator object.
        subject : str | None
            The name of the FreeSurfer subject ``inverse_op`` belongs to. If
            provided, the source space the inverse solution is based on will
            be visualized. If ``None``, will use the value of ``subject``
            passed on report creation. If supplied, also pass ``subjects_dir``
            and ``trans``.
        subjects_dir : path-like | None
            The FreeSurfer ``SUBJECTS_DIR``.
        trans : path-like | instance of Transform | None
            The ``head -> MRI`` transformation for ``subject``.
        %(tags_report)s
        %(replace_report)s

        Notes
        -----
        .. versionadded:: 0.24.0
        """
        tags = _check_tags(tags)

        # subject and trans are only useful together; reject partial input.
        if ((subject is not None and trans is None) or
                (trans is not None and subject is None)):
            raise ValueError('Please pass subject AND trans, or neither.')

        # Rendering (and id generation) is delegated to the helper.
        html, dom_id = self._render_inverse_operator(
            inverse_operator=inverse_operator, subject=subject,
            subjects_dir=subjects_dir, trans=trans, title=title,
            image_format=self.image_format, tags=tags
        )
        self._add_or_replace(
            dom_id=dom_id,
            name=title,
            tags=tags,
            html=html,
            replace=replace
        )
@fill_doc
def add_trans(self, trans, *, info, title, subject=None, subjects_dir=None,
alpha=None, tags=('coregistration',), replace=False):
"""Add a coregistration visualization to the report.
Parameters
----------
trans : path-like | instance of Transform
The ``head -> MRI`` transformation to render.
info : path-like | instance of Info
The `~mne.Info` corresponding to ``trans``.
title : str
The title to add.
subject : str | None
The name of the FreeSurfer subject the ``trans```` belong to. The
name is not stored with the ``trans`` and therefore needs to be
specified. If ``None``, will use the value of ``subject`` passed on
report creation.
subjects_dir : path-like | None
The FreeSurfer ``SUBJECTS_DIR``.
alpha : float | None
The level of opacity to apply to the head surface. If a float, must
be between 0 and 1 (inclusive), where 1 means fully opaque. If
``None``, will use the MNE-Python default value.
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
html, dom_id = self._render_trans(
trans=trans,
info=info,
subject=subject,
subjects_dir=subjects_dir,
alpha=alpha,
title=title,
tags=tags
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html,
replace=replace
)
@fill_doc
def add_covariance(self, cov, *, info, title, tags=('covariance',),
replace=False):
"""Add covariance to the report.
Parameters
----------
cov : path-like | instance of Covariance
The `~mne.Covariance` to add to the report.
info : path-like | instance of Info
The `~mne.Info` corresponding to ``cov``.
title : str
The title corresponding to the `~mne.Covariance` object.
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
htmls = self._render_cov(
cov=cov,
info=info,
image_format=self.image_format,
tags=tags
)
cov_matrix_html, cov_svd_html = htmls
dom_id = self._get_dom_id()
html = _html_cov_element(
matrix=cov_matrix_html,
svd=cov_svd_html,
tags=tags,
title=title,
id=dom_id
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html,
replace=replace
)
@fill_doc
def add_events(self, events, title, *, event_id=None, sfreq, first_samp=0,
tags=('events',), replace=False):
"""Add events to the report.
Parameters
----------
events : path-like | array, shape (n_events, 3)
An MNE-Python events array.
title : str
The title corresponding to the events.
event_id : dict
A dictionary mapping event names (keys) to event codes (values).
sfreq : float
The sampling frequency used while recording.
first_samp : int
The first sample point in the recording. This corresponds to
``raw.first_samp`` on files created with Elekta/Neuromag systems.
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
html, dom_id = self._render_events(
events=events,
event_id=event_id,
sfreq=sfreq,
first_samp=first_samp,
title=title,
image_format=self.image_format,
tags=tags
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html,
replace=replace
)
@fill_doc
def add_projs(self, *, info, projs=None, title, topomap_kwargs=None,
tags=('ssp',), replace=False):
"""Render (SSP) projection vectors.
Parameters
----------
info : instance of Info | path-like
An `~mne.Info` structure or the path of a file containing one. This
is required to create the topographic plots.
projs : iterable of mne.Projection | path-like | None
The projection vectors to add to the report. Can be the path to a
file that will be loaded via `mne.read_proj`. If ``None``, the
projectors are taken from ``info['projs']``.
title : str
The title corresponding to the `~mne.Projection` object.
%(topomap_kwargs)s
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
output = self._render_ssp_projs(
info=info, projs=projs, title=title,
image_format=self.image_format, tags=tags,
topomap_kwargs=topomap_kwargs,
)
if output is None:
raise ValueError(
'The provided data does not contain digitization information. '
'However, this is required for rendering the SSP projectors.'
)
else:
html, dom_id = output
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html,
replace=replace
)
def _render_ica_overlay(self, *, ica, inst, image_format, tags):
if isinstance(inst, BaseRaw):
inst_ = inst
else: # Epochs
inst_ = inst.average()
fig = ica.plot_overlay(inst=inst_, show=False)
del inst_
tight_layout(fig=fig)
_constrain_fig_resolution(
fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
)
img = _fig_to_img(fig, image_format=image_format)
dom_id = self._get_dom_id()
overlay_html = _html_image_element(
img=img, div_klass='ica', img_klass='ica',
title='Original and cleaned signal', caption=None, show=True,
image_format=image_format, id=dom_id, tags=tags
)
return overlay_html
def _render_ica_properties(self, *, ica, picks, inst, n_jobs, image_format,
tags):
ch_type = _get_ch_type(inst=ica.info, ch_type=None)
if not _check_ch_locs(info=ica.info, ch_type=ch_type):
ch_type_name = _handle_default("titles")[ch_type]
warn(f'No {ch_type_name} channel locations found, cannot '
f'create ICA properties plots')
return ''
figs = _plot_ica_properties_as_arrays(
ica=ica, inst=inst, picks=picks, n_jobs=n_jobs
)
rel_explained_var = (ica.pca_explained_variance_ /
ica.pca_explained_variance_.sum())
cum_explained_var = np.cumsum(rel_explained_var)
captions = []
for idx, rel_var, cum_var in zip(
range(len(figs)),
rel_explained_var[:len(figs)],
cum_explained_var[:len(figs)]
):
caption = (
f'ICA component {idx}. '
f'Variance explained: {round(100 * rel_var)}%'
)
if idx == 0:
caption += '.'
else:
caption += f' ({round(100 * cum_var)}% cumulative).'
captions.append(caption)
title = 'ICA component properties'
# Only render a slider if we have more than 1 component.
if len(figs) == 1:
img = _fig_to_img(fig=figs[0], image_format=image_format)
dom_id = self._get_dom_id()
properties_html = _html_image_element(
img=img, div_klass='ica', img_klass='ica',
title=title, caption=captions[0], show=True,
image_format=image_format, id=dom_id, tags=tags
)
else:
properties_html, _ = self._render_slider(
figs=figs, imgs=None, title=title, captions=captions,
start_idx=0, image_format=image_format, tags=tags
)
return properties_html
def _render_ica_artifact_sources(self, *, ica, inst, artifact_type,
image_format, tags):
with use_browser_backend('matplotlib'):
fig = ica.plot_sources(inst=inst, show=False)
_constrain_fig_resolution(
fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
)
img = _fig_to_img(fig, image_format=image_format)
dom_id = self._get_dom_id()
html = _html_image_element(
img=img, div_klass='ica', img_klass='ica',
title=f'Original and cleaned {artifact_type} epochs', caption=None,
show=True, image_format=image_format, id=dom_id, tags=tags
)
return html
def _render_ica_artifact_scores(self, *, ica, scores, artifact_type,
image_format, tags):
fig = ica.plot_scores(scores=scores, title=None, show=False)
_constrain_fig_resolution(
fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
)
img = _fig_to_img(fig, image_format=image_format)
dom_id = self._get_dom_id()
html = _html_image_element(
img=img, div_klass='ica', img_klass='ica',
title=f'Scores for matching {artifact_type} patterns',
caption=None, show=True, image_format=image_format, id=dom_id,
tags=tags
)
return html
def _render_ica_components(self, *, ica, picks, image_format, tags):
ch_type = _get_ch_type(inst=ica.info, ch_type=None)
if not _check_ch_locs(info=ica.info, ch_type=ch_type):
ch_type_name = _handle_default("titles")[ch_type]
warn(f'No {ch_type_name} channel locations found, cannot '
f'create ICA component plots')
return ''
figs = ica.plot_components(
picks=picks, title='', colorbar=True, show=False
)
if not isinstance(figs, list):
figs = [figs]
for fig in figs:
tight_layout(fig=fig)
title = 'ICA component topographies'
if len(figs) == 1:
fig = figs[0]
_constrain_fig_resolution(
fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
)
img = _fig_to_img(fig=fig, image_format=image_format)
dom_id = self._get_dom_id()
topographies_html = _html_image_element(
img=img, div_klass='ica', img_klass='ica',
title=title, caption=None, show=True,
image_format=image_format, id=dom_id, tags=tags
)
else:
captions = [None] * len(figs)
topographies_html, _ = self._render_slider(
figs=figs, imgs=None, title=title, captions=captions,
start_idx=0, image_format=image_format, tags=tags
)
return topographies_html
    def _render_ica(self, *, ica, inst, picks, ecg_evoked,
                    eog_evoked, ecg_scores, eog_scores, title, image_format,
                    tags, n_jobs):
        """Assemble the complete HTML for an ICA decomposition.

        Builds, in order: a summary (repr) table, an overlay plot, ECG and
        EOG artifact score/source plots, component topographies, and
        per-component property plots, then wraps everything into a single
        ICA element. Returns ``(dom_id, html)``.
        """
        # Accept a path in place of a fitted ICA object.
        if _path_like(ica):
            ica = read_ica(ica)
        if ica.current_fit == 'unfitted':
            raise RuntimeError(
                'ICA must be fitted before it can be added to the report.'
            )
        if inst is None:
            pass  # no-op
        elif _path_like(inst):
            # We cannot know which data type to expect, so let's first try to
            # read a Raw, and if that fails, try to load Epochs
            fname = str(inst)  # could e.g. be a Path!
            raw_kwargs = dict(fname=fname, preload=False)
            if fname.endswith(('.fif', '.fif.gz')):
                raw_kwargs['allow_maxshield'] = True
            try:
                inst = read_raw(**raw_kwargs)
            except ValueError:
                try:
                    inst = read_epochs(fname)
                except ValueError:
                    raise ValueError(
                        f'The specified file, {fname}, does not seem to '
                        f'contain Raw data or Epochs'
                    )
        elif not inst.preload:
            # In-memory objects must be preloaded so plotting can access data.
            raise RuntimeError(
                'You passed an object to Report.add_ica() via the "inst" '
                'parameter that was not preloaded. Please preload the data '
                'via the load_data() method'
            )
        # Evoked inputs may also be given as paths; the first stored
        # condition (condition=0) of each file is used.
        if _path_like(ecg_evoked):
            ecg_evoked = read_evokeds(fname=ecg_evoked, condition=0)
        if _path_like(eog_evoked):
            eog_evoked = read_evokeds(fname=eog_evoked, condition=0)
        # Summary table
        dom_id = self._get_dom_id()
        repr_html = _html_element(
            div_klass='ica',
            id=dom_id,
            tags=tags,
            title='Info',
            html=ica._repr_html_()
        )
        # Overlay plot (only possible when data was provided)
        if inst:
            overlay_html = self._render_ica_overlay(
                ica=ica, inst=inst, image_format=image_format, tags=tags
            )
        else:
            overlay_html = ''
        # ECG artifact
        if ecg_scores is not None:
            ecg_scores_html = self._render_ica_artifact_scores(
                ica=ica, scores=ecg_scores, artifact_type='ECG',
                image_format=image_format, tags=tags
            )
        else:
            ecg_scores_html = ''
        if ecg_evoked:
            ecg_html = self._render_ica_artifact_sources(
                ica=ica, inst=ecg_evoked, artifact_type='ECG',
                image_format=image_format, tags=tags
            )
        else:
            ecg_html = ''
        # EOG artifact
        if eog_scores is not None:
            eog_scores_html = self._render_ica_artifact_scores(
                ica=ica, scores=eog_scores, artifact_type='EOG',
                image_format=image_format, tags=tags
            )
        else:
            eog_scores_html = ''
        if eog_evoked:
            eog_html = self._render_ica_artifact_sources(
                ica=ica, inst=eog_evoked, artifact_type='EOG',
                image_format=image_format, tags=tags
            )
        else:
            eog_html = ''
        # Component topography plots
        topographies_html = self._render_ica_components(
            ica=ica, picks=picks, image_format=image_format, tags=tags
        )
        # Properties plots (require the data instance)
        if inst:
            properties_html = self._render_ica_properties(
                ica=ica, picks=picks, inst=inst, n_jobs=n_jobs,
                image_format=image_format, tags=tags
            )
        else:
            properties_html = ''
        # Wrap all sections in one container; a fresh DOM id identifies it.
        dom_id = self._get_dom_id()
        html = _html_ica_element(
            id=dom_id,
            repr=repr_html,
            overlay=overlay_html,
            ecg=ecg_html,
            eog=eog_html,
            ecg_scores=ecg_scores_html,
            eog_scores=eog_scores_html,
            properties=properties_html,
            topographies=topographies_html,
            title=title,
            tags=tags
        )
        return dom_id, html
@fill_doc
def add_ica(
self, ica, title, *, inst, picks=None, ecg_evoked=None,
eog_evoked=None, ecg_scores=None, eog_scores=None, n_jobs=1,
tags=('ica',), replace=False
):
"""Add (a fitted) `~mne.preprocessing.ICA` to the report.
Parameters
----------
ica : path-like | instance of mne.preprocessing.ICA
The fitted ICA to add.
title : str
The title to add.
inst : path-like | mne.io.Raw | mne.Epochs | None
The data to use for visualization of the effects of ICA cleaning.
To only plot the ICA component topographies, explicitly pass
``None``.
%(picks_ica)s If ``None``, plot all components. This only affects
the behavior of the component topography and properties plots.
ecg_evoked, eog_evoked : path-line | mne.Evoked | None
Evoked signal based on ECG and EOG epochs, respectively. If passed,
will be used to visualize the effects of artifact rejection.
ecg_scores, eog_scores : array of float | list of array of float | None
The scores produced by :meth:`mne.preprocessing.ICA.find_bads_ecg`
and :meth:`mne.preprocessing.ICA.find_bads_eog`, respectively.
If passed, will be used to visualize the scoring for each ICA
component.
%(n_jobs)s
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
dom_id, html = self._render_ica(
ica=ica, inst=inst, picks=picks,
ecg_evoked=ecg_evoked, eog_evoked=eog_evoked,
ecg_scores=ecg_scores, eog_scores=eog_scores,
title=title, image_format=self.image_format, tags=tags,
n_jobs=n_jobs
)
self._add_or_replace(
name=title,
dom_id=dom_id,
tags=tags,
html=html,
replace=replace
)
def remove(self, *, title=None, tags=None, remove_all=False):
"""Remove elements from the report.
The element to remove is searched for by its title. Optionally, tags
may be specified as well to narrow down the search to elements that
have the supplied tags.
Parameters
----------
title : str
The title of the element(s) to remove.
.. versionadded:: 0.24.0
tags : array-like of str | str | None
If supplied, restrict the operation to elements with the supplied
tags.
.. versionadded:: 0.24.0
remove_all : bool
Controls the behavior if multiple elements match the search
criteria. If ``False`` (default) only the element last added to the
report will be removed. If ``True``, all matches will be removed.
.. versionadded:: 0.24.0
Returns
-------
removed_index : int | tuple of int | None
The indices of the elements that were removed, or ``None`` if no
element matched the search criteria. A tuple will always be
returned if ``remove_all`` was set to ``True`` and at least one
element was removed.
.. versionchanged:: 0.24.0
Returns tuple if ``remove_all`` is ``True``.
"""
remove_idx = []
for idx, element in enumerate(self._content):
if element.name == title:
if (tags is not None and
not all(t in element.tags for t in tags)):
continue
remove_idx.append(idx)
if not remove_idx:
remove_idx = None
elif not remove_all: # only remove last occurrence
remove_idx = remove_idx[-1]
del self._content[remove_idx]
else: # remove all occurrences
remove_idx = tuple(remove_idx)
self._content = [e for idx, e in enumerate(self._content)
if idx not in remove_idx]
return remove_idx
def _add_or_replace(self, *, name, dom_id, tags, html, replace=False):
"""Append HTML content report, or replace it if it already exists.
Parameters
----------
name : str
The entry under which the content shall be listed in the table of
contents. If it already exists, the content will be replaced if
``replace`` is ``True``
dom_id : str
A unique element ``id`` in the DOM.
tags : tuple of str
The tags associated with the added element.
html : str
The HTML.
replace : bool
Whether to replace existing content.
"""
assert isinstance(html, str) # otherwise later will break
new_content = _ContentElement(
name=name,
dom_id=dom_id,
tags=tags,
html=html
)
existing_names = [element.name for element in self._content]
if name in existing_names and replace:
# Find and replace existing content, starting from the last element
for idx, content_element in enumerate(self._content[::-1]):
if content_element.name == name:
self._content[idx] = new_content
return
raise RuntimeError('This should never happen')
else:
# Simply append new content (no replace)
self._content.append(new_content)
def _render_code(self, *, code, title, language, tags):
if isinstance(code, Path):
code = Path(code).read_text()
dom_id = self._get_dom_id()
html = _html_code_element(
tags=tags,
title=title,
id=dom_id,
code=code,
language=language
)
return html, dom_id
@fill_doc
def add_code(self, code, title, *, language='python', tags=('code',),
replace=False):
"""Add a code snippet (e.g., an analysis script) to the report.
Parameters
----------
code : str | pathlib.Path
The code to add to the report as a string, or the path to a file
as a `pathlib.Path` object.
.. note:: Paths must be passed as `pathlib.Path` object, since
strings will be treated as literal code.
title : str
The title corresponding to the code.
language : str
The programming language of ``code``. This will be used for syntax
highlighting. Can be ``'auto'`` to try to auto-detect the language.
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
language = language.lower()
html, dom_id = self._render_code(
code=code, title=title, language=language, tags=tags
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html,
replace=replace
)
@fill_doc
def add_sys_info(self, title, *, tags=('mne-sysinfo',)):
"""Add a MNE-Python system information to the report.
This is a convenience method that captures the output of
`mne.sys_info` and adds it to the report.
Parameters
----------
title : str
The title to assign.
%(tags_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
with contextlib.redirect_stdout(StringIO()) as f:
sys_info()
info = f.getvalue()
self.add_code(code=info, title=title, language='shell', tags=tags)
@fill_doc
def add_figure(self, fig, title, *, caption=None, image_format=None,
tags=('custom-figure',), replace=False):
"""Add figures to the report.
Parameters
----------
fig : matplotlib.figure.Figure | Figure3D | array | array-like of matplotlib.figure.Figure | array-like of Figure3D | array-like of array
One or more figures to add to the report. All figures must be an
instance of :class:`matplotlib.figure.Figure`,
:class:`mne.viz.Figure3D`, or :class:`numpy.ndarray`. If
multiple figures are passed, they will be added as "slides"
that can be navigated using buttons and a slider element.
title : str
The title corresponding to the figure(s).
caption : str | array-like of str | None
The caption(s) to add to the figure(s).
%(image_format_report)s
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
""" # noqa E501
tags = _check_tags(tags)
if image_format is None:
image_format = self.image_format
if hasattr(fig, '__len__') and not isinstance(fig, np.ndarray):
figs = tuple(fig)
else:
figs = (fig,)
for fig in figs:
if _path_like(fig):
raise TypeError(
f'It seems you passed a path to `add_figure`. However, '
f'only Matplotlib figures, PyVista scenes, and NumPy '
f'arrays are accepted. You may want to try `add_image` '
f'instead. The provided path was: {fig}'
)
del fig
if isinstance(caption, str):
captions = (caption,)
elif caption is None and len(figs) == 1:
captions = [None]
elif caption is None and len(figs) > 1:
captions = [f'Figure {i+1}' for i in range(len(figs))]
else:
captions = tuple(caption)
del caption
assert figs
if len(figs) == 1:
img = _fig_to_img(fig=figs[0], image_format=image_format,
own_figure=False)
dom_id = self._get_dom_id()
html = _html_image_element(
img=img, div_klass='custom-image', img_klass='custom-image',
title=title, caption=captions[0], show=True,
image_format=image_format, id=dom_id, tags=tags
)
else:
html, dom_id = self._render_slider(
figs=figs, imgs=None, title=title, captions=captions,
start_idx=0, image_format=image_format, tags=tags,
own_figure=False
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html,
replace=replace
)
@fill_doc
def add_image(self, image, title, *, caption=None, tags=('custom-image',),
replace=False):
"""Add an image (e.g., PNG or JPEG pictures) to the report.
Parameters
----------
image : path-like
The image to add.
title : str
Title corresponding to the images.
caption : str | None
If not ``None``, the caption to add to the image.
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
img_bytes = Path(image).expanduser().read_bytes()
img_base64 = base64.b64encode(img_bytes).decode('ascii')
del img_bytes # Free memory
img_format = Path(image).suffix.lower()[1:] # omit leading period
_check_option('Image format', value=img_format,
allowed_values=('png', 'gif', 'svg'))
dom_id = self._get_dom_id()
img_html = _html_image_element(
img=img_base64, div_klass='custom-image',
img_klass='custom-image', title=title, caption=caption,
show=True, image_format=img_format, id=dom_id,
tags=tags
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=img_html,
replace=replace
)
@fill_doc
def add_html(self, html, title, *, tags=('custom-html',), replace=False):
"""Add HTML content to the report.
Parameters
----------
html : str
The HTML content to add.
title : str
The title corresponding to ``html``.
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
dom_id = self._get_dom_id()
html_element = _html_element(
id=dom_id, html=html, title=title, tags=tags,
div_klass='custom-html'
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html_element,
replace=replace
)
@fill_doc
def add_bem(self, subject, title, *, subjects_dir=None, decim=2, width=512,
n_jobs=1, tags=('bem',), replace=False):
"""Render a visualization of the boundary element model (BEM) surfaces.
Parameters
----------
subject : str
The FreeSurfer subject name.
title : str
The title corresponding to the BEM image.
%(subjects_dir)s
decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
width : int
The width of the MRI images (in pixels). Larger values will have
clearer surface lines, but will create larger HTML files.
Typically a factor of 2 more than the number of MRI voxels along
each dimension (typically 512, default) is reasonable.
%(n_jobs)s
%(tags_report)s
%(replace_report)s
Notes
-----
.. versionadded:: 0.24.0
"""
tags = _check_tags(tags)
width = _ensure_int(width, 'width')
html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
decim=decim, n_jobs=n_jobs, width=width,
image_format=self.image_format, tags=tags)
dom_id = self._get_dom_id()
html = _html_element(
div_klass='bem',
id=dom_id,
tags=tags,
title=title,
html=html,
)
self._add_or_replace(
dom_id=dom_id,
name=title,
tags=tags,
html=html,
replace=replace
)
def _render_slider(self, *, figs, imgs, title, captions, start_idx,
image_format, tags, klass='', own_figure=True):
if figs is not None and imgs is not None:
raise ValueError('Must only provide either figs or imgs')
if figs is not None and len(figs) != len(captions):
raise ValueError(
f'Number of captions ({len(captions)}) must be equal to the '
f'number of figures ({len(figs)})'
)
elif imgs is not None and len(imgs) != len(captions):
raise ValueError(
f'Number of captions ({len(captions)}) must be equal to the '
f'number of images ({len(imgs)})'
)
elif figs: # figs can be None if imgs is provided
imgs = [_fig_to_img(fig=fig, image_format=image_format,
own_figure=own_figure)
for fig in figs]
dom_id = self._get_dom_id()
html = _html_slider_element(
id=dom_id,
title=title,
captions=captions,
tags=tags,
images=imgs,
image_format=image_format,
start_idx=start_idx,
klass=klass
)
return html, dom_id
###########################################################################
# global rendering functions
@verbose
def _init_render(self, verbose=None):
"""Initialize the renderer."""
inc_fnames = [
'jquery-3.6.0.min.js',
'bootstrap.bundle.min.js',
'bootstrap.min.css',
'bootstrap-table/bootstrap-table.min.js',
'bootstrap-table/bootstrap-table.min.css',
'bootstrap-table/bootstrap-table-copy-rows.min.js',
'bootstrap-table/bootstrap-table-export.min.js',
'bootstrap-table/tableExport.min.js',
'bootstrap-icons/bootstrap-icons.mne.min.css',
'highlightjs/highlight.min.js',
'highlightjs/atom-one-dark-reasonable.min.css'
]
include = list()
for inc_fname in inc_fnames:
logger.info(f'Embedding : {inc_fname}')
fname = html_include_dir / inc_fname
file_content = fname.read_text(encoding='utf-8')
if inc_fname.endswith('.js'):
include.append(
f'<script type="text/javascript">\n'
f'{file_content}\n'
f'</script>'
)
elif inc_fname.endswith('.css'):
include.append(
f'<style type="text/css">\n'
f'{file_content}\n'
f'</style>'
)
self.include = ''.join(include)
def _iterate_files(self, *, fnames, cov, sfreq, raw_butterfly,
n_time_points_evokeds, n_time_points_stcs, on_error,
stc_plot_kwargs, topomap_kwargs):
"""Parallel process in batch mode."""
assert self.data_path is not None
for fname in fnames:
logger.info(
f"Rendering : {op.join('…' + self.data_path[-20:], fname)}"
)
title = Path(fname).name
try:
if _endswith(fname, ['raw', 'sss', 'meg', 'nirs']):
self.add_raw(
raw=fname, title=title, psd=self.raw_psd,
projs=self.projs, butterfly=raw_butterfly
)
elif _endswith(fname, 'fwd'):
self.add_forward(
forward=fname, title=title, subject=self.subject,
subjects_dir=self.subjects_dir
)
elif _endswith(fname, 'inv'):
# XXX if we pass trans, we can plot the source space, too…
self.add_inverse_operator(
inverse_operator=fname, title=title
)
elif _endswith(fname, 'ave'):
evokeds = read_evokeds(fname)
titles = [
f'{Path(fname).name}: {e.comment}'
for e in evokeds
]
self.add_evokeds(
evokeds=fname, titles=titles, noise_cov=cov,
n_time_points=n_time_points_evokeds,
topomap_kwargs=topomap_kwargs
)
elif _endswith(fname, 'eve'):
if self.info_fname is not None:
sfreq = read_info(self.info_fname)['sfreq']
else:
sfreq = None
self.add_events(events=fname, title=title, sfreq=sfreq)
elif _endswith(fname, 'epo'):
self.add_epochs(epochs=fname, title=title)
elif _endswith(fname, 'cov') and self.info_fname is not None:
self.add_covariance(cov=fname, info=self.info_fname,
title=title)
elif _endswith(fname, 'proj') and self.info_fname is not None:
self.add_projs(info=self.info_fname, projs=fname,
title=title, topomap_kwargs=topomap_kwargs)
# XXX TODO We could render ICA components here someday
# elif _endswith(fname, 'ica') and ica:
# pass
elif (_endswith(fname, 'trans') and
self.info_fname is not None and
self.subjects_dir is not None and
self.subject is not None):
self.add_trans(
trans=fname, info=self.info_fname,
subject=self.subject, subjects_dir=self.subjects_dir,
title=title
)
elif (fname.endswith('-lh.stc') or
fname.endswith('-rh.stc') and
self.info_fname is not None and
self.subjects_dir is not None and
self.subject is not None):
self.add_stc(
stc=fname, title=title, subject=self.subject,
subjects_dir=self.subjects_dir,
n_time_points=n_time_points_stcs,
stc_plot_kwargs=stc_plot_kwargs
)
except Exception as e:
if on_error == 'warn':
warn(f'Failed to process file {fname}:\n"{e}"')
elif on_error == 'raise':
raise
@verbose
def parse_folder(self, data_path, pattern=None, n_jobs=1, mri_decim=2,
sort_content=True, *, on_error='warn',
image_format=None, render_bem=True,
n_time_points_evokeds=None, n_time_points_stcs=None,
raw_butterfly=True, stc_plot_kwargs=None,
topomap_kwargs=None, verbose=None):
r"""Render all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : None | str | list of str
Filename pattern(s) to include in the report.
For example, ``[\*raw.fif, \*ave.fif]`` will include `~mne.io.Raw`
as well as `~mne.Evoked` files. If ``None``, include all supported
file formats.
.. versionchanged:: 0.23
Include supported non-FIFF files by default.
%(n_jobs)s
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_content : bool
If ``True``, sort the content based on tags in the order:
raw -> events -> epochs -> evoked -> covariance -> coregistration
-> bem -> forward-solution -> inverse-operator -> source-estimate.
.. versionadded:: 0.24.0
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
%(image_format_report)s
.. versionadded:: 0.15
render_bem : bool
If True (default), try to render the BEM.
.. versionadded:: 0.16
n_time_points_evokeds, n_time_points_stcs : int | None
The number of equidistant time points to render for `~mne.Evoked`
and `~mne.SourceEstimate` data, respectively. If ``None``,
will render each `~mne.Evoked` at 21 and each `~mne.SourceEstimate`
at 51 time points, unless the respective data contains fewer time
points, in which call all will be rendered.
.. versionadded:: 0.24.0
raw_butterfly : bool
Whether to render butterfly plots for (decimated) `~mne.io.Raw`
data.
.. versionadded:: 0.24.0
%(stc_plot_kwargs_report)s
.. versionadded:: 0.24.0
%(topomap_kwargs)s
.. versionadded:: 0.24.0
%(verbose)s
"""
_validate_type(data_path, 'path-like', 'data_path')
data_path = str(data_path)
image_format = _check_image_format(self, image_format)
_check_option('on_error', on_error, ['ignore', 'warn', 'raise'])
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = f'MNE Report for {self.data_path[-20:]}'
if pattern is None:
pattern = [f'*{ext}' for ext in SUPPORTED_READ_RAW_EXTENSIONS]
elif not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
data_path = _check_fname(
fname=self.data_path, overwrite='read', must_exist=True,
name='Directory or folder', need_dir=True
)
fnames.extend(sorted(_recursive_search(data_path, p)))
if not fnames and not render_bem:
raise RuntimeError(f'No matching files found in {self.data_path}')
fnames_to_remove = []
for fname in fnames:
# For split files, only keep the first one.
if _endswith(fname, ('raw', 'sss', 'meg')):
kwargs = dict(fname=fname, preload=False)
if fname.endswith(('.fif', '.fif.gz')):
kwargs['allow_maxshield'] = True
inst = read_raw(**kwargs)
if len(inst.filenames) > 1:
fnames_to_remove.extend(inst.filenames[1:])
# For STCs, only keep one hemisphere
elif fname.endswith('-lh.stc') or fname.endswith('-rh.stc'):
first_hemi_fname = fname
if first_hemi_fname.endswidth('-lh.stc'):
second_hemi_fname = (first_hemi_fname
.replace('-lh.stc', '-rh.stc'))
else:
second_hemi_fname = (first_hemi_fname
.replace('-rh.stc', '-lh.stc'))
if (second_hemi_fname in fnames and
first_hemi_fname not in fnames_to_remove):
fnames_to_remove.extend(first_hemi_fname)
else:
continue
fnames_to_remove = list(set(fnames_to_remove)) # Drop duplicates
for fname in fnames_to_remove:
if fname in fnames:
del fnames[fnames.index(fname)]
del fnames_to_remove
if self.info_fname is not None:
info = read_info(self.info_fname, verbose=False)
sfreq = info['sfreq']
else:
# only warn if relevant
if any(_endswith(fname, 'cov') for fname in fnames):
warn('`info_fname` not provided. Cannot render '
'-cov.fif(.gz) files.')
if any(_endswith(fname, 'trans') for fname in fnames):
warn('`info_fname` not provided. Cannot render '
'-trans.fif(.gz) files.')
if any(_endswith(fname, 'proj') for fname in fnames):
warn('`info_fname` not provided. Cannot render '
'-proj.fif(.gz) files.')
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
# render plots in parallel; check that n_jobs <= # of files
logger.info(f'Iterating over {len(fnames)} potential files '
f'(this may take some ')
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(self._iterate_files, use_jobs)
parallel(
p_fun(
fnames=fname, cov=cov, sfreq=sfreq,
raw_butterfly=raw_butterfly,
n_time_points_evokeds=n_time_points_evokeds,
n_time_points_stcs=n_time_points_stcs, on_error=on_error,
stc_plot_kwargs=stc_plot_kwargs, topomap_kwargs=topomap_kwargs,
) for fname in np.array_split(fnames, use_jobs)
)
# Render BEM
if render_bem:
if self.subjects_dir is not None and self.subject is not None:
logger.info('Rendering BEM')
self.add_bem(
subject=self.subject, subjects_dir=self.subjects_dir,
title='BEM surfaces', decim=mri_decim, n_jobs=n_jobs
)
else:
warn('`subjects_dir` and `subject` not provided. Cannot '
'render MRI and -trans.fif(.gz) files.')
if sort_content:
self._content = self._sort(
content=self._content, order=CONTENT_ORDER
)
def __getstate__(self):
    """Get the state of the report as a dictionary.

    Returns
    -------
    state : dict
        Mapping of parameter name to value for every name returned by
        ``self._get_state_params()``; used for HDF5 serialization.
    """
    state = dict()
    for param_name in self._get_state_params():
        param_val = getattr(self, param_name)

        # Workaround as h5io doesn't support dataclasses: flatten the
        # content elements to plain dicts before serialization.
        if param_name == '_content':
            assert all(dataclasses.is_dataclass(val) for val in param_val)
            param_val = [dataclasses.asdict(val) for val in param_val]

        state[param_name] = param_val
    return state
def __setstate__(self, state):
    """Restore the report's attributes from a state dictionary."""
    for name in self._get_state_params():
        value = state[name]
        # Re-create the dataclass instances that were flattened to plain
        # dicts for (HDF5) serialization.
        if name == '_content':
            value = [_ContentElement(**element) for element in value]
        setattr(self, name, value)
    return state
@verbose
def save(self, fname=None, open_browser=True, overwrite=False,
         sort_content=False, *, verbose=None):
    """Save the report and optionally open it in browser.

    Parameters
    ----------
    fname : path-like | None
        Output filename. If the name ends with ``.h5`` or ``.hdf5``, the
        report is saved in HDF5 format, so it can later be loaded again
        with :func:`open_report`. For any other suffix, the report will be
        saved in HTML format. If ``None`` and :meth:`Report.parse_folder`
        was **not** called, the report is saved as ``report.html`` in the
        current working directory. If ``None`` and
        :meth:`Report.parse_folder` **was** used, the report is saved as
        ``report.html`` inside the ``data_path`` supplied to
        :meth:`Report.parse_folder`.
    open_browser : bool
        Whether to open the rendered HTML report in the default web browser
        after saving. This is ignored when writing an HDF5 file.
    %(overwrite)s
    sort_content : bool
        If ``True``, sort the content based on tags before saving in the
        order:
        raw -> events -> epochs -> evoked -> covariance -> coregistration
        -> bem -> forward-solution -> inverse-operator -> source-estimate.

        .. versionadded:: 0.24.0
    %(verbose)s

    Returns
    -------
    fname : str
        The file name to which the report was saved.
    """
    # Fall back to data_path (or the current working directory) when no
    # filename was given.
    if fname is None:
        if self.data_path is None:
            self.data_path = os.getcwd()
            warn(f'`data_path` not provided. Using {self.data_path} '
                 f'instead')
        fname = op.join(self.data_path, 'report.html')

    fname = _check_fname(fname, overwrite=overwrite, name=fname)
    fname = op.realpath(fname)  # resolve symlinks

    if sort_content:
        self._content = self._sort(
            content=self._content, order=CONTENT_ORDER
        )

    # Interactively ask for confirmation if the target exists and
    # overwriting was not explicitly requested.
    if not overwrite and op.isfile(fname):
        msg = (f'Report already exists at location {fname}. '
               f'Overwrite it (y/[n])? ')
        answer = _safe_input(msg, alt='pass overwrite=True')
        if answer.lower() == 'y':
            overwrite = True

    _, ext = op.splitext(fname)
    is_hdf5 = ext.lower() in ['.h5', '.hdf5']

    if overwrite or not op.isfile(fname):
        logger.info(f'Saving report to : {fname}')

        if is_hdf5:
            _, write_hdf5 = _import_h5io_funcs()
            write_hdf5(fname, self.__getstate__(), overwrite=overwrite,
                       title='mnepython')
        else:
            # Add header, TOC, and footer.
            header_html = _html_header_element(
                title=self.title, include=self.include, lang=self.lang,
                tags=self.tags, js=JAVASCRIPT, css=CSS,
                mne_logo_img=mne_logo
            )
            toc_html = _html_toc_element(content_elements=self._content)
            # Suppress spurious warnings while rendering the footer.
            with warnings.catch_warnings(record=True):
                warnings.simplefilter('ignore')
                footer_html = _html_footer_element(
                    mne_version=MNE_VERSION,
                    date=time.strftime("%B %d, %Y")
                )
            html = [header_html, toc_html, *self.html, footer_html]
            Path(fname).write_text(data=''.join(html), encoding='utf-8')

    # Don't open a browser window while building the documentation.
    building_doc = os.getenv('_MNE_BUILDING_DOC', '').lower() == 'true'
    if open_browser and not is_hdf5 and not building_doc:
        webbrowser.open_new_tab('file://' + fname)

    if self.fname is None:
        self.fname = fname
    return fname
def __enter__(self):
    """Return the report itself; entering the context is a no-op."""
    return self
def __exit__(self, type, value, traceback):
    """Persist the report to disk when the context block is left."""
    if self.fname is None:
        return
    self.save(self.fname, open_browser=False, overwrite=True)
@staticmethod
def _sort(content, order):
"""Reorder content to reflect "natural" ordering."""
content_unsorted = content.copy()
content_sorted = []
content_sorted_idx = []
del content
# First arrange content with known tags in the predefined order
for tag in order:
for idx, content in enumerate(content_unsorted):
if tag in content.tags:
content_sorted_idx.append(idx)
content_sorted.append(content)
# Now simply append the rest (custom tags)
content_remaining = [
content for idx, content in enumerate(content_unsorted)
if idx not in content_sorted_idx
]
content_sorted = [*content_sorted, *content_remaining]
return content_sorted
def _render_one_bem_axis(self, *, mri_fname, surfaces,
                         image_format, orientation, decim=2, n_jobs=1,
                         width=512, tags):
    """Render one axis of bem contours (only PNG).

    Plots every ``decim``-th MRI slice along ``orientation`` with the BEM
    surface contours overlaid and wraps the images in a slider.
    """
    import nibabel as nib
    nim = nib.load(mri_fname)
    data = _reorient_image(nim)[0]
    axis = _mri_orientation(orientation)[0]
    n_slices = data.shape[axis]
    sl = np.arange(0, n_slices, decim)  # decimated slice indices
    logger.debug(f'Rendering BEM {orientation} with {len(sl)} slices')
    figs = _get_bem_contour_figs_as_arrays(
        sl=sl, n_jobs=n_jobs, mri_fname=mri_fname, surfaces=surfaces,
        orientation=orientation, src=None, show=False,
        show_orientation='always', width=width
    )

    # Render the slider; start it in the middle of the slice stack.
    captions = [f'Slice index: {i * decim}' for i in range(len(figs))]
    start_idx = int(round(len(figs) / 2))
    html, _ = self._render_slider(
        figs=figs,
        imgs=None,
        captions=captions,
        title=orientation,
        image_format=image_format,
        start_idx=start_idx,
        tags=tags,
        klass='bem col-md'
    )
    return html
def _render_raw_butterfly_segments(
        self, *, raw: BaseRaw, n_segments, scalings, image_format, tags
):
    """Render ``n_segments`` equally-spaced 1-second butterfly segments.

    Parameters
    ----------
    raw : BaseRaw
        The raw data to plot.
    n_segments : int
        Number of 1-second segments to render.
    scalings : dict | None
        Channel scalings passed to ``raw.plot``.
    image_format : str
        'png' or 'svg'.
    tags : tuple of str
        Tags to attach to the generated HTML elements.

    Returns
    -------
    str
        HTML for a slider containing one image per segment.
    """
    # Pick n_segments + 2 equally-spaced 1-second time slices, but omit
    # the first and last slice, so we end up with n_segments slices
    n = n_segments + 2
    times = np.linspace(raw.times[0], raw.times[-1], n)[1:-1]
    t_starts = np.array([max(t - 0.5, 0) for t in times])
    t_stops = np.array([min(t + 0.5, raw.times[-1]) for t in times])
    durations = t_stops - t_starts

    # Remove annotations before plotting for better performance.
    # Ensure we later restore raw.annotations even in case of an exception
    orig_annotations = raw.annotations.copy()
    try:
        raw.set_annotations(None)

        # Create the figure once and re-use it for performance reasons
        with use_browser_backend('matplotlib'):
            fig = raw.plot(
                butterfly=True, show_scrollbars=False, start=t_starts[0],
                duration=durations[0], scalings=scalings, show=False
            )
        _constrain_fig_resolution(
            fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
        )
        images = [_fig_to_img(fig=fig, image_format=image_format)]

        # Advance the existing figure through the remaining segments
        # instead of re-plotting from scratch each time.
        for start, duration in zip(t_starts[1:], durations[1:]):
            fig.mne.t_start = start
            fig.mne.duration = duration
            fig._update_hscroll()
            fig._redraw(annotations=False)
            images.append(_fig_to_img(fig=fig, image_format=image_format))
    finally:
        # NOTE: the original `except Exception: raise` was a no-op and has
        # been removed; try/finally alone guarantees restoration on both
        # success and failure.
        raw.set_annotations(orig_annotations)
        del orig_annotations

    captions = [f'Segment {i+1} of {len(images)}'
                for i in range(len(images))]

    html, _ = self._render_slider(
        figs=None, imgs=images, title='Time series', captions=captions,
        start_idx=0, image_format=image_format, tags=tags
    )
    return html
def _render_raw(self, *, raw, add_psd, add_projs, butterfly,
                butterfly_scalings, image_format, tags, topomap_kwargs):
    """Render raw.

    Returns a list of four HTML snippets: info table, PSD image,
    butterfly segments, and SSP projector plots (empty strings where
    the respective element is disabled).
    """
    # Accept either a loaded Raw instance or a path to a raw file.
    if isinstance(raw, BaseRaw):
        fname = raw.filenames[0]
    else:
        fname = str(raw)  # could e.g. be a Path!
        kwargs = dict(fname=fname, preload=False)
        if fname.endswith(('.fif', '.fif.gz')):
            kwargs['allow_maxshield'] = True
        raw = read_raw(**kwargs)

    # Summary table
    dom_id = self._get_dom_id()
    repr_html = _html_element(
        div_klass='raw',
        id=dom_id,
        tags=tags,
        title='Info',
        html=raw._repr_html_()
    )

    # Butterfly plot
    if butterfly:
        # butterfly=True means "use the default of 10 segments";
        # an integer requests that many segments.
        n_butterfly_segments = 10 if butterfly is True else butterfly
        butterfly_imgs_html = self._render_raw_butterfly_segments(
            raw=raw, scalings=butterfly_scalings,
            n_segments=n_butterfly_segments,
            image_format=image_format, tags=tags
        )
    else:
        butterfly_imgs_html = ''

    # PSD
    if isinstance(add_psd, dict):
        dom_id = self._get_dom_id()
        if raw.info['lowpass'] is not None:
            # Show a bit beyond the lowpass edge.
            fmax = raw.info['lowpass'] + 15
            # Must not exceed half the sampling frequency
            if fmax > 0.5 * raw.info['sfreq']:
                fmax = np.inf
        else:
            fmax = np.inf

        fig = raw.plot_psd(fmax=fmax, show=False, **add_psd)
        tight_layout(fig=fig)
        _constrain_fig_resolution(
            fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
        )
        img = _fig_to_img(fig, image_format=image_format)
        psd_img_html = _html_image_element(
            img=img, div_klass='raw', img_klass='raw',
            title='PSD', caption=None, show=True,
            image_format=image_format, id=dom_id, tags=tags
        )
    else:
        psd_img_html = ''

    ssp_projs_html = self._ssp_projs_html(
        add_projs=add_projs, info=raw, image_format=image_format,
        tags=tags, topomap_kwargs=topomap_kwargs)

    return [repr_html, psd_img_html, butterfly_imgs_html, ssp_projs_html]
def _ssp_projs_html(self, *, add_projs, info, image_format, tags,
                    topomap_kwargs):
    """Return HTML for SSP projector plots, or '' if nothing to show."""
    if not add_projs:
        return ''
    rendered = self._render_ssp_projs(
        info=info, projs=None, title='SSP Projectors',
        image_format=image_format, tags=tags,
        topomap_kwargs=topomap_kwargs,
    )
    # None means "no projectors found".
    if rendered is None:
        return ''
    html, _ = rendered
    return html
def _render_ssp_projs(self, *, info, projs, title, image_format, tags,
                      topomap_kwargs):
    """Render SSP projector topomaps.

    Returns ``None`` if no projectors are present, ``('', None)`` if
    channel locations are missing, otherwise ``(html, dom_id)``.
    """
    # `info` may be an Info, an object carrying one (e.g. Raw), or a
    # filename to read it from.
    if isinstance(info, Info):  # no-op
        pass
    elif hasattr(info, 'info'):  # try to get the file name
        if isinstance(info, BaseRaw):
            fname = info.filenames[0]
        # elif isinstance(info, (Evoked, BaseEpochs)):
        #     fname = info.filename
        else:
            fname = ''
        info = info.info
    else:  # read from a file
        fname = info
        info = read_info(fname, verbose=False)

    # Likewise, `projs` may be omitted (taken from info), a list, or a
    # filename to read them from.
    if projs is None:
        projs = info['projs']
    elif not isinstance(projs, list):
        fname = projs
        projs = read_proj(fname)

    if not projs:  # Abort mission!
        return None

    if not _check_ch_locs(info=info):
        warn('No channel locations found, cannot create projector plots')
        return '', None

    topomap_kwargs = self._validate_topomap_kwargs(topomap_kwargs)
    fig = plot_projs_topomap(
        projs=projs, info=info, colorbar=True, vlim='joint',
        show=False, **topomap_kwargs
    )
    # TODO This seems like a bad idea, better to provide a way to set a
    # desired size in plot_projs_topomap, but that uses prepare_trellis...
    # hard to see how (6, 4) could work in all number-of-projs by
    # number-of-channel-types conditions...
    fig.set_size_inches((6, 4))
    tight_layout(fig=fig)
    _constrain_fig_resolution(
        fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
    )
    img = _fig_to_img(fig=fig, image_format=image_format)

    dom_id = self._get_dom_id()
    html = _html_image_element(
        img=img, div_klass='ssp', img_klass='ssp',
        title=title, caption=None, show=True, image_format=image_format,
        id=dom_id, tags=tags
    )
    return html, dom_id
def _render_forward(self, *, forward, subject, subjects_dir, title,
                    image_format, tags):
    """Render forward solution."""
    # Accept either a Forward instance or a filename to read one from.
    if not isinstance(forward, Forward):
        forward = read_forward_solution(forward)

    subject = self.subject if subject is None else subject
    subjects_dir = (self.subjects_dir if subjects_dir is None
                    else subjects_dir)

    # XXX Todo: render sensitivity maps once implemented; until then the
    # slot stays empty regardless of subject (both original branches were
    # identical).
    sensitivity_maps_html = ''

    dom_id = self._get_dom_id()
    html = _html_forward_sol_element(
        id=dom_id,
        repr=forward._repr_html_(),
        sensitivity_maps=sensitivity_maps_html,
        title=title,
        tags=tags
    )
    return html, dom_id
def _render_inverse_operator(self, *, inverse_operator, subject,
                             subjects_dir, trans, title, image_format,
                             tags):
    """Render inverse operator."""
    # Accept instances or filenames for both the operator and the trans.
    if not isinstance(inverse_operator, InverseOperator):
        inverse_operator = read_inverse_operator(inverse_operator)
    if trans is not None and not isinstance(trans, Transform):
        trans = read_trans(trans)

    subject = self.subject if subject is None else subject
    subjects_dir = (self.subjects_dir if subjects_dir is None
                    else subjects_dir)

    # XXX Todo: render the source-space alignment (would need subject and
    # trans); until implemented the source-space image slot stays empty.
    src_img_html = ''

    dom_id = self._get_dom_id()
    html = _html_inverse_operator_element(
        id=dom_id,
        repr=inverse_operator._repr_html_(),
        source_space=src_img_html,
        title=title,
        tags=tags,
    )
    return html, dom_id
def _render_evoked_joint(self, evoked, ch_types, image_format, tags,
                         topomap_kwargs):
    """Render one joint plot (butterfly + topomaps) per channel type."""
    htmls = []
    for ch_type in ch_types:
        # Joint plots need sensor positions; skip types without them.
        if not _check_ch_locs(info=evoked.info, ch_type=ch_type):
            ch_type_name = _handle_default("titles")[ch_type]
            warn(f'No {ch_type_name} channel locations found, cannot '
                 f'create joint plot')
            continue

        with use_log_level(False):
            fig = evoked.copy().pick(ch_type, verbose=False).plot_joint(
                ts_args=dict(gfp=True),
                title=None,
                show=False,
                topomap_args=topomap_kwargs,
            )

        _constrain_fig_resolution(
            fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
        )
        img = _fig_to_img(fig=fig, image_format=image_format)
        title = f'Time course ({_handle_default("titles")[ch_type]})'
        dom_id = self._get_dom_id()

        htmls.append(
            _html_image_element(
                img=img,
                div_klass='evoked evoked-joint',
                img_klass='evoked evoked-joint',
                tags=tags,
                title=title,
                caption=None,
                show=True,
                image_format=image_format,
                id=dom_id
            )
        )

    html = '\n'.join(htmls)
    return html
def _plot_one_evoked_topomap_timepoint(
        self, *, evoked, time, ch_types, vmin, vmax, topomap_kwargs
):
    """Plot topomaps of all channel types at a single time point.

    Returns the rendered figure as an image array (so the per-timepoint
    results can be stitched into a slider by the caller).
    """
    import matplotlib.pyplot as plt

    # One topomap axis plus one (narrow) colorbar axis per channel type.
    fig, ax = plt.subplots(
        1, len(ch_types) * 2,
        gridspec_kw={
            'width_ratios': [8, 0.5] * len(ch_types)
        },
        figsize=(2.5 * len(ch_types), 2)
    )
    _constrain_fig_resolution(
        fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
    )
    # Map each channel type to its (topomap axis, colorbar axis) pair.
    ch_type_ax_map = dict(
        zip(ch_types,
            [(ax[i], ax[i + 1]) for i in
             range(0, 2 * len(ch_types) - 1, 2)])
    )

    for ch_type in ch_types:
        evoked.plot_topomap(
            times=[time], ch_type=ch_type,
            vmin=vmin[ch_type], vmax=vmax[ch_type],
            axes=ch_type_ax_map[ch_type], show=False,
            **topomap_kwargs
        )
        ch_type_ax_map[ch_type][0].set_title(ch_type)

    tight_layout(fig=fig)

    # Rasterize to a PNG byte stream, then read it back as an array.
    with BytesIO() as buff:
        fig.savefig(
            buff,
            format='png',
            pad_inches=0
        )
        plt.close(fig)
        buff.seek(0)
        fig_array = plt.imread(buff, format='png')

    return fig_array
def _render_evoked_topomap_slider(self, *, evoked, ch_types, n_time_points,
                                  image_format, tags, topomap_kwargs,
                                  n_jobs):
    """Render a slider of evoked topomaps across time.

    Raises
    ------
    ValueError
        If more time points are requested than the Evoked contains.
    """
    if n_time_points is None:
        n_time_points = min(len(evoked.times), 21)
    elif n_time_points > len(evoked.times):
        raise ValueError(
            f'The requested number of time points ({n_time_points}) '
            f'exceeds the time points in the provided Evoked object '
            f'({len(evoked.times)})'
        )

    if n_time_points == 1:  # only a single time point, pick the first one
        times = [evoked.times[0]]
    else:
        times = np.linspace(
            start=evoked.tmin,
            stop=evoked.tmax,
            num=n_time_points
        )

    t_zero_idx = np.abs(times).argmin()  # index closest to zero

    # global min and max values for each channel type
    scalings = dict(eeg=1e6, grad=1e13, mag=1e15)

    vmax = dict()
    vmin = dict()
    for ch_type in ch_types:
        if not _check_ch_locs(info=evoked.info, ch_type=ch_type):
            ch_type_name = _handle_default("titles")[ch_type]
            warn(f'No {ch_type_name} channel locations found, cannot '
                 f'create topography plots')
            continue

        # Symmetric limits around zero — except gradiometers, whose
        # (RMS) values are always non-negative.
        vmax[ch_type] = (np.abs(evoked.copy()
                                .pick(ch_type, verbose=False)
                                .data)
                         .max()) * scalings[ch_type]
        if ch_type == 'grad':
            vmin[ch_type] = 0
        else:
            vmin[ch_type] = -vmax[ch_type]

    if not (vmin and vmax):  # we only had EEG data and no digpoints
        html = ''
        dom_id = None
    else:
        topomap_kwargs = self._validate_topomap_kwargs(topomap_kwargs)

        # Render all time points in parallel; never spawn more jobs than
        # there are time points.
        use_jobs = min(n_jobs, max(1, len(times)))
        parallel, p_fun, _ = parallel_func(
            func=self._plot_one_evoked_topomap_timepoint,
            n_jobs=use_jobs
        )
        fig_arrays = parallel(
            p_fun(
                evoked=evoked, time=time, ch_types=ch_types,
                vmin=vmin, vmax=vmax, topomap_kwargs=topomap_kwargs
            ) for time in times
        )

        captions = [f'Time point: {round(t, 3):0.3f} s' for t in times]
        html, dom_id = self._render_slider(
            figs=fig_arrays,
            imgs=None,
            captions=captions,
            title='Topographies',
            image_format=image_format,
            start_idx=t_zero_idx,
            tags=tags
        )

    return html, dom_id
def _render_evoked_gfp(self, evoked, ch_types, image_format, tags):
    """Render global field power traces, one subplot per channel type."""
    # Make legend labels shorter by removing the multiplicative factors
    pattern = r'\d\.\d* × '
    label = evoked.comment
    if label is None:
        label = ''
    for match in re.findall(pattern=pattern, string=label):
        label = label.replace(match, '')

    dom_id = self._get_dom_id()

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(len(ch_types), 1, sharex=True)
    if len(ch_types) == 1:
        ax = [ax]  # wrap so indexing below also works for one subplot
    for idx, ch_type in enumerate(ch_types):
        plot_compare_evokeds(
            evokeds={
                label: evoked.copy().pick(ch_type, verbose=False)
            },
            ci=None, truncate_xaxis=False,
            truncate_yaxis=False, legend=False,
            axes=ax[idx], show=False
        )
        ax[idx].set_title(ch_type)

        # Hide x axis label for all but the last subplot
        if idx < len(ch_types) - 1:
            ax[idx].set_xlabel(None)

    tight_layout(fig=fig)
    _constrain_fig_resolution(
        fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
    )
    img = _fig_to_img(fig=fig, image_format=image_format)
    title = 'Global field power'
    html = _html_image_element(
        img=img,
        id=dom_id,
        tags=tags,
        div_klass='evoked evoked-gfp',
        img_klass='evoked evoked-gfp',
        title=title,
        caption=None,
        image_format=image_format,
        show=True
    )
    return html
def _render_evoked_whitened(self, evoked, *, noise_cov, image_format,
                            tags):
    """Render the noise-covariance-whitened evoked plot."""
    dom_id = self._get_dom_id()
    fig = evoked.plot_white(noise_cov=noise_cov, show=False)
    tight_layout(fig=fig)
    _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH,
                              max_res=MAX_IMG_RES)
    img = _fig_to_img(fig=fig, image_format=image_format)
    return _html_image_element(
        img=img, id=dom_id, div_klass='evoked',
        img_klass='evoked evoked-whitened', title='Whitened',
        caption=None, show=True, image_format=image_format, tags=tags
    )
def _render_evoked(self, evoked, noise_cov, add_projs, n_time_points,
                   image_format, tags, topomap_kwargs, n_jobs):
    """Render all evoked visualizations.

    Returns a 5-tuple of HTML snippets: joint plot, topomap slider, GFP,
    whitened plot, and SSP projectors (empty strings where disabled).
    """
    ch_types = _get_ch_types(evoked)

    joint_html = self._render_evoked_joint(
        evoked=evoked, ch_types=ch_types,
        image_format=image_format, tags=tags,
        topomap_kwargs=topomap_kwargs,
    )
    slider_html, _ = self._render_evoked_topomap_slider(
        evoked=evoked, ch_types=ch_types,
        n_time_points=n_time_points,
        image_format=image_format,
        tags=tags, topomap_kwargs=topomap_kwargs,
        n_jobs=n_jobs
    )
    gfp_html = self._render_evoked_gfp(
        evoked=evoked, ch_types=ch_types, image_format=image_format,
        tags=tags
    )

    # The whitened plot only makes sense with a noise covariance.
    if noise_cov is not None:
        whitened_html = self._render_evoked_whitened(
            evoked=evoked,
            noise_cov=noise_cov,
            image_format=image_format,
            tags=tags
        )
    else:
        whitened_html = ''

    # SSP projectors
    ssp_projs_html = self._ssp_projs_html(
        add_projs=add_projs, info=evoked, image_format=image_format,
        tags=tags, topomap_kwargs=topomap_kwargs)

    logger.debug('Evoked: done')
    return joint_html, slider_html, gfp_html, whitened_html, ssp_projs_html
def _render_events(self, events, *, event_id, sfreq, first_samp, title,
                   image_format, tags):
    """Render an events plot; `events` may be an array or a filename."""
    if not isinstance(events, np.ndarray):
        events = read_events(filename=events)

    fig = plot_events(events=events, event_id=event_id, sfreq=sfreq,
                      first_samp=first_samp, show=False)
    _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH,
                              max_res=MAX_IMG_RES)
    img = _fig_to_img(fig=fig, image_format=image_format)

    dom_id = self._get_dom_id()
    html = _html_image_element(
        img=img, id=dom_id, div_klass='events', img_klass='events',
        tags=tags, title=title, caption=None, show=True,
        image_format=image_format
    )
    return html, dom_id
def _epochs_psd_img_html(
        self, *, epochs, psd, image_format, tags
):
    """Render the epochs PSD image.

    ``psd`` may be ``True`` (use all epochs), a duration in seconds (use
    just enough equally-spaced epochs to cover it), or falsy (return an
    empty string).
    """
    if psd:
        epoch_duration = epochs.tmax - epochs.tmin

        if psd is True:  # Entire time range -> all epochs
            epochs_for_psd = epochs  # Avoid creating a copy
        else:  # Only a subset of epochs
            signal_duration = len(epochs) * epoch_duration
            n_epochs_required = int(
                np.ceil(psd / epoch_duration)
            )
            if n_epochs_required > len(epochs):
                raise ValueError(
                    f'You requested to calculate PSD on a duration of '
                    f'{psd:.3f} sec, but all your epochs '
                    f'are only {signal_duration:.1f} sec long'
                )
            # Select equally-spaced epochs spanning the whole recording.
            epochs_idx = np.round(
                np.linspace(
                    start=0,
                    stop=len(epochs) - 1,
                    num=n_epochs_required
                )
            ).astype(int)
            # Edge case: there might be duplicate indices due to rounding?
            epochs_idx_unique = np.unique(epochs_idx)
            if len(epochs_idx_unique) != len(epochs_idx):
                duration = round(
                    len(epochs_idx_unique) * epoch_duration, 1
                )
                warn(f'Using {len(epochs_idx_unique)} epochs, only '
                     f'covering {duration:.1f} sec of data')
                del duration
            epochs_for_psd = epochs[epochs_idx_unique]

        dom_id = self._get_dom_id()
        if epochs.info['lowpass'] is not None:
            # Show a bit past the lowpass edge.
            fmax = epochs.info['lowpass'] + 15
            # Must not exceed half the sampling frequency
            if fmax > 0.5 * epochs.info['sfreq']:
                fmax = np.inf
        else:
            fmax = np.inf

        fig = epochs_for_psd.plot_psd(fmax=fmax, show=False)
        _constrain_fig_resolution(
            fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
        )
        img = _fig_to_img(fig=fig, image_format=image_format)
        duration = round(epoch_duration * len(epochs_for_psd), 1)
        caption = (
            f'PSD calculated from {len(epochs_for_psd)} epochs '
            f'({duration:.1f} sec).'
        )
        psd_img_html = _html_image_element(
            img=img, id=dom_id, div_klass='epochs', img_klass='epochs',
            show=True, image_format=image_format, title='PSD',
            caption=caption, tags=tags
        )
    else:
        psd_img_html = ''

    return psd_img_html
def _render_epochs_metadata(self, *, epochs, tags):
    """Render the epochs metadata DataFrame as an interactive table."""
    metadata = epochs.metadata.copy()

    # Ensure we have a named index
    if not metadata.index.name:
        metadata.index.name = 'Epoch #'

    assert metadata.index.is_unique
    index_name = metadata.index.name  # store for later use
    metadata = metadata.reset_index()  # We want "proper" columns only
    html = metadata.to_html(
        border=0,
        index=False,
        show_dimensions=True,
        justify='unset',
        float_format=lambda x: f'{round(x, 3):.3f}',
        classes='table table-hover table-striped '
                'table-sm table-responsive small'
    )
    del metadata

    # Massage the table such that it works nicely with bootstrap-table
    htmls = html.split('\n')
    header_pattern = '<th>(.*)</th>'

    for idx, html in enumerate(htmls):
        if '<table' in html:
            # Enable search, column toggling, row selection, CSV export…
            htmls[idx] = html.replace(
                '<table',
                '<table '
                'id="mytable" '
                'data-toggle="table" '
                f'data-unique-id="{index_name}" '
                'data-search="true" '  # search / filter
                'data-search-highlight="true" '
                'data-show-columns="true" '  # show/hide columns
                'data-show-toggle="true" '  # allow card view
                'data-show-columns-toggle-all="true" '
                'data-click-to-select="true" '
                'data-show-copy-rows="true" '
                'data-show-export="true" '  # export to a file
                'data-export-types="[csv]" '
                "data-export-options='{\"fileName\": \"metadata\"}' "
                'data-icon-size="sm" '
                'data-height="400"'
            )
            continue
        elif '<tr' in html:
            # Add checkbox for row selection
            htmls[idx] = (
                f'{html}\n'
                f'<th data-field="state" data-checkbox="true"></th>'
            )
            continue

        col_headers = re.findall(pattern=header_pattern, string=html)
        if col_headers:
            # Make columns sortable
            assert len(col_headers) == 1
            col_header = col_headers[0]
            htmls[idx] = html.replace(
                '<th>',
                f'<th data-field="{col_header.lower()}" '
                f'data-sortable="true">'
            )

    html = '\n'.join(htmls)
    dom_id = self._get_dom_id()
    metadata_html = _html_element(
        div_klass='epochs',
        id=dom_id,
        tags=tags,
        title='Metadata',
        html=html
    )
    return metadata_html
def _render_epochs(
        self, *, epochs, psd, add_projs, topomap_kwargs, drop_log_ignore,
        image_format, tags
):
    """Render epochs.

    Returns a 6-tuple of HTML snippets: info table, metadata table,
    ERP/ERF images, drop log, PSD, and SSP projectors (empty strings
    where the respective element is disabled/unavailable).
    """
    # Accept either a loaded Epochs instance or a filename.
    if isinstance(epochs, BaseEpochs):
        fname = epochs.filename
    else:
        fname = epochs
        epochs = read_epochs(fname, preload=False)

    # Summary table
    dom_id = self._get_dom_id()
    repr_html = _html_element(
        div_klass='epochs',
        id=dom_id,
        tags=tags,
        title='Info',
        html=epochs._repr_html_()
    )

    # Metadata table
    if epochs.metadata is not None:
        metadata_html = self._render_epochs_metadata(
            epochs=epochs, tags=tags
        )
    else:
        metadata_html = ''

    # ERP/ERF image(s)
    ch_types = _get_ch_types(epochs)
    erp_img_htmls = []
    epochs.load_data()

    for ch_type in ch_types:
        with use_log_level(False):
            figs = epochs.copy().pick(ch_type, verbose=False).plot_image(
                show=False
            )

        assert len(figs) == 1
        fig = figs[0]
        _constrain_fig_resolution(
            fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
        )
        img = _fig_to_img(fig=fig, image_format=image_format)
        # ERF for MEG channel types, ERP for EEG-like ones.
        if ch_type in ('mag', 'grad'):
            title_start = 'ERF image'
        else:
            assert 'eeg' in ch_type
            title_start = 'ERP image'

        title = (f'{title_start} '
                 f'({_handle_default("titles")[ch_type]})')

        dom_id = self._get_dom_id()
        erp_img_htmls.append(
            _html_image_element(
                img=img,
                div_klass='epochs erp-image',
                img_klass='epochs erp-image',
                tags=tags,
                title=title,
                caption=None,
                show=True,
                image_format=image_format,
                id=dom_id
            )
        )
    erp_imgs_html = '\n'.join(erp_img_htmls)

    # Drop log (only meaningful once bad epochs were actually dropped)
    if epochs._bad_dropped:
        title = 'Drop log'
        dom_id = self._get_dom_id()

        if epochs.drop_log_stats(ignore=drop_log_ignore) == 0:  # No drops
            drop_log_img_html = _html_element(
                html='No epochs exceeded the rejection thresholds. '
                     'Nothing was dropped.',
                id=dom_id, div_klass='epochs', title=title, tags=tags
            )
        else:
            fig = epochs.plot_drop_log(
                subject=self.subject, ignore=drop_log_ignore, show=False
            )
            tight_layout(fig=fig)
            _constrain_fig_resolution(
                fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES
            )
            img = _fig_to_img(fig=fig, image_format=image_format)
            drop_log_img_html = _html_image_element(
                img=img, id=dom_id, div_klass='epochs', img_klass='epochs',
                show=True, image_format=image_format, title=title,
                caption=None, tags=tags
            )
    else:
        drop_log_img_html = ''

    psd_img_html = self._epochs_psd_img_html(
        epochs=epochs, psd=psd, image_format=image_format, tags=tags
    )
    ssp_projs_html = self._ssp_projs_html(
        add_projs=add_projs, info=epochs, image_format=image_format,
        tags=tags, topomap_kwargs=topomap_kwargs
    )
    return (repr_html, metadata_html, erp_imgs_html, drop_log_img_html,
            psd_img_html, ssp_projs_html)
def _render_cov(self, cov, *, info, image_format, tags):
    """Render the covariance matrix and its singular-value spectrum."""
    # Accept instances or filenames for both the covariance and the info.
    if not isinstance(cov, Covariance):
        cov = read_cov(cov)
    if not isinstance(info, Info):
        info = read_info(info)

    fig_cov, fig_svd = plot_cov(cov=cov, info=info, show=False,
                                show_svd=True)

    htmls = []
    for fig, title in ((fig_cov, 'Covariance matrix'),
                       (fig_svd, 'Singular values')):
        _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH,
                                  max_res=MAX_IMG_RES)
        img = _fig_to_img(fig=fig, image_format=image_format)
        htmls.append(_html_image_element(
            img=img, id=self._get_dom_id(), div_klass='covariance',
            img_klass='covariance', title=title, caption=None,
            image_format=image_format, tags=tags, show=True
        ))
    return htmls
def _render_trans(self, *, trans, info, subject, subjects_dir, alpha,
                  title, tags):
    """Render the head ↔ MRI coregistration alignment (PNG only)."""
    # Accept instances or filenames for both the trans and the info.
    if not isinstance(trans, Transform):
        trans = read_trans(trans)
    if not isinstance(info, Info):
        info = read_info(info)

    plot_kwargs = dict(
        info=info, trans=trans, subject=subject,
        subjects_dir=subjects_dir, dig=True,
        meg=['helmet', 'sensors'], show_axes=True, coord_frame='mri'
    )
    img, caption = _iterate_trans_views(
        function=plot_alignment, alpha=alpha, **plot_kwargs)

    dom_id = self._get_dom_id()
    html = _html_image_element(
        img=img, id=dom_id, div_klass='trans', img_klass='trans',
        title=title, caption=caption, show=True, image_format='png',
        tags=tags
    )
    return html, dom_id
def _render_stc(self, *, stc, title, subject, subjects_dir, n_time_points,
                image_format, tags, stc_plot_kwargs):
    """Render STC.

    Plots the source estimate at ``n_time_points`` equally-spaced time
    points — via the 3D backend if available, otherwise with the
    per-hemisphere Matplotlib fallback — and wraps them in a slider.
    """
    # Resolve the subject name: explicit argument > Report default >
    # the subject stored in the STC itself.
    if isinstance(stc, SourceEstimate):
        if subject is None:
            subject = self.subject  # supplied during Report init
            if not subject:
                subject = stc.subject  # supplied when loading STC
                if not subject:
                    raise ValueError(
                        'Please specify the subject name, as it cannot '
                        'be found in stc.subject. You may wish to pass '
                        'the "subject" parameter to read_source_estimate()'
                    )
        else:
            subject = subject
    else:
        fname = stc
        stc = read_source_estimate(fname=fname, subject=subject)

    subjects_dir = (self.subjects_dir if subjects_dir is None
                    else subjects_dir)

    if n_time_points is None:
        n_time_points = min(len(stc.times), 51)
    elif n_time_points > len(stc.times):
        raise ValueError(
            f'The requested number of time points ({n_time_points}) '
            f'exceeds the time points in the provided STC object '
            f'({len(stc.times)})'
        )
    if n_time_points == 1:  # only a single time point, pick the first one
        times = [stc.times[0]]
    else:
        times = np.linspace(
            start=stc.times[0],
            stop=stc.times[-1],
            num=n_time_points
        )
    t_zero_idx = np.abs(times).argmin()  # index of time closest to zero

    # Plot using 3d backend if available, and use Matplotlib
    # otherwise.
    import matplotlib.pyplot as plt
    stc_plot_kwargs = _handle_default(
        'report_stc_plot_kwargs', stc_plot_kwargs
    )
    stc_plot_kwargs.update(subject=subject, subjects_dir=subjects_dir)

    if get_3d_backend() is not None:
        brain = stc.plot(**stc_plot_kwargs)
        brain._renderer.plotter.subplot(0, 0)
        backend_is_3d = True
    else:
        backend_is_3d = False

    figs = []
    for t in times:
        with warnings.catch_warnings():
            warnings.filterwarnings(
                action='ignore',
                message='More than 20 figures have been opened',
                category=RuntimeWarning)

            if backend_is_3d:
                # Advance the 3D view to `t` and grab a screenshot.
                brain.set_time(t)
                fig, ax = plt.subplots(figsize=(4.5, 4.5))
                ax.imshow(brain.screenshot(time_viewer=True, mode='rgb'))
                ax.axis('off')
                tight_layout(fig=fig)
                _constrain_fig_resolution(
                    fig,
                    max_width=stc_plot_kwargs['size'][0],
                    max_res=MAX_IMG_RES
                )
                figs.append(fig)
                plt.close(fig)
            else:
                # Matplotlib fallback: render each hemisphere separately.
                fig_lh = plt.figure()
                fig_rh = plt.figure()

                brain_lh = stc.plot(
                    views='lat', hemi='lh',
                    initial_time=t,
                    backend='matplotlib',
                    subject=subject,
                    subjects_dir=subjects_dir,
                    figure=fig_lh
                )
                brain_rh = stc.plot(
                    views='lat', hemi='rh',
                    initial_time=t,
                    subject=subject,
                    subjects_dir=subjects_dir,
                    backend='matplotlib',
                    figure=fig_rh
                )
                tight_layout(fig=fig_lh)  # TODO is this necessary?
                tight_layout(fig=fig_rh)  # TODO is this necessary?
                _constrain_fig_resolution(
                    fig_lh,
                    max_width=stc_plot_kwargs['size'][0],
                    max_res=MAX_IMG_RES
                )
                _constrain_fig_resolution(
                    fig_rh,
                    max_width=stc_plot_kwargs['size'][0],
                    max_res=MAX_IMG_RES
                )
                figs.append(brain_lh)
                figs.append(brain_rh)
                plt.close(fig_lh)
                plt.close(fig_rh)

    if backend_is_3d:
        brain.close()
    else:
        brain_lh.close()
        brain_rh.close()

    captions = [f'Time point: {round(t, 3):0.3f} s' for t in times]
    html, dom_id = self._render_slider(
        figs=figs,
        imgs=None,
        captions=captions,
        title=title,
        image_format=image_format,
        start_idx=t_zero_idx,
        tags=tags
    )
    return html, dom_id
def _render_bem(self, *, subject, subjects_dir, decim, n_jobs, width=512,
                image_format, tags):
    """Render MRI slices with BEM contours for all views (PNG only)."""
    if subjects_dir is None:
        subjects_dir = self.subjects_dir
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)

    # Locate the T1 MRI; warn (but continue) if it is missing.
    mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
    if not op.isfile(mri_fname):
        warn(f'MRI file "{mri_fname}" does not exist')

    # Collect the BEM surfaces to overlay.
    bem_path = op.join(subjects_dir, subject, 'bem')
    surfaces = _get_bem_plotting_surfaces(bem_path)
    if not surfaces:
        warn('No BEM surfaces found, rendering empty MRI')

    parts = ['<div class="row">']
    parts.extend(
        self._render_one_bem_axis(
            mri_fname=mri_fname, surfaces=surfaces,
            orientation=orientation, decim=decim, n_jobs=n_jobs,
            width=width, image_format=image_format, tags=tags
        )
        for orientation in _BEM_VIEWS
    )
    parts.append('</div>')
    return '\n'.join(parts)
def _clean_tags(tags):
if isinstance(tags, str):
tags = (tags,)
# Replace any whitespace characters with dashes
tags_cleaned = tuple(re.sub(r'[\s*]', '-', tag) for tag in tags)
return tags_cleaned
def _recursive_search(path, pattern):
    """Walk ``path`` and collect files matching ``pattern``.

    Only files whose extension is in ``VALID_EXTENSIONS`` are kept; paths
    are returned fully resolved.
    """
    matches = list()
    for dirpath, _, files in os.walk(path):
        for fname in fnmatch.filter(files, pattern):
            # only the following file types are supported
            # this ensures equitable distribution of jobs
            if fname.endswith(VALID_EXTENSIONS):
                matches.append(op.realpath(op.join(dirpath, fname)))
    return matches
###############################################################################
# Scraper for sphinx-gallery
_SCRAPER_TEXT = '''
.. only:: builder_html
.. container:: row
.. rubric:: The `HTML document <{0}>`__ written by :meth:`mne.Report.save`:
.. raw:: html
<iframe class="sg_report" sandbox="allow-scripts" src="{0}"></iframe>
''' # noqa: E501
# Adapted from fa-file-code
_FA_FILE_CODE = '<svg class="sg_report" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="#dec" d="M149.9 349.1l-.2-.2-32.8-28.9 32.8-28.9c3.6-3.2 4-8.8.8-12.4l-.2-.2-17.4-18.6c-3.4-3.6-9-3.7-12.4-.4l-57.7 54.1c-3.7 3.5-3.7 9.4 0 12.8l57.7 54.1c1.6 1.5 3.8 2.4 6 2.4 2.4 0 4.8-1 6.4-2.8l17.4-18.6c3.3-3.5 3.1-9.1-.4-12.4zm220-251.2L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zM256 51.9l76.1 76.1H256zM336 464H48V48h160v104c0 13.3 10.7 24 24 24h104zM209.6 214c-4.7-1.4-9.5 1.3-10.9 6L144 408.1c-1.4 4.7 1.3 9.6 6 10.9l24.4 7.1c4.7 1.4 9.6-1.4 10.9-6L240 231.9c1.4-4.7-1.3-9.6-6-10.9zm24.5 76.9l.2.2 32.8 28.9-32.8 28.9c-3.6 3.2-4 8.8-.8 12.4l.2.2 17.4 18.6c3.3 3.5 8.9 3.7 12.4.4l57.7-54.1c3.7-3.5 3.7-9.4 0-12.8l-57.7-54.1c-3.5-3.3-9.1-3.2-12.4.4l-17.4 18.6c-3.3 3.5-3.1 9.1.4 12.4z" class=""></path></svg>' # noqa: E501
class _ReportScraper(object):
    """Scrape Report outputs.

    Only works properly if conf.py is configured properly and the file
    is written to the same directory as the example script.
    """

    def __init__(self):
        # `app` is assigned externally (the Sphinx application);
        # `files` maps source report paths to output paths for copyfiles().
        self.app = None
        self.files = dict()

    def __repr__(self):
        return '<ReportScraper>'

    def __call__(self, block, block_vars, gallery_conf):
        """Sphinx-gallery scraper hook: embed any saved Report as an iframe.

        Returns the reST snippet for the first matching report, or ''.
        """
        for report in block_vars['example_globals'].values():
            # Only handle reports that were actually saved to HTML, and
            # only for the HTML builder.
            if (isinstance(report, Report) and
                    report.fname is not None and
                    report.fname.endswith('.html') and
                    gallery_conf['builder_name'] == 'html'):
                # Thumbnail
                image_path_iterator = block_vars['image_path_iterator']
                img_fname = next(image_path_iterator)
                img_fname = img_fname.replace('.png', '.svg')
                with open(img_fname, 'w') as fid:
                    fid.write(_FA_FILE_CODE)
                # copy HTML file
                html_fname = op.basename(report.fname)
                out_dir = op.join(
                    self.app.builder.outdir,
                    op.relpath(op.dirname(block_vars['target_file']),
                               self.app.builder.srcdir))
                os.makedirs(out_dir, exist_ok=True)
                out_fname = op.join(out_dir, html_fname)
                assert op.isfile(report.fname)
                # Defer the actual copy to copyfiles() (build-finished).
                self.files[report.fname] = out_fname
                # embed links/iframe
                data = _SCRAPER_TEXT.format(html_fname)
                return data
        return ''

    def copyfiles(self, *args, **kwargs):
        """Copy every recorded report HTML file to its output location."""
        for key, value in self.files.items():
            copyfile(key, value)
| 36.02037 | 938 | 0.553302 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.