code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# -*- coding: utf-8 -*-
# landportal-data-access-api
# Copyright (c)2014, WESO, Web Semantics Oviedo.
# Written by <NAME>.
# This file is part of landportal-data-access-api.
#
# landportal-data-access-api is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License.
#
# landportal-data-access-api is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with landportal-data-access-api. If not, see <http://www.gnu.org/licenses/>.
# landportal-data-access-api is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>
"""
Created on 03/02/2014
This file make the setup configuration for the Flask-Server
:author: <NAME>
"""
from flask.app import Flask
from flask.ext.cache import Cache
from flask.ext.track_usage import TrackUsage
from flask.ext.track_usage.storage.sql import SQLStorage
from flask_sqlalchemy import SQLAlchemy
# Flask application object shared by the whole API.
app = Flask(__name__)
# NOTE(review): the URI carries no host/database/credentials here —
# presumably they are filled in elsewhere or at deploy time; confirm.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://'
app.config['TRACK_USAGE_USE_FREEGEOIP'] = False
app.config['TRACK_USAGE_INCLUDE_OR_EXCLUDE_VIEWS'] = 'exclude'
# Cache backed by a local memcached instance.
cache = Cache(app, config={'CACHE_TYPE': 'memcached', 'CACHE_MEMCACHED_SERVERS': ['localhost:11211']})
app.config['DEBUG'] = True
# SQLAlchemy ORM bound to the app above.
db = SQLAlchemy(app)
# Persist per-request usage statistics into the `api_usage` SQL table.
sql_database_storage = SQLStorage(app.config['SQLALCHEMY_DATABASE_URI'], table_name='api_usage')
t = TrackUsage(app, sql_database_storage)
# Imported last so the views module sees the fully configured `app`.
from app import views
|
[
"flask.app.Flask",
"flask.ext.track_usage.storage.sql.SQLStorage",
"flask_sqlalchemy.SQLAlchemy",
"flask.ext.cache.Cache",
"flask.ext.track_usage.TrackUsage"
] |
[((1261, 1276), 'flask.app.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1266, 1276), False, 'from flask.app import Flask\n'), ((1462, 1561), 'flask.ext.cache.Cache', 'Cache', (['app'], {'config': "{'CACHE_TYPE': 'memcached', 'CACHE_MEMCACHED_SERVERS': ['localhost:11211']}"}), "(app, config={'CACHE_TYPE': 'memcached', 'CACHE_MEMCACHED_SERVERS': [\n 'localhost:11211']})\n", (1467, 1561), False, 'from flask.ext.cache import Cache\n'), ((1589, 1604), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (1599, 1604), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((1628, 1701), 'flask.ext.track_usage.storage.sql.SQLStorage', 'SQLStorage', (["app.config['SQLALCHEMY_DATABASE_URI']"], {'table_name': '"""api_usage"""'}), "(app.config['SQLALCHEMY_DATABASE_URI'], table_name='api_usage')\n", (1638, 1701), False, 'from flask.ext.track_usage.storage.sql import SQLStorage\n'), ((1706, 1743), 'flask.ext.track_usage.TrackUsage', 'TrackUsage', (['app', 'sql_database_storage'], {}), '(app, sql_database_storage)\n', (1716, 1743), False, 'from flask.ext.track_usage import TrackUsage\n')]
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
class LabelSmoothSoftmaxCEV1(nn.Module):
    '''
    Label-smoothing cross entropy implemented with plain autograd ops.
    See LabelSmoothSoftmaxCEV2 for the variant with hand-derived gradients.
    '''
    def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
        super(LabelSmoothSoftmaxCEV1, self).__init__()
        self.lb_smooth = lb_smooth
        self.reduction = reduction
        self.lb_ignore = ignore_index
        self.log_softmax = nn.LogSoftmax(dim=1)
    def forward(self, logits, label):
        '''
        args: logits: tensor of shape (N, C, H, W)
        args: label: tensor of shape(N, H, W)
        '''
        # Compute in fp32 to avoid NaN from low-precision log-softmax.
        logits = logits.float()
        # Build the smoothed one-hot target without tracking gradients.
        with torch.no_grad():
            n_cls = logits.size(1)
            target = label.clone().detach()
            ignore_mask = target == self.lb_ignore
            n_valid = (ignore_mask == 0).sum()
            # Ignored positions get a dummy class index of 0 so scatter works.
            target[ignore_mask] = 0
            on_value = 1. - self.lb_smooth
            off_value = self.lb_smooth / n_cls
            smoothed = torch.empty_like(logits).fill_(off_value)
            smoothed = smoothed.scatter_(1, target.unsqueeze(1), on_value).detach()
        log_probs = self.log_softmax(logits)
        loss = -torch.sum(log_probs * smoothed, dim=1)
        # Zero out ignored positions before reducing.
        loss[ignore_mask] = 0
        if self.reduction == 'mean':
            loss = loss.sum() / n_valid
        if self.reduction == 'sum':
            loss = loss.sum()
        return loss
class LSRCrossEntropyFunction(torch.autograd.Function):
    # Label-smoothed cross entropy with a hand-written backward pass;
    # wrapped by LabelSmoothSoftmaxCEV2 below.
    @staticmethod
    def forward(ctx, logits, label, lb_smooth, reduction, lb_ignore):
        # prepare label
        num_classes = logits.size(1)
        label = label.clone().detach()
        ignore = label == lb_ignore
        n_valid = (ignore == 0).sum()
        # Ignored positions get a dummy class index 0 so scatter_ is valid.
        label[ignore] = 0
        lb_pos, lb_neg = 1. - lb_smooth, lb_smooth / num_classes
        # Smoothed one-hot target: lb_neg everywhere, lb_pos at the true class.
        label = torch.empty_like(logits).fill_(
            lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
        # Build an advanced index covering all class entries at each ignored
        # position, and zero those target rows so they contribute nothing.
        ignore = ignore.nonzero()
        _, M = ignore.size()
        a, *b = ignore.chunk(M, dim=1)
        mask = [a, torch.arange(label.size(1)), *b]
        label[mask] = 0
        # NOTE(review): coeff equals the per-position sum of the smoothed
        # target, used to scale softmax in backward().
        coeff = (num_classes - 1) * lb_neg + lb_pos
        # Stash everything backward() needs on the autograd context.
        ctx.coeff = coeff
        ctx.mask = mask
        ctx.logits = logits
        ctx.label = label
        ctx.reduction = reduction
        ctx.n_valid = n_valid
        # loss = -sum(target * log_softmax(logits)) over the class dim.
        loss = torch.log_softmax(logits, dim=1).neg_().mul_(label).sum(dim=1)
        if reduction == 'mean':
            loss = loss.sum().div_(n_valid)
        if reduction == 'sum':
            loss = loss.sum()
        return loss
    @staticmethod
    def backward(ctx, grad_output):
        coeff = ctx.coeff
        mask = ctx.mask
        logits = ctx.logits
        label = ctx.label
        reduction = ctx.reduction
        n_valid = ctx.n_valid
        # NOTE(review): analytic gradient — softmax scaled by coeff minus the
        # smoothed target; ignored positions are re-zeroed via `mask`.
        scores = torch.softmax(logits, dim=1).mul_(coeff)
        scores[mask] = 0
        if reduction == 'none':
            grad = scores.sub_(label).mul_(grad_output.unsqueeze(1))
        elif reduction == 'sum':
            grad = scores.sub_(label).mul_(grad_output)
        elif reduction == 'mean':
            grad = scores.sub_(label).mul_(grad_output.div_(n_valid))
        # One gradient slot per forward() argument; only logits is differentiable.
        return grad, None, None, None, None, None
class LabelSmoothSoftmaxCEV2(nn.Module):
    """Label-smoothing cross entropy backed by the hand-derived-gradient
    LSRCrossEntropyFunction autograd op."""
    def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
        super(LabelSmoothSoftmaxCEV2, self).__init__()
        self.lb_smooth = lb_smooth
        self.reduction = reduction
        self.lb_ignore = ignore_index
    def forward(self, logits, label):
        # Delegate the whole computation to the custom autograd function.
        fn_args = (logits, label, self.lb_smooth, self.reduction, self.lb_ignore)
        return LSRCrossEntropyFunction.apply(*fn_args)
if __name__ == '__main__':
    # Sanity check: train two identical resnet18s, one with the autograd
    # loss (V1) and one with the hand-derived-gradient loss (V2); their
    # weights and losses should stay (near-)identical.
    import torchvision
    import torch
    import numpy as np
    import random
    # Seed every RNG so both runs see identical data and init.
    torch.manual_seed(15)
    random.seed(15)
    np.random.seed(15)
    torch.backends.cudnn.deterministic = True
    net1 = torchvision.models.resnet18(pretrained=True)
    net2 = torchvision.models.resnet18(pretrained=True)
    criteria1 = LabelSmoothSoftmaxCEV1(lb_smooth=0.1, ignore_index=255)
    criteria2 = LabelSmoothSoftmaxCEV2(lb_smooth=0.1, ignore_index=255)
    net1.cuda()
    net2.cuda()
    net1.train()
    net2.train()
    criteria1.cuda()
    criteria2.cuda()
    optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
    optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)
    bs = 128
    for it in range(300000):
        # NOTE(review): width 244 (vs height 224) looks like a typo, but it
        # is kept as-is; resnet18 accepts non-square inputs so the
        # comparison still holds.
        inten = torch.randn(bs, 3, 224, 244).cuda()
        # Inject a few extreme pixel values to stress numerical stability.
        inten[0, 1, 0, 0] = 255
        inten[0, 0, 1, 2] = 255
        inten[0, 2, 5, 28] = 255
        lbs = torch.randint(0, 1000, (bs, )).cuda()
        logits = net1(inten)
        loss1 = criteria1(logits, lbs)
        optim1.zero_grad()
        loss1.backward()
        optim1.step()
        # print(net1.fc.weight[:, :5])
        logits = net2(inten)
        loss2 = criteria2(logits, lbs)
        optim2.zero_grad()
        loss2.backward()
        optim2.step()
        # print(net2.fc.weight[:, :5])
        with torch.no_grad():
            if (it+1) % 50 == 0:
                print('iter: {}, ================='.format(it+1))
                # print(net1.fc.weight.numel())
                # Mean absolute weight differences; should stay close to 0.
                print(torch.mean(torch.abs(net1.fc.weight - net2.fc.weight)).item())
                print(torch.mean(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
                # print(loss1.item())
                # print(loss2.item())
                print(loss1.item() - loss2.item())
|
[
"torchvision.models.resnet18",
"torch.log_softmax",
"torch.randint",
"numpy.random.seed",
"torch.nn.LogSoftmax",
"torch.manual_seed",
"torch.randn",
"torch.softmax",
"torch.abs",
"random.seed",
"torch.empty_like",
"torch.no_grad",
"torch.sum"
] |
[((3876, 3897), 'torch.manual_seed', 'torch.manual_seed', (['(15)'], {}), '(15)\n', (3893, 3897), False, 'import torch\n'), ((3902, 3917), 'random.seed', 'random.seed', (['(15)'], {}), '(15)\n', (3913, 3917), False, 'import random\n'), ((3922, 3940), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (3936, 3940), True, 'import numpy as np\n'), ((3998, 4042), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4025, 4042), False, 'import torchvision\n'), ((4054, 4098), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4081, 4098), False, 'import torchvision\n'), ((514, 534), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (527, 534), True, 'import torch.nn as nn\n'), ((797, 812), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (810, 812), False, 'import torch\n'), ((1276, 1306), 'torch.sum', 'torch.sum', (['(logs * label)'], {'dim': '(1)'}), '(logs * label, dim=1)\n', (1285, 1306), False, 'import torch\n'), ((5087, 5102), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5100, 5102), False, 'import torch\n'), ((2903, 2931), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (2916, 2931), False, 'import torch\n'), ((4525, 4553), 'torch.randn', 'torch.randn', (['bs', '(3)', '(224)', '(244)'], {}), '(bs, 3, 224, 244)\n', (4536, 4553), False, 'import torch\n'), ((4672, 4701), 'torch.randint', 'torch.randint', (['(0)', '(1000)', '(bs,)'], {}), '(0, 1000, (bs,))\n', (4685, 4701), False, 'import torch\n'), ((1925, 1949), 'torch.empty_like', 'torch.empty_like', (['logits'], {}), '(logits)\n', (1941, 1949), False, 'import torch\n'), ((2442, 2474), 'torch.log_softmax', 'torch.log_softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (2459, 2474), False, 'import torch\n'), ((5285, 5327), 'torch.abs', 'torch.abs', (['(net1.fc.weight - net2.fc.weight)'], {}), 
'(net1.fc.weight - net2.fc.weight)\n', (5294, 5327), False, 'import torch\n'), ((5370, 5418), 'torch.abs', 'torch.abs', (['(net1.conv1.weight - net2.conv1.weight)'], {}), '(net1.conv1.weight - net2.conv1.weight)\n', (5379, 5418), False, 'import torch\n'), ((1114, 1138), 'torch.empty_like', 'torch.empty_like', (['logits'], {}), '(logits)\n', (1130, 1138), False, 'import torch\n')]
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import os
import shlex
import sys
import textwrap
from typing import Mapping
from pants.base.build_root import BuildRoot
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode
from pants.base.specs import Specs
from pants.bsp.context import BSPContext
from pants.bsp.protocol import BSPConnection
from pants.bsp.util_rules.lifecycle import BSP_VERSION, BSPLanguageSupport
from pants.build_graph.build_configuration import BuildConfiguration
from pants.engine.environment import CompleteEnvironment
from pants.engine.internals.session import SessionValues
from pants.engine.unions import UnionMembership
from pants.goal.builtin_goal import BuiltinGoal
from pants.init.engine_initializer import GraphSession
from pants.option.option_types import BoolOption, FileListOption, StrListOption
from pants.option.option_value_container import OptionValueContainer
from pants.option.options import Options
from pants.util.docutil import bin_name
from pants.util.strutil import softwrap
from pants.version import VERSION
_logger = logging.getLogger(__name__)
# Builtin goal: writes the .bsp/pants.json connection file and runner script,
# or (with --server) serves the Build Server Protocol over stdio.
class BSPGoal(BuiltinGoal):
    name = "experimental-bsp"
    help = "Setup repository for Build Server Protocol (https://build-server-protocol.github.io/)."
    server = BoolOption(
        "--server",
        default=False,
        advanced=True,
        help=softwrap(
            """
            Run the Build Server Protocol server. Pants will receive BSP RPC requests via the console.
            This should only ever be invoked via the IDE.
            """
        ),
    )
    runner_env_vars = StrListOption(
        "--runner-env-vars",
        default=["PATH"],
        help=softwrap(
            f"""
            Environment variables to set in the BSP runner script when setting up BSP in a repository.
            Entries are either strings in the form `ENV_VAR=value` to set an explicit value;
            or just `ENV_VAR` to copy the value from Pants' own environment when the {name} goal was run.
            This option only takes effect when the BSP runner script is written. If the option changes, you
            must run `{bin_name()} {name}` again to write a new copy of the BSP runner script.
            Note: The environment variables passed to the Pants BSP server will be those set for your IDE
            and not your shell. For example, on macOS, the IDE is generally launched by `launchd` after
            clicking on a Dock icon, and not from the shell. Thus, any environment variables set for your
            shell will likely not be seen by the Pants BSP server. At the very least, on macOS consider
            writing an explicit PATH into the BSP runner script via this option.
            """
        ),
        advanced=True,
    )
    groups_config_files = FileListOption(
        "--groups-config-files",
        help=softwrap(
            """
            A list of config files that define groups of Pants targets to expose to IDEs via Build Server Protocol.
            Pants generally uses fine-grained targets to define the components of a build (in many cases on a file-by-file
            basis). Many IDEs, however, favor coarse-grained targets that contain large numbers of source files.
            To accommodate this distinction, the Pants BSP server will compute a set of BSP build targets to use
            from the groups specified in the config files set for this option. Each group will become one or more
            BSP build targets.
            Each config file is a TOML file with a `groups` dictionary with the following format for an entry:
            # The dictionary key is used to identify the group. It must be unique.
            [groups.ID1]:
            # One or more Pants address specs defining what targets to include in the group.
            addresses = [
            "src/jvm::",
            "tests/jvm::",
            ]
            # Filter targets to a specific resolve. Targets in a group must be from a single resolve.
            # Format of filter is `TYPE:RESOLVE_NAME`. The only supported TYPE is `jvm`. RESOLVE_NAME must be
            # a valid resolve name.
            resolve = "jvm:jvm-default"
            display_name = "Display Name" # (Optional) Name shown to the user in the IDE.
            base_directory = "path/from/build/root" # (Optional) Hint to the IDE for where the build target should "live."
            Pants will merge the contents of the config files together. If the same ID is used for a group definition,
            in multiple config files, the definition in the latter config file will take effect.
            """
        ),
    )
    def run(
        self,
        *,
        build_config: BuildConfiguration,
        graph_session: GraphSession,
        options: Options,
        specs: Specs,
        union_membership: UnionMembership,
    ) -> ExitCode:
        """Dispatch to server mode (--server) or one-time connection setup."""
        goal_options = options.for_scope(self.name)
        if goal_options.server:
            return self._run_server(
                graph_session=graph_session,
                union_membership=union_membership,
            )
        current_session_values = graph_session.scheduler_session.py_session.session_values
        env = current_session_values[CompleteEnvironment]
        return self._setup_bsp_connection(
            union_membership=union_membership, env=env, options=goal_options
        )
    def _setup_bsp_connection(
        self,
        union_membership: UnionMembership,
        env: Mapping[str, str],
        options: OptionValueContainer,
    ) -> ExitCode:
        """Setup the BSP connection file."""
        build_root = BuildRoot()
        bsp_conn_path = build_root.pathlib_path / ".bsp" / "pants.json"
        # Refuse to clobber an existing connection file.
        if bsp_conn_path.exists():
            print(
                f"ERROR: A BSP connection file already exists at path `{bsp_conn_path}`. "
                "Please delete that file if you intend to re-setup BSP in this repository.",
                file=sys.stderr,
            )
            return PANTS_FAILED_EXIT_CODE
        bsp_dir = build_root.pathlib_path / ".pants.d" / "bsp"
        bsp_scripts_dir = bsp_dir / "scripts"
        bsp_scripts_dir.mkdir(exist_ok=True, parents=True)
        bsp_logs_dir = bsp_dir / "logs"
        bsp_logs_dir.mkdir(exist_ok=True, parents=True)
        # Determine which environment variables to set in the BSP runner script.
        # TODO: Consider whether some of this logic could be shared with
        # `pants.engine.environment.CompleteEnvironment.get_subset`.
        run_script_env_lines: list[str] = []
        for env_var in options.runner_env_vars:
            if "=" in env_var:
                # Entry already carries an explicit `NAME=value`; use verbatim.
                run_script_env_lines.append(env_var)
            else:
                if env_var not in env:
                    print(
                        f"ERROR: The `[{self.name}].runner_env_vars` option is configured to add the `{env_var}` "
                        "environment variable to the BSP runner script using its value in the current environment. "
                        "That environment variable, however, is not present in the current environment. "
                        "Please either set it in the current environment first or else configure a specific value "
                        "in `pants.toml`.",
                        file=sys.stderr,
                    )
                    return PANTS_FAILED_EXIT_CODE
                run_script_env_lines.append(f"{env_var}={env[env_var]}")
        run_script_env_lines_str = "\n".join(
            [f"export {shlex.quote(line)}" for line in run_script_env_lines]
        )
        run_script_path = bsp_scripts_dir / "run-bsp.sh"
        run_script_path.write_text(
            textwrap.dedent(
                f"""\
                #!/bin/sh
                {run_script_env_lines_str}
                exec 2>>{shlex.quote(str(bsp_logs_dir / 'stderr.log'))}
                env 1>&2
                exec {shlex.quote(bin_name())} --no-pantsd {self.name} --server
                """
            )
        )
        run_script_path.chmod(0o755)
        _logger.info(f"Wrote BSP runner script to `{run_script_path}`.")
        # BSP discovery metadata consumed by the IDE (bspVersion, languages,
        # and the argv used to launch the server).
        bsp_conn_data = {
            "name": "Pants",
            "version": VERSION,
            "bspVersion": BSP_VERSION,
            "languages": sorted(
                [lang.language_id for lang in union_membership.get(BSPLanguageSupport)]
            ),
            "argv": ["./.pants.d/bsp/scripts/run-bsp.sh"],
        }
        bsp_conn_path.parent.mkdir(exist_ok=True, parents=True)
        bsp_conn_path.write_text(json.dumps(bsp_conn_data))
        _logger.info(f"Wrote BSP connection file to `{bsp_conn_path}`.")
        return PANTS_SUCCEEDED_EXIT_CODE
    def _run_server(
        self,
        *,
        graph_session: GraphSession,
        union_membership: UnionMembership,
    ) -> ExitCode:
        """Run the BSP server."""
        current_session_values = graph_session.scheduler_session.py_session.session_values
        context = BSPContext()
        session_values = SessionValues(
            {
                **current_session_values,
                BSPContext: context,
            }
        )
        scheduler_session = graph_session.scheduler_session.scheduler.new_session(
            build_id="bsp", dynamic_ui=False, session_values=session_values
        )
        saved_stdout = sys.stdout
        saved_stdin = sys.stdin
        try:
            # Switch std streams to unbuffered binary mode for the BSP wire protocol.
            sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", buffering=0) # type: ignore[assignment]
            sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", buffering=0) # type: ignore[assignment]
            conn = BSPConnection(
                scheduler_session,
                union_membership,
                context,
                sys.stdin, # type: ignore[arg-type]
                sys.stdout, # type: ignore[arg-type]
            )
            conn.run()
        finally:
            # Always restore the original text-mode streams.
            sys.stdout = saved_stdout
            sys.stdin = saved_stdin
        return ExitCode(0)
|
[
"pants.engine.internals.session.SessionValues",
"pants.base.build_root.BuildRoot",
"sys.stdout.fileno",
"logging.getLogger",
"json.dumps",
"pants.bsp.protocol.BSPConnection",
"shlex.quote",
"pants.bsp.context.BSPContext",
"sys.stdin.fileno",
"pants.util.strutil.softwrap",
"pants.util.docutil.bin_name",
"pants.base.exiter.ExitCode"
] |
[((1251, 1278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1268, 1278), False, 'import logging\n'), ((5886, 5897), 'pants.base.build_root.BuildRoot', 'BuildRoot', ([], {}), '()\n', (5895, 5897), False, 'from pants.base.build_root import BuildRoot\n'), ((9251, 9263), 'pants.bsp.context.BSPContext', 'BSPContext', ([], {}), '()\n', (9261, 9263), False, 'from pants.bsp.context import BSPContext\n'), ((9289, 9351), 'pants.engine.internals.session.SessionValues', 'SessionValues', (['{**current_session_values, BSPContext: context}'], {}), '({**current_session_values, BSPContext: context})\n', (9302, 9351), False, 'from pants.engine.internals.session import SessionValues\n'), ((10253, 10264), 'pants.base.exiter.ExitCode', 'ExitCode', (['(0)'], {}), '(0)\n', (10261, 10264), False, 'from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode\n'), ((1544, 1744), 'pants.util.strutil.softwrap', 'softwrap', (['"""\n Run the Build Server Protocol server. Pants will receive BSP RPC requests via the console.\n This should only ever be invoked via the IDE.\n """'], {}), '(\n """\n Run the Build Server Protocol server. Pants will receive BSP RPC requests via the console.\n This should only ever be invoked via the IDE.\n """\n )\n', (1552, 1744), False, 'from pants.util.strutil import softwrap\n'), ((3049, 4882), 'pants.util.strutil.softwrap', 'softwrap', (['"""\n A list of config files that define groups of Pants targets to expose to IDEs via Build Server Protocol.\n\n Pants generally uses fine-grained targets to define the components of a build (in many cases on a file-by-file\n basis). Many IDEs, however, favor coarse-grained targets that contain large numbers of source files.\n To accommodate this distinction, the Pants BSP server will compute a set of BSP build targets to use\n from the groups specified in the config files set for this option. 
Each group will become one or more\n BSP build targets.\n\n Each config file is a TOML file with a `groups` dictionary with the following format for an entry:\n\n # The dictionary key is used to identify the group. It must be unique.\n [groups.ID1]:\n # One or more Pants address specs defining what targets to include in the group.\n addresses = [\n "src/jvm::",\n "tests/jvm::",\n ]\n # Filter targets to a specific resolve. Targets in a group must be from a single resolve.\n # Format of filter is `TYPE:RESOLVE_NAME`. The only supported TYPE is `jvm`. RESOLVE_NAME must be\n # a valid resolve name.\n resolve = "jvm:jvm-default"\n display_name = "Display Name" # (Optional) Name shown to the user in the IDE.\n base_directory = "path/from/build/root" # (Optional) Hint to the IDE for where the build target should "live."\n\n Pants will merge the contents of the config files together. If the same ID is used for a group definition,\n in multiple config files, the definition in the latter config file will take effect.\n """'], {}), '(\n """\n A list of config files that define groups of Pants targets to expose to IDEs via Build Server Protocol.\n\n Pants generally uses fine-grained targets to define the components of a build (in many cases on a file-by-file\n basis). Many IDEs, however, favor coarse-grained targets that contain large numbers of source files.\n To accommodate this distinction, the Pants BSP server will compute a set of BSP build targets to use\n from the groups specified in the config files set for this option. Each group will become one or more\n BSP build targets.\n\n Each config file is a TOML file with a `groups` dictionary with the following format for an entry:\n\n # The dictionary key is used to identify the group. It must be unique.\n [groups.ID1]:\n # One or more Pants address specs defining what targets to include in the group.\n addresses = [\n "src/jvm::",\n "tests/jvm::",\n ]\n # Filter targets to a specific resolve. 
Targets in a group must be from a single resolve.\n # Format of filter is `TYPE:RESOLVE_NAME`. The only supported TYPE is `jvm`. RESOLVE_NAME must be\n # a valid resolve name.\n resolve = "jvm:jvm-default"\n display_name = "Display Name" # (Optional) Name shown to the user in the IDE.\n base_directory = "path/from/build/root" # (Optional) Hint to the IDE for where the build target should "live."\n\n Pants will merge the contents of the config files together. If the same ID is used for a group definition,\n in multiple config files, the definition in the latter config file will take effect.\n """\n )\n', (3057, 4882), False, 'from pants.util.strutil import softwrap\n'), ((8819, 8844), 'json.dumps', 'json.dumps', (['bsp_conn_data'], {}), '(bsp_conn_data)\n', (8829, 8844), False, 'import json\n'), ((9893, 9980), 'pants.bsp.protocol.BSPConnection', 'BSPConnection', (['scheduler_session', 'union_membership', 'context', 'sys.stdin', 'sys.stdout'], {}), '(scheduler_session, union_membership, context, sys.stdin, sys.\n stdout)\n', (9906, 9980), False, 'from pants.bsp.protocol import BSPConnection\n'), ((9705, 9724), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (9722, 9724), False, 'import sys\n'), ((9807, 9825), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (9823, 9825), False, 'import sys\n'), ((2343, 2353), 'pants.util.docutil.bin_name', 'bin_name', ([], {}), '()\n', (2351, 2353), False, 'from pants.util.docutil import bin_name\n'), ((7804, 7821), 'shlex.quote', 'shlex.quote', (['line'], {}), '(line)\n', (7815, 7821), False, 'import shlex\n'), ((8210, 8220), 'pants.util.docutil.bin_name', 'bin_name', ([], {}), '()\n', (8218, 8220), False, 'from pants.util.docutil import bin_name\n')]
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset built from <auto-generated, manually corrected> caption pairs of
YouTube videos with labels capturing the differences between the two."""
import json
import datasets
_CITATION = ""
_DESCRIPTION = """\
Dataset built from pairs of YouTube captions where both 'auto-generated' and
'manually-corrected' captions are available for a single specified language.
This dataset labels two-way (e.g. ignoring single-sided insertions) same-length
token differences in the `diff_type` column. The `default_seq` is composed of
tokens from the 'auto-generated' captions. When a difference occurs between
the 'auto-generated' vs 'manually-corrected' captions types, the `correction_seq`
contains tokens from the 'manually-corrected' captions.
"""
_LICENSE = "MIT License"
# Git tag of the data release to download.
_RELEASE_TAG = "v1.0"
# The English transcripts are split across this many numbered JSON files.
_NUM_FILES = 4
_URLS = [
    f"https://raw.githubusercontent.com/2dot71mily/youtube_captions_corrections/{_RELEASE_TAG}/data/transcripts/en/split/youtube_caption_corrections_{i}.json"
    for i in range(_NUM_FILES)
]
class YoutubeCaptionCorrections(datasets.GeneratorBasedBuilder):
    """YouTube captions corrections."""

    def _info(self):
        """Return dataset metadata: features, supervision keys, homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_ids": datasets.Value("string"),
                    "default_seq": datasets.Sequence(datasets.Value("string")),
                    "correction_seq": datasets.Sequence(datasets.Value("string")),
                    "diff_type": datasets.Sequence(
                        # NOTE: "PUNCUATION" spelling is part of the released
                        # label vocabulary and must not be "fixed".
                        datasets.features.ClassLabel(
                            names=[
                                "NO_DIFF",
                                "CASE_DIFF",
                                "PUNCUATION_DIFF",
                                "CASE_AND_PUNCUATION_DIFF",
                                "STEM_BASED_DIFF",
                                "DIGIT_DIFF",
                                "INTRAWORD_PUNC_DIFF",
                                "UNKNOWN_TYPE_DIFF",
                                "RESERVED_DIFF",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=("correction_seq", "diff_type"),
            homepage="https://github.com/2dot71mily/youtube_captions_corrections",
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_filepaths = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": downloaded_filepaths},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yields (key, example) pairs.

        Fix: the original restarted its counter for every JSON list and every
        file, producing duplicate example keys across the 4 shard files,
        which `datasets` rejects (DuplicatedKeysError). Keys are now globally
        unique; example contents are unchanged.
        """
        key = 0
        for fp in filepaths:
            with open(fp, "r", encoding="utf-8") as json_file:
                json_lists = list(json_file)
            for json_list_str in json_lists:
                for result in json.loads(json_list_str):
                    yield key, {
                        "video_ids": result["video_ids"],
                        "diff_type": result["diff_type"],
                        "default_seq": result["default_seq"],
                        "correction_seq": result["correction_seq"],
                    }
                    key += 1
|
[
"datasets.Value",
"datasets.features.ClassLabel",
"datasets.SplitGenerator",
"json.loads"
] |
[((3228, 3330), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TRAIN', 'gen_kwargs': "{'filepaths': downloaded_filepaths}"}), "(name=datasets.Split.TRAIN, gen_kwargs={'filepaths':\n downloaded_filepaths})\n", (3251, 3330), False, 'import datasets\n'), ((3672, 3697), 'json.loads', 'json.loads', (['json_list_str'], {}), '(json_list_str)\n', (3682, 3697), False, 'import json\n'), ((1964, 1988), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (1978, 1988), False, 'import datasets\n'), ((2043, 2067), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2057, 2067), False, 'import datasets\n'), ((2126, 2150), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2140, 2150), False, 'import datasets\n'), ((2229, 2438), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['NO_DIFF', 'CASE_DIFF', 'PUNCUATION_DIFF', 'CASE_AND_PUNCUATION_DIFF',\n 'STEM_BASED_DIFF', 'DIGIT_DIFF', 'INTRAWORD_PUNC_DIFF',\n 'UNKNOWN_TYPE_DIFF', 'RESERVED_DIFF']"}), "(names=['NO_DIFF', 'CASE_DIFF',\n 'PUNCUATION_DIFF', 'CASE_AND_PUNCUATION_DIFF', 'STEM_BASED_DIFF',\n 'DIGIT_DIFF', 'INTRAWORD_PUNC_DIFF', 'UNKNOWN_TYPE_DIFF', 'RESERVED_DIFF'])\n", (2257, 2438), False, 'import datasets\n')]
|
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
import frappe
from frappe import _
@frappe.whitelist()
def uom_list(item):
    """Return the UOM names configured in *item*'s conversion detail table.

    Args:
        item: Item code whose `UOM Conversion Detail` child rows are queried.
    Returns:
        list[str]: the `uom` values for the item.
    """
    # Idiom fix: list comprehension instead of a manual append loop, and no
    # local variable shadowing the function's own name.
    rows = frappe.db.get_list('UOM Conversion Detail', {"parent": item}, 'uom')
    return [row['uom'] for row in rows]
def update_dn(doc, action):
    """Validate each row's UOM on a document before save/submit.

    For every item row that has both an item code and a UOM, check that the
    UOM appears in the item's `UOM Conversion Detail` table; otherwise abort
    with an error naming the offending row.

    Args:
        doc: the document whose `items` child rows are validated.
        action: hook action name (unused; required by the hook signature).
    """
    for row in doc.items:
        if row.item_code and row.uom:
            # Idiom fix: comprehension instead of a manual append loop.
            rows = frappe.db.get_list('UOM Conversion Detail', {"parent": row.item_code}, 'uom')
            valid_uoms = [entry['uom'] for entry in rows]
            if row.uom not in valid_uoms:
                frappe.throw(f"UOM {row.uom} is invalid for the item {row.item_code} in the row {row.idx}")
def restrict_role(doc, action):
    """Block users holding a restricted role from handling return documents."""
    # Guard clause: only return documents are restricted.
    if not doc.is_return:
        return
    user_roles = frappe.get_roles()
    for blocked_role in ['Muntaqeem']:
        if blocked_role in user_roles:
            frappe.throw(_('Not Permitted'))
|
[
"frappe.db.get_list",
"frappe.whitelist",
"frappe.throw",
"frappe.get_roles",
"frappe._"
] |
[((118, 136), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (134, 136), False, 'import frappe\n'), ((170, 238), 'frappe.db.get_list', 'frappe.db.get_list', (['"""UOM Conversion Detail"""', "{'parent': item}", '"""uom"""'], {}), "('UOM Conversion Detail', {'parent': item}, 'uom')\n", (188, 238), False, 'import frappe\n'), ((770, 788), 'frappe.get_roles', 'frappe.get_roles', ([], {}), '()\n', (786, 788), False, 'import frappe\n'), ((431, 508), 'frappe.db.get_list', 'frappe.db.get_list', (['"""UOM Conversion Detail"""', "{'parent': row.item_code}", '"""uom"""'], {}), "('UOM Conversion Detail', {'parent': row.item_code}, 'uom')\n", (449, 508), False, 'import frappe\n'), ((614, 715), 'frappe.throw', 'frappe.throw', (['f"""UOM {row.uom} is invalid for the item {row.item_code} in the row {row.idx}"""'], {}), "(\n f'UOM {row.uom} is invalid for the item {row.item_code} in the row {row.idx}'\n )\n", (626, 715), False, 'import frappe\n'), ((892, 910), 'frappe._', '_', (['"""Not Permitted"""'], {}), "('Not Permitted')\n", (893, 910), False, 'from frappe import _\n')]
|
from main import ma
from models.Word import Word
from marshmallow.validate import Length
class WordSchema(ma.SQLAlchemyAutoSchema):
    # Marshmallow schema auto-generated from the Word model; the explicit
    # fields below layer required/min-length validation over the model columns.
    class Meta:
        model = Word
    # NOTE(review): minimum lengths look like business rules — confirm values.
    word = ma.String(required=True, validate=Length(min=3))
    definition = ma.String(required=True, validate=Length(min=5))
    pronunciation = ma.String(required=True, validate=Length(min=1))
word_schema = WordSchema()  # single-object (de)serialization
words_schema = WordSchema(many=True)  # list (de)serialization
|
[
"marshmallow.validate.Length"
] |
[((217, 230), 'marshmallow.validate.Length', 'Length', ([], {'min': '(3)'}), '(min=3)\n', (223, 230), False, 'from marshmallow.validate import Length\n'), ((283, 296), 'marshmallow.validate.Length', 'Length', ([], {'min': '(5)'}), '(min=5)\n', (289, 296), False, 'from marshmallow.validate import Length\n'), ((352, 365), 'marshmallow.validate.Length', 'Length', ([], {'min': '(1)'}), '(min=1)\n', (358, 365), False, 'from marshmallow.validate import Length\n')]
|
import json
import logging
class Config(object):
    """Reads application configuration from ``config.json``."""

    def __init__(self):
        # Dedicated logger so config-related messages are easy to filter.
        self.logger = logging.getLogger('CONFIG')

    def read(self):
        """Read and parse ``config.json`` from the working directory.

        Returns:
            The parsed JSON data (typically a dict).
        """
        # Fix: the `with` statement already closes the file; the original's
        # explicit `file.close()` inside the block was redundant. Logging and
        # the return are moved outside the block since they don't need the
        # open handle.
        with open("config.json") as file:
            data = json.load(file)
        self.logger.info("Success on reading configuration file.")
        return data
|
[
"json.load",
"logging.getLogger"
] |
[((97, 124), 'logging.getLogger', 'logging.getLogger', (['"""CONFIG"""'], {}), "('CONFIG')\n", (114, 124), False, 'import logging\n'), ((207, 222), 'json.load', 'json.load', (['file'], {}), '(file)\n', (216, 222), False, 'import json\n')]
|
from dateutil.parser import parse
class Car:
    """In-memory representation of a scraped car listing.

    ``last_seen`` is accepted as a date/time string and parsed into a
    ``datetime`` on construction.
    """

    def __init__(self, brand, name, price, year, damage, last_seen, image, id=None):
        self.brand = brand
        self.name = name
        self.price = price
        self.year = year
        self.damage = damage
        self.last_seen = parse(last_seen)
        self.image = image
        self.id = id

    def to_dict(self):
        """Return the listing as a plain dict; ``last_seen`` becomes ISO-8601 text."""
        payload = {
            'brand': self.brand,
            'name': self.name,
            'price': self.price,
            'year': self.year,
            'damage': self.damage,
            'last_seen': self.last_seen.isoformat(),
            'id': self.id,
            'image': self.image,
        }
        return payload
|
[
"dateutil.parser.parse"
] |
[((183, 199), 'dateutil.parser.parse', 'parse', (['last_seen'], {}), '(last_seen)\n', (188, 199), False, 'from dateutil.parser import parse\n')]
|
import scrapy
import re
from scrapy.loader import ItemLoader
from machete.items import VersionItem
class versionSpider(scrapy.Spider):
    """Scrape the advertised version string from reactjs.org into a VersionItem."""
    name = 'versioninspector'
    custom_settings = {
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36',
        'FEED_URI': 'acordeones.json',
        'FEED_FORMAT': 'json',
        'FEED_EXPORT_ENCODING': 'utf-8'
    }
    start_urls = ['https://reactjs.org']

    def parse(self, response):
        """Yield a VersionItem holding the version text and its source URL."""
        item = ItemLoader(item=VersionItem(), response=response)
        item.add_xpath('version', '//header//div/a[@href="/versions"]/text()[2]')
        # Use the public add_value API (was the private `_add_value`).
        # The old `i = 0 ... i = i + 1` counter was reinitialized to 0 on
        # every call and never advanced, so it always indexed start_urls[0];
        # the dead counter is removed and the index made explicit.
        item.add_value('source', self.start_urls[0])
        yield item.load_item()
# //header//div/a[@href="/versions"]/text()[2] react
# //h1/text() scrapy
# REGEX DE VERSION \d+\.\d+\.*\d*
# REGEX MEJOR \d{1,2}\.\d{1,2}\.?\d{0,2}
# [\d]{1,2}\.[\d]{1,2}\.{0,1}\d{0,2}
|
[
"machete.items.VersionItem"
] |
[((584, 597), 'machete.items.VersionItem', 'VersionItem', ([], {}), '()\n', (595, 597), False, 'from machete.items import VersionItem\n')]
|
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import numpy as np
import thermosteam as tmo
import flexsolve as flx
from warnings import warn
from thermosteam import functional as fn
from . import indexer
from . import equilibrium as eq
from . import units_of_measure as thermo_units
from collections.abc import Iterable
from .exceptions import DimensionError, InfeasibleRegion
from chemicals.elements import array_to_atoms, symbol_to_index
from . import utils
from .constants import g
__all__ = ('Stream', )
# %% Utilities
mol_units = indexer.ChemicalMolarFlowIndexer.units
mass_units = indexer.ChemicalMassFlowIndexer.units
vol_units = indexer.ChemicalVolumetricFlowIndexer.units
class StreamData:
    """Immutable-by-convention snapshot of a stream's material flow rates,
    temperature, pressure, and phase(s), used by Stream.get_data/set_data."""
    __slots__ = ('_imol', '_T', '_P', '_phases')
    def __init__(self, imol, thermal_condition, phases):
        # Copy the flow indexer so later mutation of the live stream does
        # not alter this snapshot; T and P are plain numbers, so reading
        # them directly is already a value copy.
        self._imol = imol.copy()
        self._T = thermal_condition._T
        self._P = thermal_condition._P
        self._phases = phases
# %%
@utils.units_of_measure(thermo_units.stream_units_of_measure)
@utils.thermo_user
@utils.registered(ticket_name='s')
class Stream:
"""
Create a Stream object that defines material flow rates
along with its thermodynamic state. Thermodynamic and transport
properties of a stream are available as properties, while
thermodynamic equilbrium (e.g. VLE, and bubble and dew points)
are available as methods.
Parameters
----------
ID : str, optional
A unique identification. If ID is None, stream will not be registered.
If no ID is given, stream will be registered with a unique ID.
flow : Iterable[float], optional
All flow rates corresponding to chemical `IDs`.
phase : 'l', 'g', or 's'
Either gas (g), liquid (l), or solid (s). Defaults to 'l'.
T : float
Temperature [K]. Defaults to 298.15.
P : float
Pressure [Pa]. Defaults to 101325.
units : str, optional
Flow rate units of measure (only mass, molar, and
volumetric flow rates are valid). Defaults to 'kmol/hr'.
price : float, optional
Price per unit mass [USD/kg]. Defaults to 0.
total_flow : float, optional
Total flow rate.
thermo : :class:`~thermosteam.Thermo`, optional
Thermo object to initialize input and output streams. Defaults to
`biosteam.settings.get_thermo()`.
characterization_factors : dict, optional
Characterization factors for life cycle assessment.
**chemical_flows : float
ID - flow pairs.
Examples
--------
Before creating a stream, first set the chemicals:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
Create a stream, defining the thermodynamic condition and flow rates:
>>> s1 = tmo.Stream(ID='s1',
... Water=20, Ethanol=10, units='kg/hr',
... T=298.15, P=101325, phase='l')
>>> s1.show(flow='kg/hr') # Use the show method to select units of display
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
>>> s1.show(composition=True, flow='kg/hr') # Its also possible to show by composition
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
composition: Water 0.667
Ethanol 0.333
------- 30 kg/hr
All flow rates are stored as an array in the `mol` attribute:
>>> s1.mol # Molar flow rates [kmol/hr]
array([1.11 , 0.217])
Mass and volumetric flow rates are available as property arrays:
>>> s1.mass
property_array([20.0, 10.0])
>>> s1.vol
property_array([0.02006, 0.012724])
These arrays work just like ordinary arrays, but the data is linked to the molar flows:
>>> # Mass flows are always up to date with molar flows
>>> s1.mol[0] = 1
>>> s1.mass[0]
18.015
>>> # Changing mass flows changes molar flows
>>> s1.mass[0] *= 2
>>> s1.mol[0]
2.0
>>> # Property arrays act just like normal arrays
>>> s1.mass + 2
array([38.031, 12. ])
The temperature, pressure and phase are attributes as well:
>>> (s1.T, s1.P, s1.phase)
(298.15, 101325.0, 'l')
The most convinient way to get and set flow rates is through
the `get_flow` and `set_flow` methods:
>>> # Set flow
>>> s1.set_flow(1, 'gpm', 'Water')
>>> s1.get_flow('gpm', 'Water')
1.0
>>> # Set multiple flows
>>> s1.set_flow([10, 20], 'kg/hr', ('Ethanol', 'Water'))
>>> s1.get_flow('kg/hr', ('Ethanol', 'Water'))
array([10., 20.])
It is also possible to index using IDs through the
`imol`, `imass`, and `ivol` indexers:
>>> s1.imol.show()
ChemicalMolarFlowIndexer (kmol/hr):
(l) Water 1.11
Ethanol 0.2171
>>> s1.imol['Water']
1.1101687012358397
>>> s1.imol['Ethanol', 'Water']
array([0.217, 1.11 ])
Thermodynamic properties are available as stream properties:
>>> s1.H # Enthalpy (kJ/hr)
0.0
Note that the reference enthalpy is 0.0 at the reference
temperature of 298.15 K, and pressure of 101325 Pa.
Retrive the enthalpy at a 10 degC above the reference.
>>> s1.T += 10
>>> s1.H
1083.467954...
Other thermodynamic properties are temperature and pressure dependent as well:
>>> s1.rho # Density [kg/m3]
908.648
It may be more convinient to get properties with different units:
>>> s1.get_property('rho', 'g/cm3')
0.90864
It is also possible to set some of the properties in different units:
>>> s1.set_property('T', 40, 'degC')
>>> s1.T
313.15
Bubble point and dew point computations can be performed through stream methods:
>>> bp = s1.bubble_point_at_P() # Bubble point at constant pressure
>>> bp
BubblePointValues(T=357.09, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.49 0.51])
The bubble point results contain all results as attributes:
>>> bp.T # Temperature [K]
357.088...
>>> bp.y # Vapor composition
array([0.49, 0.51])
Vapor-liquid equilibrium can be performed by setting 2 degrees of freedom from the following list: `T` [Temperature; in K], `P` [Pressure; in Pa], `V` [Vapor fraction], `H` [Enthalpy; in kJ/hr].
Set vapor fraction and pressure of the stream:
>>> s1.vle(P=101325, V=0.5)
>>> s1.show()
MultiStream: s1
phases: ('g', 'l'), T: 364.8 K, P: 101325 Pa
flow (kmol/hr): (g) Water 0.472
Ethanol 0.192
(l) Water 0.638
Ethanol 0.0255
Note that the stream is a now a MultiStream object to manage multiple phases.
Each phase can be accessed separately too:
>>> s1['l'].show()
Stream:
phase: 'l', T: 364.8 K, P: 101325 Pa
flow (kmol/hr): Water 0.638
Ethanol 0.0255
>>> s1['g'].show()
Stream:
phase: 'g', T: 364.8 K, P: 101325 Pa
flow (kmol/hr): Water 0.472
Ethanol 0.192
We can convert a MultiStream object back to a Stream object by setting the phase:
>>> s1.phase = 'l'
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 364.8 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
"""
__slots__ = (
'_ID', '_imol', '_thermal_condition', '_thermo', '_streams',
'_bubble_point_cache', '_dew_point_cache',
'_vle_cache', '_lle_cache', '_sle_cache',
'_sink', '_source', '_price', '_islinked', '_property_cache_key',
'_property_cache', 'characterization_factors', '_user_equilibrium',
# '_velocity', '_height'
)
line = 'Stream'
#: [DisplayUnits] Units of measure for IPython display (class attribute)
display_units = thermo_units.DisplayUnits(T='K', P='Pa',
flow=('kmol/hr', 'kg/hr', 'm3/hr'),
composition=False,
N=7)
_units_of_measure = thermo_units.stream_units_of_measure
_flow_cache = {}
def __init__(self, ID= '', flow=(), phase='l', T=298.15, P=101325.,
units=None, price=0., total_flow=None, thermo=None,
characterization_factors=None,
# velocity=0., height=0.,
**chemical_flows):
#: dict[obj, float] Characterization factors for life cycle assessment in impact / kg.
self.characterization_factors = {} if characterization_factors is None else {}
self._thermal_condition = tmo.ThermalCondition(T, P)
thermo = self._load_thermo(thermo)
chemicals = thermo.chemicals
self.price = price
# self.velocity = velocity
# self.height = height
if units:
name, factor = self._get_flow_name_and_factor(units)
if name == 'mass':
group_wt_compositions = chemicals._group_wt_compositions
for cID in tuple(chemical_flows):
if cID in group_wt_compositions:
compositions = group_wt_compositions[cID]
group_flow = chemical_flows.pop(cID)
chemical_group = chemicals[cID]
for i in range(len(chemical_group)):
chemical_flows[chemical_group[i]._ID] = group_flow * compositions[i]
elif name == 'vol':
group_wt_compositions = chemicals._group_wt_compositions
for cID in chemical_flows:
if cID in group_wt_compositions:
raise ValueError(f"cannot set volumetric flow by chemical group '{i}'")
self._init_indexer(flow, phase, chemicals, chemical_flows)
mol = self.mol
flow = getattr(self, name)
if total_flow is not None: mol *= total_flow / mol.sum()
material_data = mol / factor
flow[:] = material_data
else:
self._init_indexer(flow, phase, chemicals, chemical_flows)
if total_flow:
mol = self.mol
mol *= total_flow / mol.sum()
self._sink = self._source = None # For BioSTEAM
self.reset_cache()
self._register(ID)
self._islinked = False
self._user_equilibrium = None
def reset_flow(self, phase=None, units=None, total_flow=None, **chemical_flows):
"""
Convinience method for resetting flow rate data.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=1)
>>> s1.reset_flow(Ethanol=1, phase='g', units='kg/hr', total_flow=2)
>>> s1.show('cwt')
Stream: s1
phase: 'g', T: 298.15 K, P: 101325 Pa
composition: Ethanol 1
------- 2 kg/hr
"""
imol = self._imol
imol.empty()
if phase: imol.phase = phase
if chemical_flows:
keys, values = zip(*chemical_flows.items())
if units is None:
self.imol[keys] = values
else:
self.set_flow(values, units, keys)
if total_flow:
if units is None:
self.F_mol = total_flow
else:
self.set_total_flow(total_flow, units)
def _reset_thermo(self, thermo):
if thermo is self._thermo: return
self._thermo = thermo
self._imol.reset_chemicals(thermo.chemicals)
self._islinked = False
self.reset_cache()
if hasattr(self, '_streams'):
for phase, stream in self._streams.items():
stream._imol = self._imol.get_phase(phase)
stream._thermo = thermo
def user_equilibrium(self, *args, **kwargs):
return self._user_equilibrium(self, *args, **kwargs)
def set_user_equilibrium(self, f):
self._user_equilibrium = f
@property
def has_user_equilibrium(self):
return self._user_equilibrium is not None
def get_CF(self, key, units=None):
"""
Returns the life-cycle characterization factor on a kg basis given the
impact indicator key.
Parameters
----------
key : str
Key of impact indicator.
units : str, optional
Units of impact indicator. Before using this argument, the default units
of the impact indicator should be defined with
thermosteam.settings.define_impact_indicator.
Units must also be dimensionally consistent with the default units.
"""
try:
value = self.characterization_factors[key]
except:
return 0.
if units is not None:
original_units = tmo.settings.get_impact_indicator_units(key)
value = original_units.convert(value, units)
return value
def set_CF(self, key, value, units=None):
"""
Set the life-cycle characterization factor on a kg basis given the
impact indicator key and the units of measure.
Parameters
----------
key : str
Key of impact indicator.
value : float
Characterization factor value.
units : str, optional
Units of impact indicator. Before using this argument, the default units
of the impact indicator should be defined with
thermosteam.settings.define_impact_indicator.
Units must also be dimensionally consistent with the default units.
"""
if units is not None:
original_units = tmo.settings.get_impact_indicator_units(key)
value = original_units.unconvert(value, units)
self.characterization_factors[key] = value
def get_impact(self, key):
"""Return hourly rate of the impact indicator given the key."""
cfs = self.characterization_factors
return cfs[key] * self.F_mass if key in cfs else 0.
def empty_negative_flows(self):
"""
Replace flows of all components with negative values with 0.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=1, Ethanol=-1)
>>> s1.empty_negative_flows()
>>> s1.show()
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 1
"""
data = self._imol._data
data[data < 0.] = 0.
def shares_flow_rate_with(self, other):
"""
Return whether other stream shares data with this one.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1')
>>> other = s1.flow_proxy()
>>> s1.shares_flow_rate_with(other)
True
>>> s1 = tmo.MultiStream('s1', phases=('l', 'g'))
>>> s1['g'].shares_flow_rate_with(s1)
True
>>> s2 = tmo.MultiStream('s2', phases=('l', 'g'))
>>> s1['g'].shares_flow_rate_with(s2)
False
>>> s1['g'].shares_flow_rate_with(s2['g'])
False
"""
imol = self._imol
other_imol = other._imol
if imol.__class__ is other_imol.__class__ and imol._data is other_imol._data:
shares_data = True
elif isinstance(other, tmo.MultiStream):
phase = self.phase
substreams = other._streams
if phase in substreams:
substream = substreams[phase]
shares_data = self.shares_flow_rate_with(substream)
else:
shares_data = False
else:
shares_data = False
return shares_data
def as_stream(self):
"""Does nothing."""
def get_data(self):
"""
Return a StreamData object containing data on material flow rates,
temperature, pressure, and phase(s).
See Also
--------
Stream.set_data
Examples
--------
Get and set data from stream at different conditions
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream('stream', Water=10)
>>> data = stream.get_data()
>>> stream.vle(V=0.5, P=101325)
>>> data_vle = stream.get_data()
>>> stream.set_data(data)
>>> stream.show()
Stream: stream
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 10
>>> stream.set_data(data_vle)
>>> stream.show()
MultiStream: stream
phases: ('g', 'l'), T: 373.12 K, P: 101325 Pa
flow (kmol/hr): (g) Water 5
(l) Water 5
Note that only StreamData objects are valid for this method:
>>> stream.set_data({'T': 298.15})
Traceback (most recent call last):
ValueError: stream_data must be a StreamData object; not dict
"""
return StreamData(self._imol, self._thermal_condition, self.phases)
def set_data(self, stream_data):
"""
Set material flow rates, temperature, pressure, and phase(s) through a
StreamData object
See Also
--------
Stream.get_data
"""
if isinstance(stream_data, StreamData):
self.phases = stream_data._phases
self._imol.copy_like(stream_data._imol)
self._thermal_condition.copy_like(stream_data)
else:
raise ValueError(f'stream_data must be a StreamData object; not {type(stream_data).__name__}')
@property
def price(self):
"""[float] Price of stream per unit mass [USD/kg]."""
return self._price
@price.setter
def price(self, price):
if np.isfinite(price):
self._price = float(price)
else:
raise AttributeError(f'price must be finite, not {price}')
# @property
# def velocity(self):
# """[float] Velocity of stream [m/s]."""
# return self._velocity
# @velocity.setter
# def velocity(self, velocity):
# if np.isfinite(velocity):
# self._velocity = float(velocity)
# else:
# raise AttributeError(f'velocity must be finite, not {velocity}')
# @property
# def height(self):
# """[float] Relative height of stream [m]."""
# return self._height
# @height.setter
# def height(self, height):
# if np.isfinite(height):
# self._height = float(height)
# else:
# raise AttributeError(f'height must be finite, not {height}')
# @property
# def potential_energy(self):
# """[float] Potential energy flow rate [kW]"""
# return (g * self.height * self.F_mass) / 3.6e6
# @property
# def kinetic_energy(self):
# """[float] Kinetic energy flow rate [kW]"""
# return 0.5 * self.F_mass / 3.6e6 * self._velocity * self._velocity
def isempty(self):
"""
Return whether or not stream is empty.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream()
>>> stream.isempty()
True
"""
return self._imol.isempty()
def sanity_check(self):
"""
Raise an InfeasibleRegion error if flow rates are infeasible.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1')
>>> s1.sanity_check()
>>> s1.mol[0] = -1.
>>> s1.sanity_check()
Traceback (most recent call last):
InfeasibleRegion: negative material flow rate is infeasible
"""
material = self._imol._data
if material[material < 0.].any(): raise InfeasibleRegion('negative material flow rate')
@property
def vapor_fraction(self):
"""Molar vapor fraction."""
return 1.0 if self.phase in 'gG' else 0.0
@property
def liquid_fraction(self):
"""Molar liquid fraction."""
return 1.0 if self.phase in 'lL' else 0.0
@property
def solid_fraction(self):
"""Molar solid fraction."""
return 1.0 if self.phase in 'sS' else 0.0
def isfeed(self):
"""Return whether stream has a sink but no source."""
return bool(self._sink and not self._source)
def isproduct(self):
"""Return whether stream has a source but no sink."""
return bool(self._source and not self._sink)
@property
def main_chemical(self):
"""[str] ID of chemical with the largest mol fraction in stream."""
return self.chemicals.tuple[self.mol.argmax()].ID
def disconnect_source(self):
"""Disconnect stream from source."""
source = self._source
if source:
outs = source.outs
index = outs.index(self)
outs[index] = None
def disconnect_sink(self):
"""Disconnect stream from sink."""
sink = self._sink
if sink:
ins = sink.ins
index = ins.index(self)
ins[index] = None
def disconnect(self):
"""Disconnect stream from unit operations."""
self.disconnect_source()
self.disconnect_sink()
def _init_indexer(self, flow, phase, chemicals, chemical_flows):
"""Initialize molar flow rates."""
if len(flow) == 0:
if chemical_flows:
imol = indexer.ChemicalMolarFlowIndexer(phase, chemicals=chemicals, **chemical_flows)
else:
imol = indexer.ChemicalMolarFlowIndexer.blank(phase, chemicals)
else:
assert not chemical_flows, ("may specify either 'flow' or "
"'chemical_flows', but not both")
if isinstance(flow, indexer.ChemicalMolarFlowIndexer):
imol = flow
imol.phase = phase
else:
imol = indexer.ChemicalMolarFlowIndexer.from_data(
np.asarray(flow, dtype=float), phase, chemicals)
self._imol = imol
def reset_cache(self):
"""Reset cache regarding equilibrium methods."""
self._bubble_point_cache = eq.BubblePointCache()
self._dew_point_cache = eq.DewPointCache()
self._property_cache_key = None, None, None
self._property_cache = {}
@classmethod
def _get_flow_name_and_factor(cls, units):
cache = cls._flow_cache
if units in cache:
name, factor = cache[units]
else:
dimensionality = thermo_units.get_dimensionality(units)
if dimensionality == mol_units.dimensionality:
name = 'mol'
factor = mol_units.conversion_factor(units)
elif dimensionality == mass_units.dimensionality:
name = 'mass'
factor = mass_units.conversion_factor(units)
elif dimensionality == vol_units.dimensionality:
name = 'vol'
factor = vol_units.conversion_factor(units)
else:
raise DimensionError("dimensions for flow units must be in molar, "
"mass or volumetric flow rates, not "
f"'{dimensionality}'")
cache[units] = name, factor
return name, factor
### Property getters ###
def get_atomic_flow(self, symbol):
"""
Return flow rate of atom in kmol / hr given the atomic symbol.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream(Water=1)
>>> stream.get_atomic_flow('H') # kmol/hr of H
2.0
>>> stream.get_atomic_flow('O') # kmol/hr of O
1.0
"""
return (self.chemicals.formula_array[symbol_to_index[symbol], :] * self.mol).sum()
def get_atomic_flows(self):
"""
Return dictionary of atomic flow rates in kmol / hr.
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> stream = tmo.Stream(Water=1)
>>> stream.get_atomic_flows()
{'H': 2.0, 'O': 1.0}
"""
return array_to_atoms(self.chemicals.formula_array @ self.mol)
def get_flow(self, units, key=...):
"""
Return an flow rates in requested units.
Parameters
----------
units : str
Units of measure.
key : tuple[str] or str, optional
Chemical identifiers.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.get_flow('kg/hr', 'Water')
20.0
"""
name, factor = self._get_flow_name_and_factor(units)
indexer = getattr(self, 'i' + name)
return factor * indexer[key]
def set_flow(self, data, units, key=...):
"""
Set flow rates in given units.
Parameters
----------
data : 1d ndarray or float
Flow rate data.
units : str
Units of measure.
key : Iterable[str] or str, optional
Chemical identifiers.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream(ID='s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.set_flow(10, 'kg/hr', 'Water')
>>> s1.get_flow('kg/hr', 'Water')
10.0
"""
name, factor = self._get_flow_name_and_factor(units)
indexer = getattr(self, 'i' + name)
indexer[key] = np.asarray(data, dtype=float) / factor
def get_total_flow(self, units):
"""
Get total flow rate in given units.
Parameters
----------
units : str
Units of measure.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.get_total_flow('kg/hr')
30.0
"""
name, factor = self._get_flow_name_and_factor(units)
flow = getattr(self, 'F_' + name)
return factor * flow
def set_total_flow(self, value, units):
"""
Set total flow rate in given units keeping the composition constant.
Parameters
----------
value : float
New total flow rate.
units : str
Units of measure.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.set_total_flow(1.0,'kg/hr')
>>> s1.get_total_flow('kg/hr')
0.9999999999999999
"""
name, factor = self._get_flow_name_and_factor(units)
setattr(self, 'F_' + name, value / factor)
### Stream data ###
@property
def source(self):
"""[Unit] Outlet location."""
return self._source
@property
def sink(self):
"""[Unit] Inlet location."""
return self._sink
@property
def thermal_condition(self):
"""
[ThermalCondition] Contains the temperature and pressure conditions
of the stream.
"""
return self._thermal_condition
@property
def T(self):
"""[float] Temperature in Kelvin."""
return self._thermal_condition._T
@T.setter
def T(self, T):
self._thermal_condition._T = float(T)
@property
def P(self):
"""[float] Pressure in Pascal."""
return self._thermal_condition._P
@P.setter
def P(self, P):
self._thermal_condition._P = float(P)
@property
def phase(self):
"""Phase of stream."""
return self._imol._phase._phase
@phase.setter
def phase(self, phase):
self._imol._phase.phase = phase
@property
def mol(self):
"""[array] Molar flow rates in kmol/hr."""
return self._imol._data
@mol.setter
def mol(self, value):
mol = self.mol
if mol is not value: mol[:] = value
@property
def mass(self):
"""[property_array] Mass flow rates in kg/hr."""
return self.imass._data
@mass.setter
def mass(self, value):
mass = self.mass
if mass is not value: mass[:] = value
@property
def vol(self):
"""[property_array] Volumetric flow rates in m3/hr."""
return self.ivol._data
@vol.setter
def vol(self, value):
vol = self.vol
if vol is not value:
vol[:] = value
@property
def imol(self):
"""[Indexer] Flow rate indexer with data in kmol/hr."""
return self._imol
@property
def imass(self):
"""[Indexer] Flow rate indexer with data in kg/hr."""
return self._imol.by_mass()
@property
def ivol(self):
"""[Indexer] Flow rate indexer with data in m3/hr."""
return self._imol.by_volume(self._thermal_condition)
### Net flow properties ###
@property
def cost(self):
"""[float] Total cost of stream in USD/hr."""
return self.price * self.F_mass
@property
def F_mol(self):
"""[float] Total molar flow rate in kmol/hr."""
return self._imol._data.sum()
@F_mol.setter
def F_mol(self, value):
F_mol = self.F_mol
if not F_mol: raise AttributeError("undefined composition; cannot set flow rate")
self._imol._data[:] *= value/F_mol
@property
def F_mass(self):
"""[float] Total mass flow rate in kg/hr."""
return np.dot(self.chemicals.MW, self.mol)
@F_mass.setter
def F_mass(self, value):
F_mass = self.F_mass
if not F_mass: raise AttributeError("undefined composition; cannot set flow rate")
self.imol._data[:] *= value/F_mass
@property
def F_vol(self):
"""[float] Total volumetric flow rate in m3/hr."""
F_mol = self.F_mol
return 1000. * self.V * F_mol if F_mol else 0.
@F_vol.setter
def F_vol(self, value):
F_vol = self.F_vol
if not F_vol: raise AttributeError("undefined composition; cannot set flow rate")
self.imol._data[:] *= value / F_vol
@property
def H(self):
"""[float] Enthalpy flow rate in kJ/hr."""
H = self._get_property_cache('H', True)
if H is None:
self._property_cache['H'] = H = self.mixture.H(
self.phase, self.mol, *self._thermal_condition
)
return H
@H.setter
def H(self, H: float):
if not H and self.isempty(): return
try: self.T = self.mixture.solve_T(self.phase, self.mol, H,
*self._thermal_condition)
except Exception as error: # pragma: no cover
phase = self.phase.lower()
if phase == 'g':
# Maybe too little heat, liquid must be present
self.phase = 'l'
elif phase == 'l':
# Maybe too much heat, gas must be present
self.phase = 'g'
else:
raise error
self.T = self.mixture.solve_T(self.phase, self.mol, H,
*self._thermal_condition)
@property
def S(self):
"""[float] Absolute entropy flow rate in kJ/hr."""
S = self._get_property_cache('S', True)
if S is None:
self._property_cache['S'] = S = self.mixture.S(
self.phase, self.mol, *self._thermal_condition
)
return S
@property
def Hnet(self):
"""[float] Total enthalpy flow rate (including heats of formation) in kJ/hr."""
return self.H + self.Hf
@property
def Hf(self):
"""[float] Enthalpy of formation flow rate in kJ/hr."""
return (self.chemicals.Hf * self.mol).sum()
@property
def LHV(self):
"""[float] Lower heating value flow rate in kJ/hr."""
return (self.chemicals.LHV * self.mol).sum()
@property
def HHV(self):
"""[float] Higher heating value flow rate in kJ/hr."""
return (self.chemicals.HHV * self.mol).sum()
@property
def Hvap(self):
"""[float] Enthalpy of vaporization flow rate in kJ/hr."""
mol = self.mol
T = self._thermal_condition._T
Hvap = self._get_property_cache('Hvap', True)
if Hvap is None:
self._property_cache['Hvap'] = Hvap = sum([
i*j.Hvap(T) for i,j in zip(mol, self.chemicals)
if i and not j.locked_state
])
return Hvap
def _get_property_cache(self, name, flow=False):
property_cache = self._property_cache
thermal_condition = self._thermal_condition
imol = self._imol
data = imol._data
total = data.sum()
if total == 0.: return 0.
composition = data / total
literal = (imol._phase._phase, thermal_condition._T, thermal_condition._P)
last_literal, last_composition, last_total = self._property_cache_key
if literal == last_literal and (composition == last_composition).all():
prop = property_cache.get(name)
if not prop: return prop
if flow:
return prop * total / last_total
else:
return prop
else:
self._property_cache_key = (literal, composition, total)
property_cache.clear()
return None
@property
def C(self):
"""[float] Heat capacity flow rate in kJ/hr."""
C = self._get_property_cache('C', True)
if C is None:
self._property_cache['C'] = C = self.mixture.Cn(self.phase, self.mol, self.T)
return C
### Composition properties ###
@property
def z_mol(self):
"""[1d array] Molar composition."""
mol = self.mol
z = mol / mol.sum()
z.setflags(0)
return z
@property
def z_mass(self):
"""[1d array] Mass composition."""
mass = self.chemicals.MW * self.mol
F_mass = mass.sum()
if F_mass == 0:
z = mass
else:
z = mass / mass.sum()
z.setflags(0)
return z
@property
def z_vol(self):
"""[1d array] Volumetric composition."""
vol = 1. * self.vol
z = vol / vol.sum()
z.setflags(0)
return z
@property
def MW(self):
"""[float] Overall molecular weight."""
return self.mixture.MW(self.mol)
@property
def V(self):
"""[float] Molar volume [m^3/mol]."""
V = self._get_property_cache('V')
if V is None:
self._property_cache['V'] = V = self.mixture.V(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return V
@property
def kappa(self):
"""[float] Thermal conductivity [W/m/k]."""
kappa = self._get_property_cache('kappa')
if kappa is None:
self._property_cache['kappa'] = kappa = self.mixture.kappa(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return kappa
@property
def Cn(self):
"""[float] Molar heat capacity [J/mol/K]."""
Cn = self._get_property_cache('Cn')
if Cn is None:
self._property_cache['Cn'] = Cn = self.mixture.Cn(
*self._imol.get_phase_and_composition(),
self.T
)
return Cn
@property
def mu(self):
"""[float] Hydrolic viscosity [Pa*s]."""
mu = self._get_property_cache('mu')
if mu is None:
self._property_cache['mu'] = mu = self.mixture.mu(
*self._imol.get_phase_and_composition(),
*self._thermal_condition
)
return mu
@property
def sigma(self):
"""[float] Surface tension [N/m]."""
mol = self.mol
sigma = self._get_property_cache('sigma')
if sigma is None:
self._property_cache['sigma'] = sigma = self.mixture.sigma(
mol / mol.sum(), *self._thermal_condition
)
return sigma
@property
def epsilon(self):
"""[float] Relative permittivity [-]."""
mol = self.mol
epsilon = self._get_property_cache('epsilon')
if epsilon is None:
self._property_cache['epsilon'] = epsilon = self.mixture.epsilon(
mol / mol.sum(), *self._thermal_condition
)
return epsilon
@property
def Cp(self):
"""[float] Heat capacity [J/g/K]."""
return self.Cn / self.MW
@property
def alpha(self):
"""[float] Thermal diffusivity [m^2/s]."""
return fn.alpha(self.kappa,
self.rho,
self.Cp * 1000.)
@property
def rho(self):
"""[float] Density [kg/m^3]."""
return fn.V_to_rho(self.V, self.MW)
@property
def nu(self):
"""[float] Kinematic viscosity [m^2/s]."""
return fn.mu_to_nu(self.mu, self.rho)
@property
def Pr(self):
"""[float] Prandtl number [-]."""
return fn.Pr(self.Cp * 1000,
self.kappa,
self.mu)
### Stream methods ###
@property
def available_chemicals(self):
"""list[Chemical] All chemicals with nonzero flow."""
return [i for i, j in zip(self.chemicals, self.mol) if j]
def in_thermal_equilibrium(self, other):
"""
Return whether or not stream is in thermal equilibrium with
another stream.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> stream = Stream(Water=1, T=300)
>>> other = Stream(Water=1, T=300)
>>> stream.in_thermal_equilibrium(other)
True
"""
return self._thermal_condition.in_equilibrium(other._thermal_condition)
@classmethod
def sum(cls, streams, ID=None, thermo=None, energy_balance=True):
"""
Return a new Stream object that represents the sum of all given streams.
Examples
--------
Sum two streams:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s_sum = tmo.Stream.sum([s1, s1], 's_sum')
>>> s_sum.show(flow='kg/hr')
Stream: s_sum
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
Sum two streams with new property package:
>>> thermo = tmo.Thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s_sum = tmo.Stream.sum([s1, s1], 's_sum', thermo)
>>> s_sum.show(flow='kg/hr')
Stream: s_sum
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
"""
new = cls(ID, thermo=thermo)
if streams: new.copy_thermal_condition(streams[0])
new.mix_from(streams, energy_balance)
return new
def separate_out(self, other, energy_balance=True):
"""
Separate out given stream from this one.
Examples
--------
Separate out another stream with the same thermodynamic property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=30, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=10, Ethanol=5, units='kg/hr')
>>> s1.separate_out(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 5
It's also possible to separate out streams with different property packages
so long as all chemicals are defined in the mixed stream's property
package:
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1', Water=40, units='kg/hr')
>>> tmo.settings.set_thermo(['Ethanol'], cache=True)
>>> s2 = tmo.Stream('s2', Ethanol=20, units='kg/hr')
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s_mix = tmo.Stream.sum([s1, s2], 's_mix')
>>> s_mix.separate_out(s2)
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Removing empty streams is fine too:
>>> s1.empty(); s_mix.separate_out(s1)
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
"""
if other:
if self is other: self.empty()
if energy_balance: H_new = self.H - other.H
self._imol.separate_out(other._imol)
if energy_balance: self.H = H_new
def mix_from(self, others, energy_balance=True, vle=False):
"""
Mix all other streams into this one, ignoring its initial contents.
Examples
--------
Mix two streams with the same thermodynamic property package:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = s1.copy('s2')
>>> s1.mix_from([s1, s2])
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
It's also possible to mix streams with different property packages
so long as all chemicals are defined in the mixed stream's property
package:
>>> tmo.settings.set_thermo(['Water'], cache=True)
>>> s1 = tmo.Stream('s1', Water=40, units='kg/hr')
>>> tmo.settings.set_thermo(['Ethanol'], cache=True)
>>> s2 = tmo.Stream('s2', Ethanol=20, units='kg/hr')
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s_mix = tmo.Stream('s_mix')
>>> s_mix.mix_from([s1, s2])
>>> s_mix.show(flow='kg/hr')
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 40
Ethanol 20
Mixing empty streams is fine too:
>>> s1.empty(); s2.empty(); s_mix.mix_from([s1, s2])
>>> s_mix.show()
Stream: s_mix
phase: 'l', T: 298.15 K, P: 101325 Pa
flow: 0
"""
others = [i for i in others if i]
N_others = len(others)
if N_others == 0:
self.empty()
elif N_others == 1:
self.copy_like(others[0])
elif vle:
phases = ''.join([i.phase for i in others])
self.phases = tuple(set(phases))
self._imol.mix_from([i._imol for i in others])
if energy_balance:
H = sum([i.H for i in others])
self.vle(H=self.H, P=self.P)
else:
self.vle(T=self.T, P=self.P)
else:
self.P = min([i.P for i in others])
if energy_balance: H = sum([i.H for i in others])
self._imol.mix_from([i._imol for i in others])
if energy_balance and not self.isempty():
try:
self.H = H
except:
phases = ''.join([i.phase for i in others])
self.phases = tuple(set(phases))
self._imol.mix_from([i._imol for i in others])
self.H = H
def split_to(self, s1, s2, split, energy_balance=True):
"""
Split molar flow rate from this stream to two others given
the split fraction or an array of split fractions.
Examples
--------
>>> import thermosteam as tmo
>>> chemicals = tmo.Chemicals(['Water', 'Ethanol'], cache=True)
>>> tmo.settings.set_thermo(chemicals)
>>> s = tmo.Stream('s', Water=20, Ethanol=10, units='kg/hr')
>>> s1 = tmo.Stream('s1')
>>> s2 = tmo.Stream('s2')
>>> split = chemicals.kwarray(dict(Water=0.5, Ethanol=0.1))
>>> s.split_to(s1, s2, split)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 10
Ethanol 1
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 10
Ethanol 9
"""
mol = self.mol
chemicals = self.chemicals
values = mol * split
dummy = mol - values
if s1.chemicals is chemicals:
s1.mol[:] = values
else:
CASs, values = zip(*[(i, j) for i, j in zip(chemicals.CASs, values) if j])
s1.empty()
s1._imol[CASs] = values
values = dummy
if s2.chemicals is chemicals:
s2.mol[:] = values
else:
s2.empty()
CASs, values = zip(*[(i, j) for i, j in zip(chemicals.CASs, values) if j])
s2._imol[CASs] = values
if energy_balance:
tc1 = s1._thermal_condition
tc2 = s2._thermal_condition
tc = self._thermal_condition
tc1._T = tc2._T = tc._T
tc1._P = tc2._P = tc._P
s1.phase = s2.phase = self.phase
    def link_with(self, other, flow=True, phase=True, TP=True):
        """
        Link with another stream.

        Parameters
        ----------
        other : Stream
        flow : bool, defaults to True
            Whether to link the flow rate data.
        phase : bool, defaults to True
            Whether to link the phase.
        TP : bool, defaults to True
            Whether to link the temperature and pressure.

        Raises
        ------
        RuntimeError
            If the streams' indexers are of different classes, or if this
            stream is already linked to an unrelated stream.

        See Also
        --------
        :obj:`~Stream.flow_proxy`
        :obj:`~Stream.proxy`

        Examples
        --------
        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
        >>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
        >>> s2 = tmo.Stream('s2')
        >>> s2.link_with(s1)
        >>> s1.mol is s2.mol
        True
        >>> s2.thermal_condition is s1.thermal_condition
        True
        >>> s1.phase = 'g'
        >>> s2.phase
        'g'
        """
        # Linking only makes sense between indexers of the same class.
        if not isinstance(other._imol, self._imol.__class__):
            at_unit = f" at unit {self.source}" if self.source is other.sink else ""
            raise RuntimeError(f"stream {self} cannot link with stream {other}" + at_unit
                               + "; streams must have the same class to link")
        # A stream may only be re-linked across a direct source/sink
        # connection; otherwise an existing link would be silently broken.
        if self._islinked and not (self.source is other.sink or self.sink is other.source):
            raise RuntimeError(f"stream {self} cannot link with stream {other};"
                               f" {self} already linked")
        # Share the property cache only when all thermo-relevant data
        # (T/P, flows, and phase) is shared; otherwise clear it so stale
        # values cannot leak across the link.
        if TP and flow and (phase or self._imol._data.ndim == 2):
            self._imol._data_cache = other._imol._data_cache
        else:
            self._imol._data_cache.clear()
        if TP:
            self._thermal_condition = other._thermal_condition
        if flow:
            self._imol._data = other._imol._data
        # Phase objects are only shared for single-phase (1-D) data;
        # multi-phase data carries phases in its second dimension.
        if phase and self._imol._data.ndim == 1:
            self._imol._phase = other._imol._phase
        self._islinked = other._islinked = True
    def unlink(self):
        """
        Unlink stream from other streams by giving it private copies of all
        shared data (flows, phase, and thermal condition).

        Raises
        ------
        RuntimeError
            If the stream's phase is locked (e.g. a phase of a MultiStream).

        Examples
        --------
        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
        >>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
        >>> s2 = tmo.Stream('s2')
        >>> s2.link_with(s1)
        >>> s1.unlink()
        >>> s2.mol is s1.mol
        False

        MultiStream phases cannot be unlinked:

        >>> s1 = tmo.MultiStream(None, phases=('l', 'g'))
        >>> s1['g'].unlink()
        Traceback (most recent call last):
        RuntimeError: phase is locked; stream cannot be unlinked
        """
        imol = self._imol
        # A locked phase means this stream is a view into a MultiStream
        # phase and cannot own independent data.
        if hasattr(imol, '_phase') and isinstance(imol._phase, tmo._phase.LockedPhase):
            raise RuntimeError('phase is locked; stream cannot be unlinked')
        if self._islinked:
            # Replace every shared object with a private copy.
            imol._data_cache.clear()
            imol._data = imol._data.copy()
            imol._phase = imol._phase.copy()
            self._thermal_condition = self._thermal_condition.copy()
            self.reset_cache()
            self._islinked = False
    def copy_like(self, other):
        """
        Copy all conditions (flows, T, and P) of another stream.

        Parameters
        ----------
        other : Stream
            Stream to copy from; may use a different property package so
            long as all of its chemicals are defined in this stream's
            property package.

        Examples
        --------
        Copy data from another stream with the same property package:

        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
        >>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
        >>> s2 = tmo.Stream('s2', Water=2, units='kg/hr')
        >>> s1.copy_like(s2)
        >>> s1.show(flow='kg/hr')
        Stream: s1
         phase: 'l', T: 298.15 K, P: 101325 Pa
         flow (kg/hr): Water  2

        Copy data from another stream with a different property package:

        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
        >>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
        >>> tmo.settings.set_thermo(['Water'], cache=True)
        >>> s2 = tmo.Stream('s2', Water=2, units='kg/hr')
        >>> s1.copy_like(s2)
        >>> s1.show(flow='kg/hr')
        Stream: s1
         phase: 'l', T: 298.15 K, P: 101325 Pa
         flow (kg/hr): Water  2
        """
        if isinstance(other, tmo.MultiStream):
            phase = other.phase
            if len(phase) == 1:
                # Only one phase has material; collapse to a single-phase
                # indexer for copying.
                imol = other._imol.to_chemical_indexer(phase)
            else:
                # Adopt the other stream's phases before copying the
                # multi-phase data.
                self.phases = other.phases
                imol = other._imol
        else:
            imol = other._imol
        self._imol.copy_like(imol)
        self._thermal_condition.copy_like(other._thermal_condition)
def copy_thermal_condition(self, other):
"""
Copy thermal conditions (T and P) of another stream.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=2, units='kg/hr')
>>> s2 = tmo.Stream('s2', Water=1, units='kg/hr', T=300.00)
>>> s1.copy_thermal_condition(s2)
>>> s1.show(flow='kg/hr')
Stream: s1
phase: 'l', T: 300 K, P: 101325 Pa
flow (kg/hr): Water 2
"""
self._thermal_condition.copy_like(other._thermal_condition)
def copy_flow(self, other, IDs=..., *, remove=False, exclude=False):
"""
Copy flow rates of another stream to self.
Parameters
----------
other : Stream
Flow rates will be copied from here.
IDs=... : Iterable[str], defaults to all chemicals.
Chemical IDs.
remove=False: bool, optional
If True, copied chemicals will be removed from `stream`.
exclude=False: bool, optional
If True, exclude designated chemicals when copying.
Examples
--------
Initialize streams:
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
Copy all flows:
>>> s2.copy_flow(s1)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
Reset and copy just water flow:
>>> s2.empty()
>>> s2.copy_flow(s1, 'Water')
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Reset and copy all flows except water:
>>> s2.empty()
>>> s2.copy_flow(s1, 'Water', exclude=True)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Ethanol 10
Cut and paste flows:
>>> s2.copy_flow(s1, remove=True)
>>> s2.show(flow='kg/hr')
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kg/hr): Water 20
Ethanol 10
>>> s1.show()
Stream: s1
phase: 'l', T: 298.15 K, P: 101325 Pa
flow: 0
Its also possible to copy flows from a multistream:
>>> s1.phases = ('g', 'l')
>>> s1.imol['g', 'Water'] = 10
>>> s2.copy_flow(s1, remove=True)
>>> s2.show()
Stream: s2
phase: 'l', T: 298.15 K, P: 101325 Pa
flow (kmol/hr): Water 10
>>> s1.show()
MultiStream: s1
phases: ('g', 'l'), T: 298.15 K, P: 101325 Pa
flow: 0
Copy flows except except water and remove water:
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s2 = tmo.Stream('s2')
>>> s2.copy_flow(s1, 'Water', exclude=True, remove=True)
"""
other_mol = other.mol
other_chemicals = other.chemicals
chemicals = self.chemicals
if IDs == ...:
if exclude: return
if chemicals is other_chemicals:
self.mol[:] = other.mol
else:
self.empty()
CASs, values = zip(*[(i, j) for i, j in zip(other_chemicals.CASs, other_mol) if j])
self.imol[CASs] = values
if remove:
if isinstance(other, tmo.MultiStream):
other.imol.data[:] = 0.
else:
other_mol[:] = 0.
else:
if exclude:
if isinstance(IDs, str):
if IDs in other_chemicals:
bad_index = other_chemicals.index(IDs)
other_index = [i for i in range(other_chemicals.size) if i != bad_index]
else:
other_index = slice()
else:
IDs = [i for i in IDs if i in other_chemicals]
bad_index = set(other_chemicals.indices(IDs))
if bad_index:
other_index = [i for i in range(other_chemicals.size) if i not in bad_index]
else:
other_index = slice()
else:
other_index = other_chemicals.get_index(IDs)
if chemicals is other_chemicals:
self.mol[other_index] = other_mol[other_index]
else:
CASs = other_chemicals.CASs
other_index = [i for i in other_index if other_mol[i] or CASs[i] in chemicals]
self.imol[tuple([CASs[i] for i in other_index])] = other_mol[other_index]
if remove:
if isinstance(other, tmo.MultiStream):
other.imol.data[:, other_index] = 0
else:
other_mol[other_index] = 0
    def copy(self, ID=None, thermo=None):
        """
        Return a copy of the stream.

        Parameters
        ----------
        ID : str, optional
            ID of the new stream.
        thermo : Thermo, optional
            Thermodynamic property package of the copy; defaults to this
            stream's package.

        Examples
        --------
        Create a copy of a new stream:

        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
        >>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
        >>> s1_copy = s1.copy('s1_copy')
        >>> s1_copy.show(flow='kg/hr')
        Stream: s1_copy
         phase: 'l', T: 298.15 K, P: 101325 Pa
         flow (kg/hr): Water    20
                       Ethanol  10

        Warnings
        --------
        Price and LCA characterization factors are not copied.
        """
        cls = self.__class__
        # Bypass __init__ and fill in fresh, unlinked state by hand.
        new = cls.__new__(cls)
        new._islinked = False
        new._sink = new._source = None
        new.characterization_factors = {}
        new._thermo = thermo or self._thermo
        new._imol = self._imol.copy()
        # A different property package requires remapping the flow data.
        if thermo and thermo.chemicals is not self.chemicals:
            new._imol.reset_chemicals(thermo.chemicals)
        new._thermal_condition = self._thermal_condition.copy()
        new._user_equilibrium = self._user_equilibrium
        new.reset_cache()
        new.price = 0
        new.ID = ID
        return new
    # Make `copy.copy` use the same logic.
    __copy__ = copy
    def flow_proxy(self, ID=None):
        """
        Return a new stream that shares flow rate data with this one (the
        thermal condition is copied, not shared).

        See Also
        --------
        :obj:`~Stream.link_with`
        :obj:`~Stream.proxy`

        Examples
        --------
        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
        >>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
        >>> s2 = s1.flow_proxy()
        >>> s2.mol is s1.mol
        True
        """
        cls = self.__class__
        new = cls.__new__(cls)
        # NOTE(review): the `ID` argument is accepted but never used — the
        # proxy is always left unregistered (ID=None). Confirm intent.
        new.ID = new._sink = new._source = None
        new.price = 0
        new._thermo = self._thermo
        # Share the underlying flow data array, but nothing else.
        new._imol = imol = self._imol._copy_without_data()
        imol._data = self._imol._data
        new._thermal_condition = self._thermal_condition.copy()
        new.reset_cache()
        new.characterization_factors = {}
        self._islinked = new._islinked = True
        new._user_equilibrium = self._user_equilibrium
        return new
    def proxy(self, ID=None):
        """
        Return a new stream that shares all thermochemical data with this
        one (flows, thermal condition, and property/equilibrium caches).

        See Also
        --------
        :obj:`~Stream.link_with`
        :obj:`~Stream.flow_proxy`

        Warning
        -------
        Price and characterization factor data is not shared

        Examples
        --------
        >>> import thermosteam as tmo
        >>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
        >>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
        >>> s2 = s1.proxy()
        >>> s2.imol is s1.imol and s2.thermal_condition is s1.thermal_condition
        True
        """
        cls = self.__class__
        new = cls.__new__(cls)
        new.ID = None
        new._sink = new._source = None
        new.price = self.price
        new._thermo = self._thermo
        # Share (not copy) every thermochemical object and cache.
        new._imol = self._imol
        new._thermal_condition = self._thermal_condition
        new._property_cache = self._property_cache
        new._property_cache_key = self._property_cache_key
        new._bubble_point_cache = self._bubble_point_cache
        new._dew_point_cache = self._dew_point_cache
        new._user_equilibrium = self._user_equilibrium
        # Only MultiStreams have a VLE cache; ignore its absence.
        try: new._vle_cache = self._vle_cache
        except AttributeError: pass
        new.characterization_factors = {}
        self._islinked = new._islinked = True
        return new
def empty(self):
"""Empty stream flow rates.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, units='kg/hr')
>>> s1.empty()
>>> s1.F_mol
0.0
"""
self._imol._data[:] = 0.
### Equilibrium ###
    @property
    def vle(self):
        """[VLE] An object that can perform vapor-liquid equilibrium on the stream."""
        # Setting `phases` converts this object to a MultiStream (see the
        # phases setter below), so the `self.vle` read resolves to the
        # MultiStream's vle object rather than recursing.
        self.phases = ('g', 'l')
        return self.vle
    @property
    def lle(self):
        """[LLE] An object that can perform liquid-liquid equilibrium on the stream."""
        # Setting `phases` converts this object to a MultiStream (see the
        # phases setter below), so the `self.lle` read resolves there.
        self.phases = ('L', 'l')
        return self.lle
    @property
    def sle(self):
        """[SLE] An object that can perform solid-liquid equilibrium on the stream."""
        # Setting `phases` converts this object to a MultiStream (see the
        # phases setter below), so the `self.sle` read resolves there.
        self.phases = ('s', 'l')
        return self.sle
@property
def vle_chemicals(self):
"""list[Chemical] Chemicals cabable of liquid-liquid equilibrium."""
chemicals = self.chemicals
chemicals_tuple = chemicals.tuple
indices = chemicals.get_vle_indices(self.mol != 0)
return [chemicals_tuple[i] for i in indices]
@property
def lle_chemicals(self):
"""list[Chemical] Chemicals cabable of vapor-liquid equilibrium."""
chemicals = self.chemicals
chemicals_tuple = chemicals.tuple
indices = chemicals.get_lle_indices(self.mol != 0)
return [chemicals_tuple[i] for i in indices]
def get_bubble_point(self, IDs=None):
"""
Return a BubblePoint object capable of computing bubble points.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.get_bubble_point()
BubblePoint([Water, Ethanol])
"""
chemicals = self.chemicals[IDs] if IDs else self.vle_chemicals
bp = self._bubble_point_cache(chemicals, self._thermo)
return bp
def get_dew_point(self, IDs=None):
"""
Return a DewPoint object capable of computing dew points.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.get_dew_point()
DewPoint([Water, Ethanol])
"""
chemicals = self.chemicals.retrieve(IDs) if IDs else self.vle_chemicals
dp = self._dew_point_cache(chemicals, self._thermo)
return dp
def bubble_point_at_T(self, T=None, IDs=None):
"""
Return a BubblePointResults object with all data on the bubble point at constant temperature.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.bubble_point_at_T()
BubblePointValues(T=350.00, P=76622, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.486 0.514])
"""
bp = self.get_bubble_point(IDs)
z = self.get_normalized_mol(bp.IDs)
return bp(z, T=T or self.T)
def bubble_point_at_P(self, P=None, IDs=None):
"""
Return a BubblePointResults object with all data on the bubble point at constant pressure.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.bubble_point_at_P()
BubblePointValues(T=357.09, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], y=[0.49 0.51])
"""
bp = self.get_bubble_point(IDs)
z = self.get_normalized_mol(bp.IDs)
return bp(z, P=P or self.P)
def dew_point_at_T(self, T=None, IDs=None):
"""
Return a DewPointResults object with all data on the dew point
at constant temperature.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all
chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.dew_point_at_T()
DewPointValues(T=350.00, P=48991, IDs=('Water', 'Ethanol'), z=[0.836 0.164], x=[0.984 0.016])
"""
dp = self.get_dew_point(IDs)
z = self.get_normalized_mol(dp.IDs)
return dp(z, T=T or self.T)
def dew_point_at_P(self, P=None, IDs=None):
"""
Return a DewPointResults object with all data on the dew point
at constant pressure.
Parameters
----------
IDs : Iterable[str], optional
Chemicals that participate in equilibrium. Defaults to all
chemicals in equilibrium.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, T=350, units='kg/hr')
>>> s1.dew_point_at_P()
DewPointValues(T=368.66, P=101325, IDs=('Water', 'Ethanol'), z=[0.836 0.164], x=[0.984 0.016])
"""
dp = self.get_dew_point(IDs)
z = self.get_normalized_mol(dp.IDs)
return dp(z, P=P or self.P)
def get_normalized_mol(self, IDs):
"""
Return normalized molar fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kmol/hr')
>>> s1.get_normalized_mol(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.imol[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_normalized_mass(self, IDs):
"""
Return normalized mass fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kg/hr')
>>> s1.get_normalized_mass(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.imass[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_normalized_vol(self, IDs):
"""
Return normalized mass fractions of given chemicals. The sum of the result is always 1.
Parameters
----------
IDs : tuple[str]
IDs of chemicals to be normalized.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_normalized_vol(('Water', 'Ethanol'))
array([0.667, 0.333])
"""
z = self.ivol[IDs]
z_sum = z.sum()
if not z_sum: raise RuntimeError(f'{repr(self)} is empty')
return z / z_sum
def get_molar_fraction(self, IDs):
"""
Return molar fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kmol/hr')
>>> s1.get_molar_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_mol = self.F_mol
return self.imol[IDs] / F_mol if F_mol else 0.
get_molar_composition = get_molar_fraction
def get_mass_fraction(self, IDs):
"""
Return mass fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='kg/hr')
>>> s1.get_mass_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_mass = self.F_mass
return self.imass[IDs] / F_mass if F_mass else 0.
get_mass_composition = get_mass_fraction
def get_volumetric_fraction(self, IDs):
"""
Return volumetric fraction of given chemicals.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_volumetric_fraction(('Water', 'Ethanol'))
array([0.5 , 0.25])
"""
F_vol = self.F_vol
return self.ivol[IDs] / F_vol if F_vol else 0.
get_volumetric_composition = get_volumetric_fraction
def get_concentration(self, IDs):
"""
Return concentration of given chemicals in kmol/m3.
Parameters
----------
IDs : tuple[str]
IDs of chemicals.
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol', 'Methanol'], cache=True)
>>> s1 = tmo.Stream('s1', Water=20, Ethanol=10, Methanol=10, units='m3/hr')
>>> s1.get_concentration(('Water', 'Ethanol'))
array([27.672, 4.265])
"""
F_vol = self.F_vol
return self.imol[IDs] / F_vol if F_vol else 0.
@property
def P_vapor(self):
"""Vapor pressure of liquid."""
chemicals = self.vle_chemicals
F_l = eq.LiquidFugacities(chemicals, self.thermo)
IDs = tuple([i.ID for i in chemicals])
x = self.get_molar_fraction(IDs)
if x.sum() < 1e-12: return 0
return F_l(x, self.T).sum()
    def receive_vent(self, other, energy_balance=True):
        """
        Receive vapors from another stream by vapor-liquid equilibrium between
        a gas and liquid stream assuming only a small amount of chemicals
        in vapor-liquid equilibrium is present

        Parameters
        ----------
        other : Stream
            Liquid stream that vents vapors into this (gas) stream.
        energy_balance : bool, defaults to True
            Whether to solve for the temperature that conserves the total
            enthalpy of both streams; if False, T is held constant.

        Examples
        --------
        The energy balance is performed by default:

        >>> import thermosteam as tmo
        >>> chemicals = tmo.Chemicals(['Water', 'Ethanol', 'Methanol', tmo.Chemical('N2', phase='g')], cache=True)
        >>> tmo.settings.set_thermo(chemicals)
        >>> s1 = tmo.Stream('s1', N2=20, units='m3/hr', phase='g', T=330)
        >>> s2 = tmo.Stream('s2', Water=10, Ethanol=2, T=330)
        >>> s1.receive_vent(s2)
        >>> s1.show(flow='kmol/hr')
        Stream: s1
         phase: 'g', T: 323.13 K, P: 101325 Pa
         flow (kmol/hr): Water    0.0798
                         Ethanol  0.0889
                         N2       0.739

        Set energy balance to false to receive vent isothermally:

        >>> import thermosteam as tmo
        >>> chemicals = tmo.Chemicals(['Water', 'Ethanol', 'Methanol', tmo.Chemical('N2', phase='g')], cache=True)
        >>> tmo.settings.set_thermo(chemicals)
        >>> s1 = tmo.Stream('s1', N2=20, units='m3/hr', phase='g', T=330)
        >>> s2 = tmo.Stream('s2', Water=10, Ethanol=2, T=330)
        >>> s1.receive_vent(s2, energy_balance=False)
        >>> s1.show(flow='kmol/hr')
        Stream: s1
         phase: 'g', T: 330 K, P: 101325 Pa
         flow (kmol/hr): Water    0.111
                         Ethanol  0.123
                         N2       0.739
        """
        assert self.phase == 'g', 'stream must be a gas to receive vent'
        # Work on a scratch multi-phase stream holding both contents.
        ms = tmo.Stream(None, T=self.T, P=self.P, thermo=self.thermo)
        ms.mix_from([self, other], energy_balance=False)
        if energy_balance: ms.H = H = self.H + other.H
        ms.vle._setup()
        chemicals = ms.vle_chemicals
        F_l = eq.LiquidFugacities(chemicals, ms.thermo)
        IDs = tuple([i.ID for i in chemicals])
        # Liquid-phase composition drives the fugacities below.
        x = other.get_molar_fraction(IDs)
        T = ms.T
        P = ms.P
        vapor = ms['g']
        liquid = ms['l']
        F_mol_vapor = vapor.F_mol
        mol_old = liquid.imol[IDs]
        if energy_balance:
            def equilibrium_approximation(T):
                # One Raoult's-law-style step: y = f_l / P at temperature T,
                # then transfer the implied vapor from liquid to gas.
                f_l = F_l(x, T)
                y = f_l / P
                mol_new = F_mol_vapor * y
                vapor.imol[IDs] = mol_new
                liquid.imol[IDs] = mol_old - mol_new
                # Clamp any over-vaporized chemical at zero liquid.
                index = liquid.mol < 0.
                vapor.mol[index] += liquid.mol[index]
                liquid.mol[index] = 0
                # Re-solve T from the enthalpy target for the next iteration.
                ms.H = H
                return ms.T
            # Iterate to the fixed-point temperature.
            flx.wegstein(equilibrium_approximation, T)
        else:
            # Isothermal: a single equilibrium step at the current T.
            f_l = F_l(x, T)
            y = f_l / P
            mol_new = F_mol_vapor * y
            vapor.imol[IDs] = mol_new
            liquid.imol[IDs] = mol_old - mol_new
            index = liquid.mol < 0.
            vapor.mol[index] += liquid.mol[index]
            liquid.mol[index] = 0
        # Write results back to the original streams.
        self.copy_like(vapor)
        other.copy_like(liquid)
        self.T = other.T = ms.T
### Casting ###
    @property
    def islinked(self):
        """
        [bool] Whether data regarding the thermal condition, material flow rates,
        and phases are shared with other streams.
        """
        return self._islinked
    @property
    def phases(self):
        """tuple[str] All phases present."""
        # A single-phase Stream always reports exactly one phase.
        return (self.phase,)
    @phases.setter
    def phases(self, phases):
        # No-op if the requested phases already match.
        if self.phases == phases: return
        # Shared data cannot survive a class change; sever links first.
        if self._islinked: self.unlink()
        if len(phases) == 1:
            # Collapsing to a single phase keeps this a plain Stream.
            self.phase = phases[0]
        else:
            # Promote in place to a MultiStream: swap the class, convert
            # the flow indexer, and build the equilibrium caches.
            self.__class__ = tmo.MultiStream
            self._imol = self._imol.to_material_indexer(phases)
            self._streams = {}
            self._vle_cache = eq.VLECache(self._imol,
                                          self._thermal_condition,
                                          self._thermo,
                                          self._bubble_point_cache,
                                          self._dew_point_cache)
            self._lle_cache = eq.LLECache(self._imol,
                                          self._thermal_condition,
                                          self._thermo)
            self._sle_cache = eq.SLECache(self._imol,
                                          self._thermal_condition,
                                          self._thermo)
### Representation ###
def _basic_info(self):
return f"{type(self).__name__}: {self.ID or ''}\n"
def _info_phaseTP(self, phase, T_units, P_units):
T = thermo_units.convert(self.T, 'K', T_units)
P = thermo_units.convert(self.P, 'Pa', P_units)
s = '' if isinstance(phase, str) else 's'
return f" phase{s}: {repr(phase)}, T: {T:.5g} {T_units}, P: {P:.6g} {P_units}\n"
def _source_info(self):
source = self.source
return f"{source}-{source.outs.index(self)}" if source else self.ID
def _translate_layout(self, layout, flow, composition, N):
if layout:
for param in (flow, composition, N):
if param is not None: raise ValueError(f'cannot specify both `layout` and `{param}`')
if layout[0] == 'c':
composition = True
layout = layout[1:]
if layout.startswith('wt'):
flow = 'kg/hr'
layout = layout[2:]
elif layout.startswith('mol'):
flow = 'kmol/hr'
layout = layout[3:]
elif layout.startswith('vol'):
flow = 'm3/hr'
layout = layout[3:]
elif layout.isdigit():
flow = 'kmol/hr'
else:
raise ValueError(
"`layout` must have the form "
"{'c' or ''}{'wt', 'mol' or 'vol'}{# or ''};"
"for example: 'cwt100' corresponds to compostion=True, "
"flow='kg/hr', and N=100."
)
if layout.isdigit():
N = int(layout)
return flow, composition, N
    def _info(self, layout, T, P, flow, composition, N, IDs):
        """Return string with all specifications."""
        flow, composition, N = self._translate_layout(layout, flow, composition, N)
        from .indexer import nonzeros
        basic_info = self._basic_info()
        # Gather the chemicals/flows to display, dropping zero flows.
        if not IDs:
            IDs = self.chemicals.IDs
            data = self.imol.data
        else:
            data = self.imol[IDs]
        IDs, data = nonzeros(IDs, data)
        IDs = tuple(IDs)
        # Fall back to the stream's display defaults for unset options.
        display_units = self.display_units
        T_units = T or display_units.T
        P_units = P or display_units.P
        flow_units = flow or display_units.flow
        N_max = display_units.N if N is None else N
        basic_info += self._info_phaseTP(self.phase, T_units, P_units)
        # N == 0 means "header only" (trailing newline stripped).
        if N_max == 0:
            return basic_info[:-1]
        composition = display_units.composition if composition is None else composition
        N_IDs = len(IDs)
        if N_IDs == 0:
            return basic_info + ' flow: 0'
        # Start of third line (flow rates)
        name, factor = self._get_flow_name_and_factor(flow_units)
        indexer = getattr(self, 'i' + name)
        # Remaining lines (all flow rates)
        flow_array = factor * indexer[IDs]
        if composition:
            total_flow = flow_array.sum()
            beginning = " composition: "
            new_line = '\n' + 14 * ' '
            flow_array = flow_array/total_flow
        else:
            beginning = f' flow ({flow_units}): '
            new_line = '\n' + len(beginning) * ' '
        flow_rates = ''
        # Pad chemical names so the numbers line up in a column.
        lengths = [len(i) for i in IDs]
        maxlen = max(lengths) + 2
        too_many_chemicals = N_IDs > N_max
        N = N_max if too_many_chemicals else N_IDs
        for i in range(N):
            spaces = ' ' * (maxlen - lengths[i])
            if i: flow_rates += new_line
            flow_rates += IDs[i] + spaces + f'{flow_array[i]:.3g}'
        # Indicate truncation when more chemicals exist than N allows.
        if too_many_chemicals: flow_rates += new_line + '...'
        if composition:
            # Composition display ends with a separator and the total flow.
            dashes = '-' * (maxlen - 2)
            flow_rates += f"{new_line}{dashes}  {total_flow:.3g} {flow_units}"
        return (basic_info
                + beginning
                + flow_rates)
def show(self, layout=None, T=None, P=None, flow=None, composition=None, N=None, IDs=None):
"""
Print all specifications.
Parameters
----------
layout : str, optional
Convenience paramater for passing `flow`, `composition`, and `N`.
Must have the form {'c' or ''}{'wt', 'mol' or 'vol'}{# or ''}.
For example: 'cwt100' corresponds to compostion=True, flow='kg/hr',
and N=100.
T : str, optional
Temperature units.
P : str, optional
Pressure units.
flow : str, optional
Flow rate units.
composition : bool, optional
Whether to show composition.
N : int, optional
Number of compounds to display.
IDs : tuple[str], optional
IDs of compounds to display. Defaults to all chemicals
.
Notes
-----
Default values are stored in `Stream.display_units`.
"""
print(self._info(layout, T, P, flow, composition, N, IDs))
_ipython_display_ = show
def print(self, units=None):
"""
Print in a format that you can use recreate the stream.
Parameters
----------
units : str, optional
Units of measure for material flow rates. Defaults to 'kmol/hr'
Examples
--------
>>> import thermosteam as tmo
>>> tmo.settings.set_thermo(['Water', 'Ethanol'], cache=True)
>>> s1 = tmo.Stream(ID='s1',
... Water=20, Ethanol=10, units='kg/hr',
... T=298.15, P=101325, phase='l')
>>> s1.print(units='kg/hr')
Stream(ID='s1', phase='l', T=298.15, P=101325, Water=20, Ethanol=10, units='kg/hr')
>>> s1.print() # Units default to kmol/hr
Stream(ID='s1', phase='l', T=298.15, P=101325, Water=1.11, Ethanol=0.2171, units='kmol/hr')
"""
if not units:
units = 'kmol/hr'
flow = self.mol
else:
flow = self.get_flow(units)
chemical_flows = utils.repr_IDs_data(self.chemicals.IDs, flow)
price = utils.repr_kwarg('price', self.price)
print(f"{type(self).__name__}(ID={repr(self.ID)}, phase={repr(self.phase)}, T={self.T:.2f}, "
f"P={self.P:.6g}{price}{chemical_flows}, units={repr(units)})")
|
[
"thermosteam.ThermalCondition",
"thermosteam.functional.V_to_rho",
"thermosteam.functional.mu_to_nu",
"numpy.asarray",
"numpy.isfinite",
"thermosteam.settings.get_impact_indicator_units",
"chemicals.elements.array_to_atoms",
"thermosteam.functional.Pr",
"thermosteam.Stream",
"numpy.dot",
"thermosteam.functional.alpha",
"flexsolve.wegstein"
] |
[((9089, 9115), 'thermosteam.ThermalCondition', 'tmo.ThermalCondition', (['T', 'P'], {}), '(T, P)\n', (9109, 9115), True, 'import thermosteam as tmo\n'), ((18628, 18646), 'numpy.isfinite', 'np.isfinite', (['price'], {}), '(price)\n', (18639, 18646), True, 'import numpy as np\n'), ((25333, 25388), 'chemicals.elements.array_to_atoms', 'array_to_atoms', (['(self.chemicals.formula_array @ self.mol)'], {}), '(self.chemicals.formula_array @ self.mol)\n', (25347, 25388), False, 'from chemicals.elements import array_to_atoms, symbol_to_index\n'), ((31046, 31081), 'numpy.dot', 'np.dot', (['self.chemicals.MW', 'self.mol'], {}), '(self.chemicals.MW, self.mol)\n', (31052, 31081), True, 'import numpy as np\n'), ((38354, 38402), 'thermosteam.functional.alpha', 'fn.alpha', (['self.kappa', 'self.rho', '(self.Cp * 1000.0)'], {}), '(self.kappa, self.rho, self.Cp * 1000.0)\n', (38362, 38402), True, 'from thermosteam import functional as fn\n'), ((38540, 38568), 'thermosteam.functional.V_to_rho', 'fn.V_to_rho', (['self.V', 'self.MW'], {}), '(self.V, self.MW)\n', (38551, 38568), True, 'from thermosteam import functional as fn\n'), ((38667, 38697), 'thermosteam.functional.mu_to_nu', 'fn.mu_to_nu', (['self.mu', 'self.rho'], {}), '(self.mu, self.rho)\n', (38678, 38697), True, 'from thermosteam import functional as fn\n'), ((38787, 38829), 'thermosteam.functional.Pr', 'fn.Pr', (['(self.Cp * 1000)', 'self.kappa', 'self.mu'], {}), '(self.Cp * 1000, self.kappa, self.mu)\n', (38792, 38829), True, 'from thermosteam import functional as fn\n'), ((75134, 75190), 'thermosteam.Stream', 'tmo.Stream', (['None'], {'T': 'self.T', 'P': 'self.P', 'thermo': 'self.thermo'}), '(None, T=self.T, P=self.P, thermo=self.thermo)\n', (75144, 75190), True, 'import thermosteam as tmo\n'), ((13425, 13469), 'thermosteam.settings.get_impact_indicator_units', 'tmo.settings.get_impact_indicator_units', (['key'], {}), '(key)\n', (13464, 13469), True, 'import thermosteam as tmo\n'), ((14303, 14347), 
'thermosteam.settings.get_impact_indicator_units', 'tmo.settings.get_impact_indicator_units', (['key'], {}), '(key)\n', (14342, 14347), True, 'import thermosteam as tmo\n'), ((26880, 26909), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (26890, 26909), True, 'import numpy as np\n'), ((76130, 76172), 'flexsolve.wegstein', 'flx.wegstein', (['equilibrium_approximation', 'T'], {}), '(equilibrium_approximation, T)\n', (76142, 76172), True, 'import flexsolve as flx\n'), ((23042, 23071), 'numpy.asarray', 'np.asarray', (['flow'], {'dtype': 'float'}), '(flow, dtype=float)\n', (23052, 23071), True, 'import numpy as np\n')]
|
import mujoco_py
from pathlib import Path
from mushroom_rl.utils import spaces
from mushroom_rl.environments.mujoco import MuJoCo, ObservationType
from mushroom_rl.utils.running_stats import *
from ._external_simulation import NoExternalSimulation, MuscleSimulation
from .reward_goals import CompleteTrajectoryReward, VelocityProfileReward, \
MaxVelocityReward, NoGoalReward, HumanoidTrajectory
from mushroom_rl.environments.mujoco_envs.humanoid_gait.utils import quat_to_euler
class HumanoidGait(MuJoCo):
"""
Mujoco simulation of a Humanoid Model, based on:
"A deep reinforcement learning based approach towards generating human
walking behavior with a neuromuscular model".
<NAME>., <NAME>., <NAME>., and <NAME>. (2019).
"""
def __init__(self, gamma=0.99, horizon=2000, n_intermediate_steps=10,
use_muscles=True, goal_reward=None, goal_reward_params=None,
obs_avg_window=1, act_avg_window=1):
"""
Constructor.
Args:
gamma (float, 0.99): discount factor for the environment;
horizon (int, 2000): horizon for the environment;
n_intermediate_steps (int, 10): number of steps to apply the same
action to the environment and wait for the next observation;
use_muscles (bool): if external muscle simulation should be used
for actions. If not apply torques directly to the joints;
goal_reward (string, None): type of trajectory used for training
Options available:
'trajectory' - Use trajectory in assets/GaitTrajectory.npz
as reference;
'com_vel_trajectory' - Use only velocity trajectory of COM in
assets/GaitTrajectory.npz as reference;
'vel_profile' - Velocity goal for the center of mass of the
model to follow. The goal is given by a
VelocityProfile instance (or subclass).
And should be included in the
``goal_reward_params``;
'max_vel' - Tries to achieve the maximum possible
velocity;
None - Follows no goal(just tries to survive);
goal_reward_params (dict, None): params needed for creation goal
reward;
obs_avg_window (int, 1): size of window used to average
observations;
act_avg_window (int, 1): size of window used to average actions.
"""
self.use_muscles = use_muscles
self.goal_reward = goal_reward
self.act_avg_window = act_avg_window
self.obs_avg_window = obs_avg_window
model_path = Path(__file__).resolve().parent.parent / "data" / "humanoid_gait" / "human7segment.xml"
action_spec = ["right_hip_frontal", "right_hip_sagittal",
"right_knee", "right_ankle", "left_hip_frontal",
"left_hip_sagittal", "left_knee", "left_ankle",
]
observation_spec = [("root", ObservationType.JOINT_POS),
("right_hip_frontal", ObservationType.JOINT_POS),
("right_hip_sagittal", ObservationType.JOINT_POS),
("right_knee", ObservationType.JOINT_POS),
("right_ankle", ObservationType.JOINT_POS),
("left_hip_frontal", ObservationType.JOINT_POS),
("left_hip_sagittal", ObservationType.JOINT_POS),
("left_knee", ObservationType.JOINT_POS),
("left_ankle", ObservationType.JOINT_POS),
("root", ObservationType.JOINT_VEL),
("right_hip_frontal", ObservationType.JOINT_VEL),
("right_hip_sagittal", ObservationType.JOINT_VEL),
("right_knee", ObservationType.JOINT_VEL),
("right_ankle", ObservationType.JOINT_VEL),
("left_hip_frontal", ObservationType.JOINT_VEL),
("left_hip_sagittal", ObservationType.JOINT_VEL),
("left_knee", ObservationType.JOINT_VEL),
("left_ankle", ObservationType.JOINT_VEL),
]
collision_groups = [("floor", ["floor"]),
("left_foot", ["left_foot"]),
("right_foot", ["right_foot"])
]
super().__init__(model_path.as_posix(), action_spec, observation_spec, gamma=gamma,
horizon=horizon, n_substeps=1,
n_intermediate_steps=n_intermediate_steps,
collision_groups=collision_groups)
if use_muscles:
self.external_actuator = MuscleSimulation(self._sim)
self.info.action_space = spaces.Box(
*self.external_actuator.get_action_space())
else:
self.external_actuator = NoExternalSimulation()
low, high = self.info.action_space.low.copy(),\
self.info.action_space.high.copy()
self.norm_act_mean = (high + low) / 2.0
self.norm_act_delta = (high - low) / 2.0
self.info.action_space.low[:] = -1.0
self.info.action_space.high[:] = 1.0
if goal_reward_params is None:
goal_reward_params = dict()
if goal_reward == "trajectory" or goal_reward == "com_vel_trajectory":
control_dt = self._sim.model.opt.timestep * self._n_intermediate_steps
self.goal_reward = CompleteTrajectoryReward(self._sim, control_dt,
**goal_reward_params)
elif goal_reward == "vel_profile":
self.goal_reward = VelocityProfileReward(self._sim, **goal_reward_params)
elif goal_reward == "max_vel":
self.goal_reward = MaxVelocityReward(self._sim, **goal_reward_params)
elif goal_reward is None:
self.goal_reward = NoGoalReward()
else:
raise NotImplementedError("The specified goal reward has not been"
"implemented: ", goal_reward)
if goal_reward == "trajectory":
self.reward_weights = dict(live_reward=0.10, goal_reward=0.40,
traj_vel_reward=0.50,
move_cost=0.10, fall_cost=0.00)
elif goal_reward == "com_vel_trajectory":
self.reward_weights = dict(live_reward=0.00, goal_reward=0.00,
traj_vel_reward=1.00,
move_cost=0.00, fall_cost=0.00)
else:
self.reward_weights = dict(live_reward=0.10, goal_reward=0.90,
traj_vel_reward=0.00,
move_cost=0.10, fall_cost=0.00)
self.info.observation_space = spaces.Box(*self._get_observation_space())
self.mean_grf = RunningAveragedWindow(shape=(6,),
window_size=n_intermediate_steps)
self.mean_vel = RunningExpWeightedAverage(shape=(3,), alpha=0.005)
self.mean_obs = RunningAveragedWindow(
shape=self.info.observation_space.shape,
window_size=obs_avg_window
)
self.mean_act = RunningAveragedWindow(
shape=self.info.action_space.shape, window_size=act_avg_window)
def step(self, action):
action = ((action.copy() * self.norm_act_delta) + self.norm_act_mean)
state, reward, absorbing, info = super().step(action)
self.mean_obs.update_stats(state)
self.mean_vel.update_stats(self._sim.data.qvel[0:3])
avg_obs = self.mean_obs.mean
avg_obs[13:16] = self.mean_vel.mean
return avg_obs, reward, absorbing, info
def render(self):
if self._viewer is None:
self._viewer = mujoco_py.MjViewer(self._sim)
self._viewer._render_every_frame = True
self._viewer.render()
def _setup(self):
self.goal_reward.reset_state()
start_obs = self._reset_model(qpos_noise=0.0, qvel_noise=0.0)
start_vel = (
self._sim.data.qvel[0:3] if (self.goal_reward is None or isinstance(
self.goal_reward, MaxVelocityReward)
) else self.goal_reward.get_observation())
self.mean_vel.reset(start_vel)
self.mean_obs.reset(start_obs)
self.mean_act.reset()
self.external_actuator.reset()
def _reward(self, state, action, next_state):
live_reward = 1.0
goal_reward = self.goal_reward(state, action, next_state)
traj_vel_reward = 0.0
if isinstance(self.goal_reward, HumanoidTrajectory):
traj_vel_reward = np.exp(-20.0 * np.square(
next_state[13] - next_state[33]))
move_cost = self.external_actuator.cost(
state, action / self.norm_act_delta, next_state)
fall_cost = 0.0
if self._has_fallen(next_state):
fall_cost = 1.0
total_reward = self.reward_weights["live_reward"] * live_reward \
+ self.reward_weights["goal_reward"] * goal_reward \
+ self.reward_weights["traj_vel_reward"] * traj_vel_reward \
- self.reward_weights["move_cost"] * move_cost \
- self.reward_weights["fall_cost"] * fall_cost
return total_reward
def _is_absorbing(self, state):
return (self._has_fallen(state)
or self.goal_reward.is_absorbing(state)
or self.external_actuator.is_absorbing(state)
)
def _get_observation_space(self):
sim_low, sim_high = (self.info.observation_space.low[2:],
self.info.observation_space.high[2:])
grf_low, grf_high = (-np.ones((6,)) * np.inf,
np.ones((6,)) * np.inf)
r_low, r_high = self.goal_reward.get_observation_space()
a_low, a_high = self.external_actuator.get_observation_space()
return (np.concatenate([sim_low, grf_low, r_low, a_low]),
np.concatenate([sim_high, grf_high, r_high, a_high]))
def _reset_model(self, qpos_noise=0.0, qvel_noise=0.0):
self._set_state(self._sim.data.qpos + np.random.uniform(
low=-qpos_noise, high=qpos_noise, size=self._sim.model.nq),
self._sim.data.qvel + np.random.uniform(low=-qvel_noise,
high=qvel_noise,
size=self._sim.model.nv)
)
return self._create_observation()
def _set_state(self, qpos, qvel):
old_state = self._sim.get_state()
new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
old_state.act, old_state.udd_state)
self._sim.set_state(new_state)
self._sim.forward()
@staticmethod
def _has_fallen(state):
torso_euler = quat_to_euler(state[1:5])
return ((state[0] < 0.90) or (state[0] > 1.20)
or abs(torso_euler[0]) > np.pi / 12
or (torso_euler[1] < -np.pi / 12) or (torso_euler[1] > np.pi / 8)
or (torso_euler[2] < -np.pi / 4) or (torso_euler[2] > np.pi / 4)
)
def _create_observation(self):
"""
Creates full vector of observations:
obs[0:13] -> qpos(from mujoco obs)
obs[0] -> torso z pos
obs[1:5] -> torso quaternion orientation
obs[5:13] -> leg joints angle
obs[13:27] -> qvel(from mujoco obs)
obs[13:16] -> torso linear velocity
obs[16:19] -> torso angular velocity
obs[19:27] -> leg joints angular velocity
obs[27:30] -> ground force
obs[27:30] -> ground force on right foot(xyz)
obs[30:33] -> ground force on left foot(xyz)
obs[33:33+(len(goal_observation)] -> observations related
to the goal
obs[last_obs_id - len(ext_actuator_obs): last_obs_id]
-> observations related to the external actuator
"""
obs = np.concatenate([super(HumanoidGait, self)._create_observation()[2:],
self.mean_grf.mean / 1000.,
self.goal_reward.get_observation(),
self.external_actuator.get_observation()
]).flatten()
return obs
def _preprocess_action(self, action):
action = self.external_actuator.preprocess_action(action)
self.mean_act.update_stats(action)
return self.mean_act.mean
def _step_init(self, state, action):
self.external_actuator.initialize_internal_states(state, action)
def _compute_action(self, action):
action = self.external_actuator.external_stimulus_to_joint_torques(
action
)
return action
def _simulation_post_step(self):
grf = np.concatenate(
[self._get_collision_force("floor", "right_foot")[:3],
self._get_collision_force("floor", "left_foot")[:3]]
)
self.mean_grf.update_stats(grf)
def _step_finalize(self):
self.goal_reward.update_state()
self.external_actuator.update_state()
def _get_body_center_of_mass_pos(self, body_name):
return self._sim.data.subtree_com[
self._sim.model._body_name2id[body_name]]
|
[
"mujoco_py.MjViewer",
"pathlib.Path",
"mujoco_py.MjSimState",
"mushroom_rl.environments.mujoco_envs.humanoid_gait.utils.quat_to_euler"
] |
[((11287, 11376), 'mujoco_py.MjSimState', 'mujoco_py.MjSimState', (['old_state.time', 'qpos', 'qvel', 'old_state.act', 'old_state.udd_state'], {}), '(old_state.time, qpos, qvel, old_state.act, old_state.\n udd_state)\n', (11307, 11376), False, 'import mujoco_py\n'), ((11549, 11574), 'mushroom_rl.environments.mujoco_envs.humanoid_gait.utils.quat_to_euler', 'quat_to_euler', (['state[1:5]'], {}), '(state[1:5])\n', (11562, 11574), False, 'from mushroom_rl.environments.mujoco_envs.humanoid_gait.utils import quat_to_euler\n'), ((8359, 8388), 'mujoco_py.MjViewer', 'mujoco_py.MjViewer', (['self._sim'], {}), '(self._sim)\n', (8377, 8388), False, 'import mujoco_py\n'), ((2963, 2977), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2967, 2977), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_shellcraft.
Tests for `shellcraft` module.
"""
from __future__ import unicode_literals
import os
import pytest
from click.testing import CliRunner
import pkg_resources
from shellcraft.cli import get_game, cli
from shellcraft.shellcraft import Game
@pytest.fixture(scope="module")
def game():
"""Create a local game."""
runner = CliRunner()
with runner.isolated_filesystem():
game = get_game("test.json")
return game
def load_game(filename):
"""Load game from fixtures."""
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "fixtures", filename
)
print(filename)
return Game.load(filename)
def test_basic_cli(game):
"""Test that the interface loads."""
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
assert "Welcome to ShellCraft" in result.output
help_result = runner.invoke(cli, ["--help"])
assert help_result.exit_code == 0
assert "Show this message and exit." in help_result.output
version_result = runner.invoke(cli, ["--version"])
assert version_result.exit_code == 0
assert pkg_resources.get_distribution("shellcraft").version in version_result.output
def test_contract(game):
game = load_game("save1.json")
assert game.resources.get("clay") == 30
def test_game_run(game):
"""Test that the basic game flow works."""
commands = """
mine clay
mine clay
mine clay
mine clay
craft shovel
mine clay
mine clay
mine clay
mine clay
mine clay
craft sturdy_shovel
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
mine clay
research small_cart
craft small_cart
mine clay"""
runner = CliRunner()
game.state.debug = True
for command in commands.splitlines():
assert not command or command.split()[0] in list(
game.state.commands_enabled
), "{} not in {}".format(command.split()[0], list(game.state.commands_enabled))
runner.invoke(cli, command.split())
game.tutorial.cont()
assert "small_cart" in game.state.research_completed
assert game.state.tutorial_step == 11
assert game.resources.get("clay") == 4
|
[
"pkg_resources.get_distribution",
"shellcraft.cli.get_game",
"shellcraft.shellcraft.Game.load",
"os.path.abspath",
"pytest.fixture",
"click.testing.CliRunner"
] |
[((310, 340), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (324, 340), False, 'import pytest\n'), ((397, 408), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (406, 408), False, 'from click.testing import CliRunner\n'), ((702, 721), 'shellcraft.shellcraft.Game.load', 'Game.load', (['filename'], {}), '(filename)\n', (711, 721), False, 'from shellcraft.shellcraft import Game\n'), ((804, 815), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (813, 815), False, 'from click.testing import CliRunner\n'), ((1851, 1862), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1860, 1862), False, 'from click.testing import CliRunner\n'), ((463, 484), 'shellcraft.cli.get_game', 'get_game', (['"""test.json"""'], {}), "('test.json')\n", (471, 484), False, 'from shellcraft.cli import get_game, cli\n'), ((616, 641), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (631, 641), False, 'import os\n'), ((1192, 1236), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""shellcraft"""'], {}), "('shellcraft')\n", (1222, 1236), False, 'import pkg_resources\n')]
|
"""
Module for classes to prepare validation dataset from MedInfo dataset.
Data format will be {key: {'question': question, 'summary':, summ, 'articles': articles} ...}
Additionally, format for question driven summarization. For example:
python prepare_validation_data.py -t --add-q
"""
import json
import argparse
import re
import spacy
import rouge
def get_args():
"""
Argument defnitions
"""
parser = argparse.ArgumentParser(description="Arguments for data exploration")
parser.add_argument("--pg",
dest="pg",
action="store_true",
help="tag the sentences with <s> and </s>, for use with pointer generator network")
parser.add_argument("--bart",
dest="bart",
action="store_true",
help="Prepare data for BART")
parser.add_argument("--add-q",
dest="add_q",
action="store_true",
help="Concatenate the question to the beginning of the text for question driven summarization")
return parser
class MedInfo():
def __init__(self):
"""
Initiate class for processing medinfo collection
"""
self.nlp = spacy.load('en_core_web_sm')
if args.add_q:
self.q_name = "_with_question"
else:
self.q_name = "_without_question"
def _load_collection(self):
"""
Load medinfo collection prepared in the process_medinfo.py script
"""
with open("data/medinfo_collection.json", "r", encoding="utf-8") as f:
medinfo = json.load(f)
return medinfo
def _format_summary_sentences(self, summary):
"""
Split summary into sentences and add sentence tags to the strings: <s> and </s>
"""
tokenized_abs = self.nlp(summary)
summary = " ".join(["<s> {s} </s>".format(s=s.text.strip()) for s in tokenized_abs.sents])
return summary
def save_section2answer_validation_data(self, tag_sentences):
"""
For questions that have a corresponding section-answer pair, save the
validation data in following format
{'question': {'summary': text, 'articles': text}}
"""
dev_dict = {}
medinfo = self._load_collection()
data_pair = 0
Q_END = " [QUESTION?] "
for i, question in enumerate(medinfo):
try:
# There may be multiple answers per question, but for the sake of the validation set,
# just use the first answer
if 'section_text' in medinfo[question][0]:
article = medinfo[question][0]['section_text']
summary = medinfo[question][0]['answer']
# Stripping of whitespace was done in processing script for section and full page
# but not for answer or question
summary = re.sub(r"\s+", " ", summary)
question = re.sub(r"\s+", " ", question)
if args.add_q:
article = question + Q_END + article
assert len(summary) <= (len(article) + 10)
if tag_sentences:
summary = self._format_summary_sentences(summary)
tag_string = "_s-tags"
else:
tag_string = ""
data_pair += 1
dev_dict[i] = {'question': question, 'summary': summary, 'articles': article}
except AssertionError:
print("Answer longer than summary. Skipping element")
print("Number of page-section pairs:", data_pair)
with open("data/medinfo_section2answer_validation_data{0}{1}.json".format(self.q_name, tag_string), "w", encoding="utf-8") as f:
json.dump(dev_dict, f, indent=4)
def process_data():
"""
Main function for saving data
"""
# Run once for each
if args.pg:
MedInfo().save_section2answer_validation_data(tag_sentences=True)
if args.bart:
MedInfo().save_section2answer_validation_data(tag_sentences=False)
if __name__ == "__main__":
global args
args = get_args().parse_args()
process_data()
|
[
"json.dump",
"json.load",
"argparse.ArgumentParser",
"spacy.load",
"re.sub"
] |
[((425, 494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Arguments for data exploration"""'}), "(description='Arguments for data exploration')\n", (448, 494), False, 'import argparse\n'), ((1286, 1314), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (1296, 1314), False, 'import spacy\n'), ((1673, 1685), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1682, 1685), False, 'import json\n'), ((3930, 3962), 'json.dump', 'json.dump', (['dev_dict', 'f'], {'indent': '(4)'}), '(dev_dict, f, indent=4)\n', (3939, 3962), False, 'import json\n'), ((3009, 3037), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'summary'], {}), "('\\\\s+', ' ', summary)\n", (3015, 3037), False, 'import re\n'), ((3069, 3098), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'question'], {}), "('\\\\s+', ' ', question)\n", (3075, 3098), False, 'import re\n')]
|
import wheel
from src import roulette_cancellation
from src import bet
from src import table
from src import wheel
from src import roulette_game
import unittest
class TestCancellation(unittest.TestCase):
def setUp(self):
self.wheel = wheel.Wheel()
self.table = table.Table(minimum=10, maximum=1000)
self.game = roulette_game.RouletteGame(self.wheel, self.table)
self.player = roulette_cancellation.RouletteCancellation(table=self.table, wheel=self.wheel)
def test_placeBets(self):
self.assertEqual(len(self.table.bets), 0)
self.assertEqual(self.player.stake, 10000)
self.player.placeBets(self.game)
self.assertEqual(len(self.table.bets), 1)
self.assertEqual(self.player.stake, 9993)
def test_win(self):
self.assertEqual(len(self.player.sequence), 6)
if len(self.player.sequence)==1:
bet_amount = self.player.sequence[0]
elif len(self.player.sequence)>1:
bet_amount = self.player.sequence[0] + self.player.sequence[-1]
self.player.win(bet.Bet(outcome=self.wheel.all_outcomes.get('Black'), amount=bet_amount))
self.assertEqual(len(self.player.sequence), 4)
self.assertEqual(len(self.player.sequence), 4)
def test_lose(self):
self.assertEqual(len(self.player.sequence), 6)
self.player.lose()
self.assertEqual(len(self.player.sequence), 7)
self.assertEqual(self.player.sequence[-1], self.player.sequence[0]+self.player.sequence[-2])
def tearDown(self):
self.wheel = None
self.table = None
self.player = None
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"src.table.Table",
"src.roulette_game.RouletteGame",
"src.wheel.Wheel",
"src.roulette_cancellation.RouletteCancellation"
] |
[((1678, 1693), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1691, 1693), False, 'import unittest\n'), ((247, 260), 'src.wheel.Wheel', 'wheel.Wheel', ([], {}), '()\n', (258, 260), False, 'from src import wheel\n'), ((282, 319), 'src.table.Table', 'table.Table', ([], {'minimum': '(10)', 'maximum': '(1000)'}), '(minimum=10, maximum=1000)\n', (293, 319), False, 'from src import table\n'), ((340, 390), 'src.roulette_game.RouletteGame', 'roulette_game.RouletteGame', (['self.wheel', 'self.table'], {}), '(self.wheel, self.table)\n', (366, 390), False, 'from src import roulette_game\n'), ((413, 491), 'src.roulette_cancellation.RouletteCancellation', 'roulette_cancellation.RouletteCancellation', ([], {'table': 'self.table', 'wheel': 'self.wheel'}), '(table=self.table, wheel=self.wheel)\n', (455, 491), False, 'from src import roulette_cancellation\n')]
|
# AUTHOR: <NAME>
# 13-02-2022
# calculate the gini coefficient give the retrievability file
import os
import argparse
from collections import defaultdict
def check_file_exists(filename):
if filename and not os.path.exists(filename):
print("{0} Not Found".format(filename))
quit(1)
def calculate_gini(list_of_values):
# https://planspace.org/2013/06/21/how-to-calculate-gini-coefficient-from-raw-data-in-python/
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2.
return (fair_area - area) / fair_area
def process_results(ret_file):
ret_scores = []
ret_total = 0.0
with open(ret_file, "r") as rf:
while rf:
line = rf.readline().strip()
if not line:
break
(doc_id, score) = line.split('\t')
doc_id = doc_id.strip()
score = float(score.strip())
ret_scores.append(score)
ret_total += score
rf.close()
print(f'Read in {len(ret_scores)} scores.')
print(f'Total Retrievability Mass: {ret_total:.4f}')
g = calculate_gini(ret_scores)
print(f'Gini Cofficient is: {g:.4f}')
def parse_args():
arg_parser = argparse.ArgumentParser(description="Gini Cofficient Calculator")
arg_parser.add_argument("ret_file", help="A retrievability file. Two colum tab/space sep file with fields:"
"doc_id retrievability_score")
args = arg_parser.parse_args()
return args
def main(ret_file):
print(f'About to compute the Gini given the retrievability file {ret_file}')
process_results(ret_file)
print(f'Done!')
if __name__ == '__main__':
args = parse_args()
check_file_exists(args.ret_file)
main(args.ret_file)
|
[
"os.path.exists",
"argparse.ArgumentParser"
] |
[((1332, 1397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Gini Cofficient Calculator"""'}), "(description='Gini Cofficient Calculator')\n", (1355, 1397), False, 'import argparse\n'), ((213, 237), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (227, 237), False, 'import os\n')]
|
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_array
import numpy as np
from ..utils.tools import Solver
class MissForest(Solver):
def __init__(
self,
n_estimators=300,
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
max_features='auto',
max_samples=None,
normalizer='min_max'):
"""
Parameters
----------
n_estimators: integer, optional (default=10)
max_depth: integer or None, optional (default=None)
The maximum depth of the tree.
If None, then nodes are expanded until all leaves are pure
or until all leaves contain less than min_samples_split samples.
min_samples_split: int, float, optional (default=2)
The minimum number of samples required to split an internal node
min_samples_leaf: int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves
at least min_samples_leaf training samples in each of the left and right branches.
This may have the effect of smoothing the model, especially in regression.
max_features: int, float, string or None, optional (default=”auto”)
The number of features to consider when looking for the best split
if int, then consider max_features features at each split.
If float, then max_features is a fraction and int(max_features * n_features) features are considered at each split.
If “auto”, then max_features=n_features.
If “sqrt”, then max_features=sqrt(n_features).
If “log2”, then max_features=log2(n_features).
If None, then max_features=n_features.
max_samples: int or float, default=None
If bootstrap is True, the number of samples to draw from X to train each base estimator.
If None (default), then draw X.shape[0] samples.
If int, then draw max_samples samples.
If float, then draw max_samples * X.shape[0] samples. Thus, max_samples should be in the interval (0, 1)
"""
self.coltype_dict = None
self.mask_memo_dict = None
self.sorted_col = None
self.stop = False
self.rf_reg = RandomForestRegressor(n_estimators=n_estimators,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
min_samples_split=min_samples_split)
self.rf_cla = RandomForestClassifier(n_estimators=n_estimators,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
min_samples_split=min_samples_split)
self.imp_continuous_index = None
self.imp_categorical_index = None
self.normalizer = normalizer
Solver.__init__(self,
normalizer=normalizer)
def solve(self, X, missing_mask):
X = check_array(X, force_all_finite=False)
self.sorted_col = self.sort_col(missing_mask)
self.coltype_dict = self._judge_type(X)
self.imp_continuous_index, self.imp_categorical_index = \
self.get_type_index(missing_mask, self.coltype_dict)
differ_categorical = float('inf')
differ_continuous = float('inf')
init_fill = X
while self.stop is False:
differ_categorical_old = differ_categorical
differ_continuous_old = differ_continuous
x_old_imp = init_fill
x_new_imp = []
for col in self.sorted_col:
tmp = []
if self.coltype_dict[col] is 'categorical':
model = self.rf_cla
else:
model = self.rf_reg
x_obs, y_obs, x_mis = self.split(init_fill, col, missing_mask)
model.fit(x_obs, y_obs)
y_mis = model.predict(x_mis)
for ele in y_mis:
tmp.append(ele)
x_new_imp.append(ele)
init_fill[:, col][missing_mask[:,col]] = tmp
x_new_imp = np.asarray(x_new_imp)
differ_continuous, differ_categorical = self._lose_func(x_new_imp, x_old_imp)
if differ_continuous >= differ_continuous_old and differ_categorical >= differ_categorical_old:
self.stop = True
return init_fill
def _lose_func(self, imp_new, imp_old):
"""
Evaluation Method, mathematical concept are available at 'https://www.stu-zhouyc.com/iterForest/metrics'
:param imputed_data_old: a dict like {'col name':[predicted value1,...],...}
the dict contains original missing index which is part of the original data
its the last estimated data
accompany with brand-new imputed data, they are going to be evaluate.
:return:
"""
continuous_imp_new = imp_new[self.imp_continuous_index]
continuous_imp_old = imp_old[self.imp_continuous_index]
categorical_imp_new = imp_new[self.imp_categorical_index]
categorical_imp_old = imp_old[self.imp_categorical_index]
try:
continuous_div = continuous_imp_new - continuous_imp_old
continuous_div = continuous_div.dot(continuous_div)
continuous_sum = continuous_imp_new.dot(continuous_imp_new)
categorical_count = np.sum(categorical_imp_new == categorical_imp_old)
categorical_var_len = len(categorical_imp_new)
except:
categorical_var_len = 0.01
categorical_count = 0
continuous_div = 0
continuous_sum = 0.001
if categorical_var_len is 0:
categorical_differ = 0
else:
categorical_differ = categorical_count / categorical_var_len
if continuous_sum is 0:
continuous_differ = 0
else:
continuous_differ = continuous_div / continuous_sum
return continuous_differ, categorical_differ
|
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.sum",
"sklearn.utils.check_array",
"numpy.asarray",
"sklearn.ensemble.RandomForestRegressor"
] |
[((2477, 2653), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'min_samples_leaf': 'min_samples_leaf', 'max_features': 'max_features', 'min_samples_split': 'min_samples_split'}), '(n_estimators=n_estimators, max_depth=max_depth,\n min_samples_leaf=min_samples_leaf, max_features=max_features,\n min_samples_split=min_samples_split)\n', (2498, 2653), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2844, 3021), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'min_samples_leaf': 'min_samples_leaf', 'max_features': 'max_features', 'min_samples_split': 'min_samples_split'}), '(n_estimators=n_estimators, max_depth=max_depth,\n min_samples_leaf=min_samples_leaf, max_features=max_features,\n min_samples_split=min_samples_split)\n', (2866, 3021), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3427, 3465), 'sklearn.utils.check_array', 'check_array', (['X'], {'force_all_finite': '(False)'}), '(X, force_all_finite=False)\n', (3438, 3465), False, 'from sklearn.utils import check_array\n'), ((4610, 4631), 'numpy.asarray', 'np.asarray', (['x_new_imp'], {}), '(x_new_imp)\n', (4620, 4631), True, 'import numpy as np\n'), ((5980, 6030), 'numpy.sum', 'np.sum', (['(categorical_imp_new == categorical_imp_old)'], {}), '(categorical_imp_new == categorical_imp_old)\n', (5986, 6030), True, 'import numpy as np\n')]
|
import os
from paranestamol.utils import Legend, cleanupFileRoot
# Fail fast unless the PySide2 Qt binding is selected via QT_API:
# matplotlib_backend_qtquick prefers PyQt5 when the variable is unset,
# which breaks this PySide2-based application.
if os.getenv('QT_API') != "PySide2":
    raise RuntimeError(
        "paranestamol requires the PySide2 Qt binding.\n\n"
        "Run it as:\n\n    QT_API=PySide2 python3 -m paranestamol\n\n"
        "matplotlib_backend_qtquick hard-codes a preference for PyQt5 even "
        "though it supports PySide2, so the binding must be forced via the "
        "environment. Add 'export QT_API=PySide2' to your .bashrc as a "
        "permanent workaround, or see "
        "https://github.com/jmitrevs/matplotlib_backend_qtquick")
# NOTE: the imports below are intentional even if they appear redundant.
# matplotlib_backend_qtquick supports PySide2 but hard-codes a preference
# for PyQt5 when selecting its Qt binding, so we import through its
# qt_compat module (with QT_API=PySide2 enforced above) to keep a single,
# consistent Qt binding in use across the application.
from matplotlib_backend_qtquick.backend_qtquickagg import FigureCanvasQtQuickAgg as FigureCanvasQML
from matplotlib_backend_qtquick.qt_compat import QtQml, QtGui, QtWidgets, QtCore
|
[
"os.getenv"
] |
[((69, 88), 'os.getenv', 'os.getenv', (['"""QT_API"""'], {}), "('QT_API')\n", (78, 88), False, 'import os\n')]
|
#------------------------------------------------------------------------------+
#
# <NAME>
# Create a three color triangle
# 2017-DEC
#
#------------------------------------------------------------------------------+
#--- IMPORT DEPENDENCIES ------------------------------------------------------+
from __future__ import division
from math import sin
from math import radians
#--- MAIN ---------------------------------------------------------------------+
def calc_dist(x0, y0, x1, y1):
    """Euclidean distance between points (x0, y0) and (x1, y1)."""
    dx = x0 - x1
    dy = y0 - y1
    return (dx * dx + dy * dy) ** 0.5
def clamp(x):
    """Round x to the nearest integer and clamp it into the 0..255 range."""
    rounded = int(round(x, 0))
    return max(0, min(rounded, 255))
def interpolate_color(color_info, x, y):
    """Blend the three corner colours at point (x, y).

    Each corner contributes with weight 1 - distance/max_dist, i.e. full
    weight at its own corner and zero weight at 'max dist' away.  The
    weighted RGB sum is clamped per channel and returned as '#rrggbb'.

    :param color_info: dict with corner positions ('cN_x'/'cN_y'),
        corner RGB tuples ('c1'..'c3') and the normalising 'max dist'.
    :param x, y: query point in the same coordinate system as the corners.
    :return: hex colour string, e.g. '#ff00aa'.
    """
    c1_dist = calc_dist(color_info['c1_x'], color_info['c1_y'], x, y)
    c2_dist = calc_dist(color_info['c2_x'], color_info['c2_y'], x, y)
    c3_dist = calc_dist(color_info['c3_x'], color_info['c3_y'], x, y)
    # NOTE: the original also computed dist_total = c1+c2+c3 but never used
    # it; removed as dead code.
    c1_norm = 1 - (c1_dist / color_info['max dist'])
    c2_norm = 1 - (c2_dist / color_info['max dist'])
    c3_norm = 1 - (c3_dist / color_info['max dist'])
    new_r = (c1_norm * color_info['c1'][0]) + (c2_norm * color_info['c2'][0]) + (c3_norm * color_info['c3'][0])
    new_g = (c1_norm * color_info['c1'][1]) + (c2_norm * color_info['c2'][1]) + (c3_norm * color_info['c3'][1])
    new_b = (c1_norm * color_info['c1'][2]) + (c2_norm * color_info['c2'][2]) + (c3_norm * color_info['c3'][2])
    new_color_hex = "#{0:02x}{1:02x}{2:02x}".format(clamp(new_r), clamp(new_g), clamp(new_b))
    return new_color_hex
def svg_color_triangle(tri_rows, c1, c2, c3, file_name, edge_len=1000):
    """Write '<file_name>.svg': an equilateral triangle of small triangles.

    The triangle is tessellated into tri_rows rows of up/down-pointing
    sub-triangles, each filled with a colour interpolated between the
    three corner colours c1 (top), c2 (bottom-left) and c3 (bottom-right).

    :param tri_rows: number of tessellation rows.
    :param c1, c2, c3: corner colours as (r, g, b) tuples.
    :param file_name: output file root; '.svg' is appended.
    :param edge_len: side length of the big triangle in pixels.
    """
    # CALCULATE GEOMETRY CONSTANTS
    tri_base_len = edge_len / tri_rows
    # Height of an equilateral triangle: sin(60 deg) * side.
    tri_height = sin(radians(60)) * edge_len
    tri_row_height = tri_height / tri_rows
    # CALCULATE COLOR CONSTANTS
    # Corner anchor points are the centroids of the three corner cells,
    # not the exact geometric corners.
    color_info = {}
    color_info['c1_x'] = edge_len / 2
    color_info['c1_y'] = tri_height - (tri_row_height / 2)
    color_info['c2_x'] = tri_base_len / 2
    color_info['c2_y'] = tri_row_height * 0.5
    color_info['c3_x'] = edge_len - (tri_base_len / 2)
    color_info['c3_y'] = tri_row_height * 0.5
    color_info['max dist'] = color_info['c3_x'] - color_info['c2_x']
    color_info['c1'] = c1
    color_info['c2'] = c2
    color_info['c3'] = c3
    # CREATE INITIAL SVG FILE
    with open(f'{file_name}.svg','w') as out_file:
        print('<?xml version="1.0" encoding="UTF-8" standalone="no"?>', file=out_file)
        print(f'<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="{edge_len}px" height="{tri_height}px">', file=out_file)
        # START WITH THE TOP OF THE TRIANGLE AND WORK DOWN
        # Note: SVG y grows downward, so row 0 (the apex) is drawn at the
        # largest y offset computed below.
        for row in range(0, tri_rows):
            # CALCULATE ROW CONSTANTS
            row_y_top = tri_height - (tri_row_height * row)
            row_y_bot = tri_height - (tri_row_height * (row + 1))
            row_x_top = (edge_len/2) - (row * tri_base_len * 0.5)
            row_x_bot = (edge_len/2) - ((row + 1) * tri_base_len * 0.5)
            # GENERATE TRIANGLES
            if row >= 1:
                # CYCLE THROUGH EACH ROW
                for i in range(0,row):
                    # GENERATE TWO TRIANGLE PAIRS
                    # t1: downward-pointing triangle; t2: upward-pointing.
                    t1_0_x = row_x_top + (i * tri_base_len)     # top
                    t1_0_y = row_y_top                          # top
                    t1_1_x = row_x_bot + (i * tri_base_len)     # bottom left
                    t1_1_y = row_y_bot                          # bottom left
                    t1_2_x = row_x_bot + ((i+1) * tri_base_len) # bottom right
                    t1_2_y = row_y_bot                          # bottom right
                    t1_c_x = t1_0_x                             # triangle centroid
                    t1_c_y = (row_y_bot + row_y_top) / 2        # triangle centroid
                    t1_color = interpolate_color(color_info, t1_c_x, t1_c_y)
                    t2_0_x = row_x_top + ((i+1) * tri_base_len) # top right
                    t2_0_y = row_y_top                          # top right
                    t2_1_x = row_x_top + (i * tri_base_len)     # top left
                    t2_1_y = row_y_top                          # top left
                    t2_2_x = row_x_bot + ((i+1) * tri_base_len) # bottom
                    t2_2_y = row_y_bot                          # bottom
                    t2_c_x = row_x_bot + ((i+1) * tri_base_len) # triangle centroid
                    t2_c_y =(row_y_bot + row_y_top) / 2         # triangle centroid
                    t2_color = interpolate_color(color_info, t2_c_x, t2_c_y)
                    # WRITE TRIANGLES TO SVG FILE
                    print('<polygon points="'+str(t1_0_x)+','+str(t1_0_y)+' '+str(t1_1_x)+','+str(t1_1_y)+' '+str(t1_2_x)+','+str(t1_2_y)+'" fill="'+t1_color+'" stroke="'+t1_color+'"/>', file=out_file)
                    print('<polygon points="'+str(t2_0_x)+','+str(t2_0_y)+' '+str(t2_1_x)+','+str(t2_1_y)+' '+str(t2_2_x)+','+str(t2_2_y)+'" fill="'+t2_color+'" stroke="'+t2_color+'"/>', file=out_file)
            # GENERATE LAST TRIANGLE IN ROW
            # Every row needs one extra upward triangle at its right edge.
            t3_0_x = edge_len - row_x_top                   # top
            t3_0_y = row_y_top                              # top
            t3_1_x = edge_len - row_x_bot - tri_base_len    # bottom left
            t3_1_y = row_y_bot                              # bottom left
            t3_2_x = edge_len - row_x_bot                   # bottom right
            t3_2_y = row_y_bot                              # bottom right
            t3_c_x = t3_0_x                                 # triangle centroid
            t3_c_y = (row_y_bot + row_y_top) / 2            # triangle centroid
            t3_color = interpolate_color(color_info, t3_c_x, t3_c_y)
            # WRITE TRIANGLE TO SVG FILE
            print('<polygon points="'+str(t3_0_x)+','+str(t3_0_y)+' '+str(t3_1_x)+','+str(t3_1_y)+' '+str(t3_2_x)+','+str(t3_2_y)+'" fill="'+t3_color+'" stroke="'+t3_color+'"/>', file=out_file)
        # FINISH SVG FILE AND CLOSE
        print('</svg>', file=out_file)
        # NOTE(review): close() is redundant here — the with-block already
        # closes the file on exit.
        out_file.close()
    pass
#--- END ----------------------------------------------------------------------+
|
[
"math.radians"
] |
[((1688, 1699), 'math.radians', 'radians', (['(60)'], {}), '(60)\n', (1695, 1699), False, 'from math import radians\n')]
|
import yfinance
import discord
from discord.ext import commands
import threading
import asyncio
import traceback
import requests
import time
class Asset(object):
    """Display data for a single tradable asset (stock, item, currency)."""

    def __init__(self, symbol=None, name=None, price=None, url=None):
        self.symbol = symbol
        self.name = name
        self.price = price
        self.url = url

    def to_embed(self):
        """Render this asset as a Discord embed (price shown in USD)."""
        embed = discord.Embed(title=self.symbol)
        for label, value in (
            ('Ticker', self.symbol),
            ('Name', self.name),
            ('Price (USD)', f'{self.price:0.4f}'),
        ):
            embed.add_field(name=label, value=value, inline=False)
        if self.url is not None:
            embed.set_image(url=self.url)
        return embed
class Market(object):
    """Base class: rate-limited access to a blocking market-data source.

    Subclasses implement a blocking `_get(symbol)`; `get` runs it in a
    thread executor while a semaphore caps the number of requests within
    each `wait_time` window.
    """

    def __init__(self, max_reqs, wait_time):
        # At most max_reqs requests may be in flight / in the cool-down window.
        self.semaphore = threading.Semaphore(value=max_reqs)
        self.wait_time = wait_time

    async def release(self):
        # Hold the slot for wait_time seconds before freeing it (rate limit).
        await asyncio.sleep(self.wait_time)
        self.semaphore.release()

    async def get(self, symbol):
        """Fetch `symbol`; returns None on failure, raises TimeoutError if
        no rate-limit slot frees up after ~2 seconds of polling."""
        attempts = 0
        while attempts < 20:
            if not self.semaphore.acquire(blocking=False):
                attempts += 1
                await asyncio.sleep(0.1)
                continue
            result = None
            try:
                running_loop = asyncio.get_running_loop()
                result = await running_loop.run_in_executor(None, self._get, symbol)
            except Exception:
                traceback.print_exc()
                result = None
            finally:
                # Schedule the delayed semaphore release without blocking.
                asyncio.create_task(self.release())
            return result
        raise TimeoutError()
class StockMarket(Market):
    """Equities and indices via Yahoo Finance (yfinance)."""

    def _get(self, symbol):
        """Blocking lookup; symbols starting with '^' are treated as indices.
        Returns None when Yahoo's payload lacks the expected fields."""
        try:
            ticker = yfinance.Ticker(symbol)
            if symbol.startswith('^'):
                return Asset(symbol=symbol,
                             name=ticker.info['shortName'],
                             price=ticker.info['regularMarketPrice'])
            return Asset(symbol=symbol,
                         name=ticker.info['longName'],
                         price=ticker.info['currentPrice'])
        except KeyError:
            return None
class OSRSMarket(Market):
    """Old School RuneScape Grand Exchange lookups via the item catalogue API."""

    base_url = 'https://secure.runescape.com/m=itemdb_oldschool/api/catalogue/items.json?category=1&alpha=%23&page=1'

    def find_item(self, name, page=1):
        """Search the catalogue for `name` (expected lowercase).

        Pages are scanned recursively: exact matches win, otherwise the
        first substring match found anywhere is returned.  Returns a
        (name, price, icon_url) tuple or None.
        """
        # Items whose names start with a digit live under the '#' bucket.
        first_char = name[0]
        if first_char.isnumeric():
            url = f'https://secure.runescape.com/m=itemdb_oldschool/api/catalogue/items.json?category=1&alpha=%23&page={page}'
        else:
            url = f'https://secure.runescape.com/m=itemdb_oldschool/api/catalogue/items.json?category=1&alpha={first_char.lower()}&page={page}'
        payload = requests.get(url).json()
        if not payload['items']:
            # Ran out of pages without finding anything.
            return None
        candidates = [(entry['name'], entry['current']['price'], entry['icon_large'])
                      for entry in payload['items']]
        # Exact match on this page?
        for candidate in candidates:
            if candidate[0].lower() == name:
                return candidate
        # Not here verbatim — look on deeper pages (sleep for rate limiting).
        time.sleep(1.2)
        deeper = self.find_item(name, page=page + 1)
        if deeper is not None:
            return deeper
        # No exact match anywhere: settle for a substring match on this page.
        for candidate in candidates:
            if name in candidate[0].lower():
                return candidate
        return None

    def _get(self, symbol):
        """Blocking lookup of a GE item; returns an Asset or None."""
        match = self.find_item(symbol.lower())
        if match is None:
            return None
        item_name, item_price, icon_url = match
        return Asset(symbol=symbol, name=item_name, price=item_price, url=icon_url)
class ForexMarket(Market):
    """Fiat currency rates via the frankfurter.app API (priced in USD)."""
    # Double-checked locking: the currency list is fetched once and shared.
    initial_lock = threading.Lock()
    initial_done = False
    # Maps currency code -> full display name, filled on first use.
    full_names = {}
    def _get(self, symbol):
        # Lazily fetch the currency list exactly once across threads.
        # NOTE(review): `self.initial_done = True` creates an *instance*
        # attribute shadowing the class flag — other instances would
        # re-fetch. Harmless with the single instance used here; confirm
        # before reusing.
        while not self.initial_done:
            if self.initial_lock.acquire(timeout=0.1):
                try:
                    if not self.initial_done:
                        j = requests.get('https://api.frankfurter.app/currencies').json()
                        for (k, v) in j.items():
                            self.full_names[k] = v
                        self.initial_done = True
                finally:
                    self.initial_lock.release()
        ticker = symbol.upper()
        if ticker not in self.full_names.keys():
            # Not a known code — fall back to a substring match on the
            # full currency names; give up if nothing matches.
            for x in self.full_names:
                if ticker in self.full_names[x].upper():
                    ticker = x
                    break
            else:
                return None
        # Rates are quoted per 1 USD, so invert to get USD per unit.
        j = requests.get('https://api.frankfurter.app/latest?from=USD').json()
        price = 1.0 / j['rates'][ticker]
        return Asset(symbol=ticker, name=self.full_names[ticker], price=price)
class CryptoMarket(Market):
    """Cryptocurrency prices via the CoinGecko API (priced in USD)."""
    # Double-checked locking: the coin list is fetched once and shared.
    initial_lock = threading.Lock()
    initial_done = False
    # Full CoinGecko coin list (id/symbol/name dicts), filled on first use.
    coins = []
    def _get(self, symbol):
        # Lazily fetch the coin list exactly once across threads.
        # NOTE(review): `self.coins = ...` / `self.initial_done = True`
        # create *instance* attributes shadowing the class-level ones —
        # fine for the single instance used here; confirm before reusing.
        while not self.initial_done:
            if self.initial_lock.acquire(timeout=0.1):
                try:
                    if not self.initial_done:
                        self.coins = requests.get('https://api.coingecko.com/api/v3/coins/list').json()
                        self.initial_done = True
                finally:
                    self.initial_lock.release()
        ticker = symbol
        # Case-insensitive match against either the coin name or symbol;
        # the first hit in CoinGecko's list order wins.
        for item in self.coins:
            if (item['name'].upper() == ticker.upper()) or (item['symbol'].upper() == ticker.upper()):
                id = item['id']
                j = requests.get(f'https://api.coingecko.com/api/v3/coins/{id}').json()
                return Asset(symbol=j['symbol'].upper(), name=j['name'], price=j['market_data']['current_price']['usd'])
        return None
# Discord cog exposing the `stonks` command group. (Deliberately no class
# docstring: discord.py surfaces Cog/command docstrings as user-facing
# help text, so only the `info` docstring below is meant for users.)
class FinanceCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        # One rate-limited market per data source: (max requests, window s).
        self.stock_market = StockMarket(5, 30)
        self.osrs_market = OSRSMarket(4, 30)
        self.forex_market = ForexMarket(15, 30)
        self.crypto_market = CryptoMarket(4, 30)
    async def get_info(self, ticker):
        # Dispatch on the ticker's prefix character:
        #   $ stock, ^ index, % OSRS item, + forex, : crypto.
        # Raises ValueError for malformed tickers; returns None when the
        # underlying market lookup fails.
        if len(ticker) < 2:
            raise ValueError()
        ticker = ticker.strip()
        t = ticker[0]
        a = ticker[1:]
        # Only alphanumerics, spaces and quotes are allowed in the body.
        if any(not (c.isalnum() or (c in ' \'\"')) for c in a):
            raise ValueError()
        if t == '$':
            return await self.stock_market.get(a)
        elif t == '^':
            # Re-attach the caret: Yahoo needs it to identify indices.
            return await self.stock_market.get('^' + a)
        elif t == '%':
            return await self.osrs_market.get(a)
        elif t == '+':
            return await self.forex_market.get(a)
        elif t == ':':
            return await self.crypto_market.get(a)
        else:
            raise ValueError()
    @commands.group(name='stonks')
    async def stonks(self, ctx: commands.Context):
        # Group container only; subcommands do the work.
        pass
    @stonks.command(name='info')
    async def info(self, ctx: commands.Context, *ticker):
        """
        $stock
        ^index
        "%old school runescape"
        +forex
        :crypto
        """
        # Re-join: multi-word tickers arrive as separate arguments.
        ticker = ' '.join(ticker)
        async with ctx.typing():
            try:
                r = await self.get_info(ticker)
                if r is None:
                    await ctx.reply('An error occurred while fetching price data.')
                else:
                    await ctx.reply(embed=r.to_embed())
            except ValueError:
                await ctx.reply('The ticker supplied is invalid')
            except TimeoutError:
                await ctx.reply('Too many requests; try again in a minute or so.')
            except:
                # Last-resort guard so the command never crashes the bot.
                traceback.print_exc()
    @stonks.command(name='convert')
    async def convert(self, ctx: commands.Context, fro, to):
        # Report how many units of `to` one unit of `fro` is worth,
        # fetching both assets concurrently.
        async with ctx.typing():
            try:
                fro_asset, to_asset = await asyncio.gather(
                    self.get_info(fro),
                    self.get_info(to)
                )
                if (fro_asset is None) or (to_asset is None):
                    await ctx.reply('An error occurred while fetching at least one of the assets')
                else:
                    e = discord.Embed(title='Asset Conversion')
                    e.add_field(name='From', value=fro_asset.name, inline=False)
                    e.add_field(name='To', value=to_asset.name, inline=False)
                    e.add_field(name='Rate', value=fro_asset.price / to_asset.price, inline=False)
                    await ctx.send(embed=e)
            except ValueError:
                await ctx.reply('At least one of the tickers supplied is invalid')
            except TimeoutError:
                await ctx.reply('Too many requests; try again in a minute or so.')
            except:
                # Last-resort guard so the command never crashes the bot.
                traceback.print_exc()
def setup(bot):
    """Extension entry point used by discord.py to register this cog."""
    cog = FinanceCog(bot)
    bot.add_cog(cog)
|
[
"traceback.print_exc",
"discord.Embed",
"asyncio.sleep",
"time.sleep",
"threading.Lock",
"asyncio.get_running_loop",
"yfinance.Ticker",
"requests.get",
"discord.ext.commands.group",
"threading.Semaphore"
] |
[((3659, 3675), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3673, 3675), False, 'import threading\n'), ((4748, 4764), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4762, 4764), False, 'import threading\n'), ((6604, 6633), 'discord.ext.commands.group', 'commands.group', ([], {'name': '"""stonks"""'}), "(name='stonks')\n", (6618, 6633), False, 'from discord.ext import commands\n'), ((375, 407), 'discord.Embed', 'discord.Embed', ([], {'title': 'self.symbol'}), '(title=self.symbol)\n', (388, 407), False, 'import discord\n'), ((804, 839), 'threading.Semaphore', 'threading.Semaphore', ([], {'value': 'max_reqs'}), '(value=max_reqs)\n', (823, 839), False, 'import threading\n'), ((2967, 2982), 'time.sleep', 'time.sleep', (['(1.2)'], {}), '(1.2)\n', (2977, 2982), False, 'import time\n'), ((953, 982), 'asyncio.sleep', 'asyncio.sleep', (['self.wait_time'], {}), '(self.wait_time)\n', (966, 982), False, 'import asyncio\n'), ((1666, 1689), 'yfinance.Ticker', 'yfinance.Ticker', (['symbol'], {}), '(symbol)\n', (1681, 1689), False, 'import yfinance\n'), ((1532, 1550), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (1545, 1550), False, 'import asyncio\n'), ((2513, 2530), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2525, 2530), False, 'import requests\n'), ((4512, 4571), 'requests.get', 'requests.get', (['"""https://api.frankfurter.app/latest?from=USD"""'], {}), "('https://api.frankfurter.app/latest?from=USD')\n", (4524, 4571), False, 'import requests\n'), ((1181, 1207), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (1205, 1207), False, 'import asyncio\n'), ((7480, 7501), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7499, 7501), False, 'import traceback\n'), ((8014, 8053), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Asset Conversion"""'}), "(title='Asset Conversion')\n", (8027, 8053), False, 'import discord\n'), ((8622, 8643), 'traceback.print_exc', 'traceback.print_exc', ([], 
{}), '()\n', (8641, 8643), False, 'import traceback\n'), ((1343, 1364), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1362, 1364), False, 'import traceback\n'), ((5430, 5490), 'requests.get', 'requests.get', (['f"""https://api.coingecko.com/api/v3/coins/{id}"""'], {}), "(f'https://api.coingecko.com/api/v3/coins/{id}')\n", (5442, 5490), False, 'import requests\n'), ((3937, 3991), 'requests.get', 'requests.get', (['"""https://api.frankfurter.app/currencies"""'], {}), "('https://api.frankfurter.app/currencies')\n", (3949, 3991), False, 'import requests\n'), ((5030, 5089), 'requests.get', 'requests.get', (['"""https://api.coingecko.com/api/v3/coins/list"""'], {}), "('https://api.coingecko.com/api/v3/coins/list')\n", (5042, 5089), False, 'import requests\n')]
|
import numpy as np
import time
from nms.nums_py2 import py_cpu_nms # for cpu
# from nms.gpu_nms import gpu_nms # for gpu
# Fix the RNG seed so every benchmark run uses the identical box set.
np.random.seed( 1 )   # keep fixed
# Number of candidate regions of interest to generate.
num_rois = 6000
# Random top-left (x, y) corners drawn from [50, 145).
minxy = np.random.randint(50,145,size=(num_rois ,2))
# Random bottom-right (x, y) corners drawn from [150, 200).
maxxy = np.random.randint(150,200,size=(num_rois ,2))
# Confidence scores drawn uniformly from [0.2, 1.0).
score = 0.8*np.random.random_sample((num_rois ,1))+0.2
# Rows of (x1, y1, x2, y2, score), float32 as expected by the NMS kernel.
boxes_new = np.concatenate((minxy,maxxy,score), axis=1).astype(np.float32)
def nms_test_time(boxes_new, thresholds=(0.7, 0.8, 0.9), repeats=50):
    """Benchmark py_cpu_nms over several IoU thresholds.

    Generalized from the original hard-coded values: thresholds and the
    repeat count are now parameters with backward-compatible defaults.

    :param boxes_new: (N, 5) float array of (x1, y1, x2, y2, score) rows.
    :param thresholds: IoU thresholds to time, each run `repeats` times.
    :param repeats: number of timed repetitions per threshold.
    :return: keep indices from the final NMS call (None if `thresholds`
        is empty — the original raised NameError in that case).
    """
    keep = None
    for thresh in thresholds:
        since = time.time()
        for _ in range(repeats):
            keep = py_cpu_nms(boxes_new, thresh=thresh)  # for cpu
        # Report the mean wall-clock time per call.
        print("thresh={:.1f}, time wastes:{:.4f}".format(thresh, (time.time() - since) / repeats))
    return keep
# Run the benchmark when executed as a script.
if __name__ =="__main__":
    nms_test_time(boxes_new)
|
[
"numpy.random.seed",
"numpy.random.random_sample",
"nms.nums_py2.py_cpu_nms",
"time.time",
"numpy.random.randint",
"numpy.concatenate"
] |
[((127, 144), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (141, 144), True, 'import numpy as np\n'), ((186, 232), 'numpy.random.randint', 'np.random.randint', (['(50)', '(145)'], {'size': '(num_rois, 2)'}), '(50, 145, size=(num_rois, 2))\n', (203, 232), True, 'import numpy as np\n'), ((239, 286), 'numpy.random.randint', 'np.random.randint', (['(150)', '(200)'], {'size': '(num_rois, 2)'}), '(150, 200, size=(num_rois, 2))\n', (256, 286), True, 'import numpy as np\n'), ((297, 335), 'numpy.random.random_sample', 'np.random.random_sample', (['(num_rois, 1)'], {}), '((num_rois, 1))\n', (320, 335), True, 'import numpy as np\n'), ((353, 398), 'numpy.concatenate', 'np.concatenate', (['(minxy, maxxy, score)'], {'axis': '(1)'}), '((minxy, maxxy, score), axis=1)\n', (367, 398), True, 'import numpy as np\n'), ((535, 546), 'time.time', 'time.time', ([], {}), '()\n', (544, 546), False, 'import time\n'), ((594, 633), 'nms.nums_py2.py_cpu_nms', 'py_cpu_nms', (['boxes_new'], {'thresh': 'thresh[i]'}), '(boxes_new, thresh=thresh[i])\n', (604, 633), False, 'from nms.nums_py2 import py_cpu_nms\n'), ((790, 801), 'time.time', 'time.time', ([], {}), '()\n', (799, 801), False, 'import time\n')]
|
def test_mqtt_broker_default_config():
    """A default broker exposes plain-TCP and TLS listeners from settings."""
    from feeder.util.mqtt.broker import FeederBroker
    from feeder import settings

    broker = FeederBroker()
    listeners = broker.config["listeners"]
    expected_tcp = {"bind": f"0.0.0.0:{settings.mqtt_port}"}
    assert listeners["tcp-1"] == expected_tcp
    expected_tls = {
        "bind": f"0.0.0.0:{settings.mqtts_port}",
        "ssl": True,
        "cafile": settings.mqtts_public_key,
        "certfile": settings.mqtts_public_key,
        "keyfile": settings.mqtts_private_key,
    }
    assert listeners["tcp-ssl-1"] == expected_tls
def test_mqtt_broker_config_overrides():
    """Constructor-supplied overrides must land verbatim in the config."""
    from feeder.util.mqtt.broker import FeederBroker

    custom_sections = {"auth": {}}
    broker = FeederBroker(config_overrides=custom_sections)
    auth_section = broker.config["auth"]
    assert auth_section == {}
|
[
"feeder.util.mqtt.broker.FeederBroker"
] |
[((138, 152), 'feeder.util.mqtt.broker.FeederBroker', 'FeederBroker', ([], {}), '()\n', (150, 152), False, 'from feeder.util.mqtt.broker import FeederBroker\n'), ((670, 710), 'feeder.util.mqtt.broker.FeederBroker', 'FeederBroker', ([], {'config_overrides': 'overrides'}), '(config_overrides=overrides)\n', (682, 710), False, 'from feeder.util.mqtt.broker import FeederBroker\n')]
|
import tensorflow as tf
from network.Util import smart_shape
RNNCell = tf.nn.rnn_cell.RNNCell
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
def _conv2d(x, W, strides=None):
    """2-D convolution with SAME padding; `strides` are the spatial (h, w)
    strides, defaulting to (1, 1)."""
    spatial_strides = [1, 1] if strides is None else strides
    # Batch and channel strides are always 1 for NHWC conv.
    return tf.nn.conv2d(x, W, strides=[1] + spatial_strides + [1], padding="SAME")
def dynamic_conv_rnn(cell, inputs, sequence_length=None, initial_state=None,
                     dtype=None, parallel_iterations=None, swap_memory=False,
                     time_major=False, scope=None):
    """Run tf.nn.dynamic_rnn with a convolutional cell, then restore the
    spatial dimensions that the cell flattens away.

    `inputs` should have shape (time, batch, height, width, feature); the
    returned outputs are reshaped back to that layout with `feature`
    replaced by the cell's unit count.
    """
    input_shape = smart_shape(inputs)
    outputs, final_state = tf.nn.dynamic_rnn(
        cell, inputs, sequence_length, initial_state, dtype,
        parallel_iterations, swap_memory, time_major, scope)
    restored_shape = tf.stack([input_shape[0], input_shape[1], input_shape[2],
                               input_shape[3], cell.num_units()])
    outputs = tf.reshape(outputs, restored_shape)
    return outputs, final_state
# similar to https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# for maximal flexibility we allow to pass the weights externally
# similar to https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# for maximal flexibility we allow to pass the weights externally
class ConvLSTMCell(RNNCell):
  """LSTM cell whose gates are computed by a 2-D convolution over a
  (height, width, num_units) feature map instead of a dense layer.

  Externally (to dynamic_rnn) the state/output are flattened to
  [batch, height * width * num_units]; internally they are reshaped back
  to spatial form for the convolution.
  """
  def __init__(self, num_units, height, width, filter_size, forget_bias=1.0, activation=tf.tanh, W=None, b=None):
    self._num_units = num_units
    self._height = height
    self._width = width
    # Flattened per-sample state size used by dynamic_rnn.
    self._size = num_units * height * width
    self._forget_bias = forget_bias
    self._activation = activation
    self._filter_size = list(filter_size)
    # Optional externally supplied kernel; must produce all 4 gates at once.
    if W is not None:
      W_shape = W.get_shape().as_list()
      assert len(W_shape) == 4
      assert W_shape[:2] == self._filter_size
      assert W_shape[-1] == 4 * self._num_units
      self._W = W
    else:
      self._W = None
    # Optional externally supplied bias for the 4 concatenated gates.
    if b is not None:
      b_shape = b.get_shape().as_list()
      assert len(b_shape) == 1
      assert b_shape[0] == 4 * self._num_units
      self._b = b
    else:
      self._b = None
  def __call__(self, inputs, state, scope=None):
    #inputs: `2-D` tensor with shape `[batch_size x input_size]`.
    #state: tuple with shapes `[batch_size x s] for s in self.state_size
    with tf.variable_scope(scope or type(self).__name__):  # "ConvLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = state
      concat = self._conv(inputs, h)
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=concat)
      batch = inputs.get_shape().as_list()[0]
      if batch is None:
        # Static batch unknown — fall back to the dynamic batch size.
        batch = tf.shape(inputs)[0]
      # Flatten each gate back to [batch, height*width*num_units] so the
      # cell's external interface stays 2-D.
      i, j, f, o = [tf.reshape(x, [batch, -1]) for x in [i, j, f, o]]
      # Standard LSTM update with the configurable forget-gate bias.
      new_c = (c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * tf.sigmoid(o)
      new_state = LSTMStateTuple(new_c, new_h)
      return new_h, new_state
  def _conv(self, inputs, h):
    # Compute all four gate pre-activations with a single convolution over
    # the concatenation of the input feature map and the previous output.
    batch = inputs.get_shape().as_list()[0]
    if batch is None:
      batch = tf.shape(inputs)[0]
    n_input_features = inputs.get_shape().as_list()[-1]
    # NOTE(review): inputs are assumed to already be shaped
    # [batch, height, width, features] here (the commented reshape below
    # suggests a flattened variant was once supported) — confirm callers.
    #inputs = tf.reshape(inputs, [batch, self._height, self._width, n_input_features])
    h = tf.reshape(h, [batch, self._height, self._width, self._num_units])
    inp = tf.concat([inputs, h], axis=3)
    if self._W is not None:
      W = self._W
      assert W.get_shape().as_list()[2] == n_input_features + self._num_units
    else:
      W = tf.get_variable("W", shape=(self._filter_size + [n_input_features + self._num_units, 4 * self._num_units]))
    if self._b is not None:
      b = self._b
    else:
      zero_initializer = tf.constant_initializer(0.0, dtype=inputs.dtype)
      b = tf.get_variable("b", shape=(4 * self._num_units), initializer=zero_initializer)
    y = _conv2d(inp, W) + b
    return y
  def num_units(self):
    # Number of feature channels per spatial position.
    return self._num_units
  @property
  def state_size(self):
    # Both c and h are flattened to height * width * num_units.
    return LSTMStateTuple(self._size, self._size)
  @property
  def output_size(self):
    return self._size
|
[
"tensorflow.nn.dynamic_rnn",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.get_variable",
"tensorflow.shape",
"tensorflow.nn.conv2d",
"tensorflow.split",
"tensorflow.sigmoid",
"network.Util.smart_shape"
] |
[((228, 291), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '([1] + strides + [1])', 'padding': '"""SAME"""'}), "(x, W, strides=[1] + strides + [1], padding='SAME')\n", (240, 291), True, 'import tensorflow as tf\n'), ((584, 603), 'network.Util.smart_shape', 'smart_shape', (['inputs'], {}), '(inputs)\n', (595, 603), False, 'from network.Util import smart_shape\n'), ((654, 781), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'inputs', 'sequence_length', 'initial_state', 'dtype', 'parallel_iterations', 'swap_memory', 'time_major', 'scope'], {}), '(cell, inputs, sequence_length, initial_state, dtype,\n parallel_iterations, swap_memory, time_major, scope)\n', (671, 781), True, 'import tensorflow as tf\n'), ((835, 924), 'tensorflow.stack', 'tf.stack', (['[input_shape[0], input_shape[1], input_shape[2], input_shape[3], num_units]'], {}), '([input_shape[0], input_shape[1], input_shape[2], input_shape[3],\n num_units])\n', (843, 924), True, 'import tensorflow as tf\n'), ((3187, 3253), 'tensorflow.reshape', 'tf.reshape', (['h', '[batch, self._height, self._width, self._num_units]'], {}), '(h, [batch, self._height, self._width, self._num_units])\n', (3197, 3253), True, 'import tensorflow as tf\n'), ((3264, 3294), 'tensorflow.concat', 'tf.concat', (['[inputs, h]'], {'axis': '(3)'}), '([inputs, h], axis=3)\n', (3273, 3294), True, 'import tensorflow as tf\n'), ((2435, 2487), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(4)', 'value': 'concat'}), '(axis=3, num_or_size_splits=4, value=concat)\n', (2443, 2487), True, 'import tensorflow as tf\n'), ((3440, 3550), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': '(self._filter_size + [n_input_features + self._num_units, 4 * self._num_units])'}), "('W', shape=self._filter_size + [n_input_features + self.\n _num_units, 4 * self._num_units])\n", (3455, 3550), True, 'import tensorflow as tf\n'), ((3629, 3677), 'tensorflow.constant_initializer', 
'tf.constant_initializer', (['(0.0)'], {'dtype': 'inputs.dtype'}), '(0.0, dtype=inputs.dtype)\n', (3652, 3677), True, 'import tensorflow as tf\n'), ((3688, 3765), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {'shape': '(4 * self._num_units)', 'initializer': 'zero_initializer'}), "('b', shape=4 * self._num_units, initializer=zero_initializer)\n", (3703, 3765), True, 'import tensorflow as tf\n'), ((2614, 2640), 'tensorflow.reshape', 'tf.reshape', (['x', '[batch, -1]'], {}), '(x, [batch, -1])\n', (2624, 2640), True, 'import tensorflow as tf\n'), ((2812, 2825), 'tensorflow.sigmoid', 'tf.sigmoid', (['o'], {}), '(o)\n', (2822, 2825), True, 'import tensorflow as tf\n'), ((3015, 3031), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (3023, 3031), True, 'import tensorflow as tf\n'), ((2574, 2590), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (2582, 2590), True, 'import tensorflow as tf\n'), ((2684, 2717), 'tensorflow.sigmoid', 'tf.sigmoid', (['(f + self._forget_bias)'], {}), '(f + self._forget_bias)\n', (2694, 2717), True, 'import tensorflow as tf\n'), ((2720, 2733), 'tensorflow.sigmoid', 'tf.sigmoid', (['i'], {}), '(i)\n', (2730, 2733), True, 'import tensorflow as tf\n')]
|
import argparse
import base64
import os
import pickle
from google_auth_oauthlib.flow import InstalledAppFlow
# Shorthand service name -> Google OAuth scope suffixes (each is expanded
# to https://www.googleapis.com/auth/<suffix> before the flow runs).
SERVICE_SCOPES = {
    "drive": ["drive.appdata", "drive.file", "drive.install", "drive"],
    "apps-script": ["script.projects"],
}
def get_arguments(parser):
    """Register this tool's command-line options on *parser* and parse argv."""
    # Plain string options: (flag, dest, default, help text).
    string_options = (
        ("--credentials", "credentials_file", "credentials.json",
         "project credentials file, by default tries to access file "
         "from current directory"),
        ("--scopes", "scopes", None,
         "authentication scopes as comma separated list"),
        ("--service", "service", None,
         "set authentication scopes for the given service, "
         "supported services: drive,apps-script"),
    )
    for flag, dest, default, help_text in string_options:
        parser.add_argument(flag, dest=dest, default=default, help=help_text)
    parser.add_argument(
        "--console",
        dest="console_flow",
        action="store_true",
        default=False,
        help="use to run console based auth flow",
    )
    return parser.parse_args()
def start():
    """CLI entry point: run a Google OAuth flow and print the credentials.

    Parses command-line options, validates the credentials file and scope
    selection, runs the InstalledAppFlow (console- or browser-based), and
    prints the resulting credentials as base64-encoded pickle for pasting
    into Robocloud Vault.
    """
    parser = argparse.ArgumentParser(description="Getting Google OAuth token")
    args = get_arguments(parser)
    # A missing or empty credentials file can't start the flow — bail out.
    if (
        not os.path.exists(args.credentials_file)
        or os.stat(args.credentials_file).st_size == 0
    ):
        print(
            "WARNING: Credentials file '%s' does not exist or is empty\n"
            % args.credentials_file
        )
        parser.print_help()
        return
    # Combine scopes from the --service preset and the explicit --scopes list.
    auth_scopes = []
    if args.service and args.service in SERVICE_SCOPES.keys():
        auth_scopes.extend(SERVICE_SCOPES[args.service])
    if args.scopes:
        auth_scopes.extend(args.scopes.split(","))
    if not auth_scopes:
        print("WARNING: No authentication scopes have been defined!\n")
        parser.print_help()
        return
    # Expand shorthand scope names to full Google OAuth scope URLs.
    googlescopes = [f"https://www.googleapis.com/auth/{scope}" for scope in auth_scopes]
    print("Google OAuth Flow for scopes: %s" % (",".join(auth_scopes)))
    flow = InstalledAppFlow.from_client_secrets_file(
        args.credentials_file, googlescopes
    )
    if args.console_flow:
        credentials = flow.run_console()
    else:
        credentials = flow.run_local_server()
    # NOTE(review): the credentials (including refresh token) are printed
    # to stdout as base64-encoded pickle — treat this output as a secret.
    print(
        "\nCopy these credentials into Robocloud Vault:\n%s%s%s"
        % (
            (40 * "-") + "\n",
            str(base64.b64encode(pickle.dumps(credentials)), "utf-8"),
            "\n" + (40 * "-") + "\n",
        )
    )
|
[
"argparse.ArgumentParser",
"os.stat",
"os.path.exists",
"google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file",
"pickle.dumps"
] |
[((1119, 1184), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Getting Google OAuth token"""'}), "(description='Getting Google OAuth token')\n", (1142, 1184), False, 'import argparse\n'), ((2040, 2118), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['args.credentials_file', 'googlescopes'], {}), '(args.credentials_file, googlescopes)\n', (2081, 2118), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((1239, 1276), 'os.path.exists', 'os.path.exists', (['args.credentials_file'], {}), '(args.credentials_file)\n', (1253, 1276), False, 'import os\n'), ((1288, 1318), 'os.stat', 'os.stat', (['args.credentials_file'], {}), '(args.credentials_file)\n', (1295, 1318), False, 'import os\n'), ((2408, 2433), 'pickle.dumps', 'pickle.dumps', (['credentials'], {}), '(credentials)\n', (2420, 2433), False, 'import pickle\n')]
|
import apps.common.func.InitDjango
from all_models.models import TbUser, TbAdminUserPermissionRelation
from apps.common.func.WebFunc import *
class UserService(object):
@staticmethod
def getUsers():
return TbUser.objects.all()
@staticmethod
def getUserByLoginname(loginname):
return TbUser.objects.filter(loginName=loginname)
@staticmethod
def updateUser(userData):
tbModel = TbUser.objects.filter(id=userData["id"])
tbModel.update(**userData)
if __name__ == "__main__":
# print(UserService.getUsers()[0])
#permissionDict = UserPermission.getUserPermissions("liyc", "/interfaceTest/HTTP_InterfaceListCheck")
#print(permissionDict)
# print("permissionDict:", permissionDict)
#print("interfaceDict:", interfaceDict)
permissionsList = UserPermission.getOthersPermissions("liyc", ['lining02', 'gaozhe', 'qinjp', 'yongwy', 'pengjie', 'tanglu', 'hongln'], "/interfaceTest/HTTP_GlobalTextConfListPage")
# print("permissionsList:", permissionsList)
# print(UserService.getUserByLoginname(UserService.getUsers()[0].loginName))
|
[
"all_models.models.TbUser.objects.all",
"all_models.models.TbUser.objects.filter"
] |
[((226, 246), 'all_models.models.TbUser.objects.all', 'TbUser.objects.all', ([], {}), '()\n', (244, 246), False, 'from all_models.models import TbUser, TbAdminUserPermissionRelation\n'), ((320, 362), 'all_models.models.TbUser.objects.filter', 'TbUser.objects.filter', ([], {'loginName': 'loginname'}), '(loginName=loginname)\n', (341, 362), False, 'from all_models.models import TbUser, TbAdminUserPermissionRelation\n'), ((430, 470), 'all_models.models.TbUser.objects.filter', 'TbUser.objects.filter', ([], {'id': "userData['id']"}), "(id=userData['id'])\n", (451, 470), False, 'from all_models.models import TbUser, TbAdminUserPermissionRelation\n')]
|
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from load_test_data import load_test_data
from sklearn.metrics import confusion_matrix
from model_result import model_result
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import pickle
import os
import matplotlib
basewidth = 300
hsize = 300
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential( # input shape (1, 28, 28)
nn.Conv2d(
in_channels=1, # input height
out_channels=16, # n_filters
kernel_size=10, # filter size
stride=1, # filter movement/step
padding=2,
# if want same width and length of this image after Conv2d, padding=(kernel_size-1)/2 if stride=1
), # output shape (16, 28, 28)
nn.ReLU(), # activation
nn.MaxPool2d(kernel_size=5), # choose max value in 2x2 area, output shape (16, 14, 14)
)
self.conv2 = nn.Sequential( # input shape (16, 14, 14)
nn.Conv2d(16, 32, 10, 1, 2), # output shape (32, 14, 14)
nn.ReLU(), # activation
nn.MaxPool2d(5), # output shape (32, 7, 7)
)
self.linear1 = nn.Linear(128, 500)
self.linear2 = nn.Linear(500, 30)
self.out = nn.Linear(30, 6)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
x = self.relu(x)
x = self.linear1(x)
x = self.relu(x)
x = self.linear2(x)
x = self.relu(x)
output = self.out(x)
return output, x # return x for visualization
# model_name = input('Please input model name:')
# model_name = model_name + '.pkl'
# model_name = '1.pkl'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dirName = 'Projection_images'
try:
# Create target Directory
os.mkdir(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
for i in range(10):
model_name = str(i+1) + '.pkl'
net = torch.load(model_name)
model_name_pro = str(i+1)
dirName = 'Projection_images/' + 'model_' + model_name_pro + '_first_layer_pro'
try:
# Create target Directory
os.mkdir(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
train_x_file = ''
if train_x_file == '':
# train_x_file = 'train_table_1_4'
train_x_file = 'training'
train_x_file = train_x_file + '.pkl'
data = pickle.load(open(train_x_file, "rb"))
data = torch.from_numpy(data)
data = data.type(torch.FloatTensor)
for img_num in range(data.shape[0]):
img = data[img_num, :, :, :]
img = img.reshape(1, 1, basewidth, hsize)
img = img.to(device)
layer_1 = net.conv1(img)
layer_1 = layer_1.cpu() # 16 filters
for i in range(layer_1.shape[1]):
layer_1_1 = layer_1[0, i, :, :]
layer_1_1 = layer_1_1.detach().numpy()
matplotlib.use('Agg')
fig, ax = plt.subplots()
filename = dirName + '/' + 'image_' + str(img_num+1) + '_filer_' + str(i+1) + '.jpg'
plt.imshow(layer_1_1)
# plt.show()
fig.savefig(filename)
plt.close()
|
[
"os.mkdir",
"torch.from_numpy",
"torch.nn.ReLU",
"matplotlib.pyplot.imshow",
"torch.load",
"torch.nn.Conv2d",
"matplotlib.pyplot.close",
"matplotlib.use",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"matplotlib.pyplot.subplots",
"torch.nn.Sigmoid"
] |
[((2158, 2175), 'os.mkdir', 'os.mkdir', (['dirName'], {}), '(dirName)\n', (2166, 2175), False, 'import os\n'), ((2371, 2393), 'torch.load', 'torch.load', (['model_name'], {}), '(model_name)\n', (2381, 2393), False, 'import torch\n'), ((2955, 2977), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (2971, 2977), False, 'import torch\n'), ((1333, 1352), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(500)'], {}), '(128, 500)\n', (1342, 1352), True, 'import torch.nn as nn\n'), ((1377, 1395), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(30)'], {}), '(500, 30)\n', (1386, 1395), True, 'import torch.nn as nn\n'), ((1416, 1432), 'torch.nn.Linear', 'nn.Linear', (['(30)', '(6)'], {}), '(30, 6)\n', (1425, 1432), True, 'import torch.nn as nn\n'), ((1457, 1469), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1467, 1469), True, 'import torch.nn as nn\n'), ((1491, 1500), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1498, 1500), True, 'import torch.nn as nn\n'), ((2047, 2072), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2070, 2072), False, 'import torch\n'), ((2564, 2581), 'os.mkdir', 'os.mkdir', (['dirName'], {}), '(dirName)\n', (2572, 2581), False, 'import os\n'), ((524, 602), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(16)', 'kernel_size': '(10)', 'stride': '(1)', 'padding': '(2)'}), '(in_channels=1, out_channels=16, kernel_size=10, stride=1, padding=2)\n', (533, 602), True, 'import torch.nn as nn\n'), ((930, 939), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (937, 939), True, 'import torch.nn as nn\n'), ((968, 995), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(5)'}), '(kernel_size=5)\n', (980, 995), True, 'import torch.nn as nn\n'), ((1145, 1172), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)', '(10)', '(1)', '(2)'], {}), '(16, 32, 10, 1, 2)\n', (1154, 1172), True, 'import torch.nn as nn\n'), ((1216, 1225), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1223, 1225), True, 
'import torch.nn as nn\n'), ((1254, 1269), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(5)'], {}), '(5)\n', (1266, 1269), True, 'import torch.nn as nn\n'), ((3418, 3439), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (3432, 3439), False, 'import matplotlib\n'), ((3463, 3477), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3475, 3477), True, 'import matplotlib.pyplot as plt\n'), ((3589, 3610), 'matplotlib.pyplot.imshow', 'plt.imshow', (['layer_1_1'], {}), '(layer_1_1)\n', (3599, 3610), True, 'import matplotlib.pyplot as plt\n'), ((3685, 3696), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3694, 3696), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""The CPIO path specification resolver helper implementation."""
from dfvfs.file_io import cpio_file_io
from dfvfs.lib import definitions
from dfvfs.resolver_helpers import manager
from dfvfs.resolver_helpers import resolver_helper
from dfvfs.vfs import cpio_file_system
class CPIOResolverHelper(resolver_helper.ResolverHelper):
"""CPIO resolver helper."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_CPIO
def NewFileObject(self, resolver_context):
"""Creates a new file-like object.
Args:
resolver_context (Context): resolver context.
Returns:
FileIO: file-like object.
"""
return cpio_file_io.CPIOFile(resolver_context)
def NewFileSystem(self, resolver_context):
"""Creates a new file system object.
Args:
resolver_context (Context): resolver context.
Returns:
FileSystem: file system.
"""
return cpio_file_system.CPIOFileSystem(resolver_context)
# Register the resolver helpers with the resolver.
manager.ResolverHelperManager.RegisterHelper(CPIOResolverHelper())
|
[
"dfvfs.file_io.cpio_file_io.CPIOFile",
"dfvfs.vfs.cpio_file_system.CPIOFileSystem"
] |
[((652, 691), 'dfvfs.file_io.cpio_file_io.CPIOFile', 'cpio_file_io.CPIOFile', (['resolver_context'], {}), '(resolver_context)\n', (673, 691), False, 'from dfvfs.file_io import cpio_file_io\n'), ((906, 955), 'dfvfs.vfs.cpio_file_system.CPIOFileSystem', 'cpio_file_system.CPIOFileSystem', (['resolver_context'], {}), '(resolver_context)\n', (937, 955), False, 'from dfvfs.vfs import cpio_file_system\n')]
|
import io
import json
from typing import Optional
import pandas as pd
from astro.constants import DEFAULT_CHUNK_SIZE
from astro.constants import FileType as FileTypeConstants
from astro.files.types.base import FileType
class NDJSONFileType(FileType):
"""Concrete implementation to handle NDJSON file type"""
def export_to_dataframe(self, stream, **kwargs):
"""read ndjson file from one of the supported locations and return dataframe
:param stream: file stream object
"""
return NDJSONFileType.flatten(self.normalize_config, stream)
def create_from_dataframe(self, df: pd.DataFrame, stream: io.TextIOWrapper) -> None:
"""Write ndjson file to one of the supported locations
:param df: pandas dataframe
:param stream: file stream object
"""
df.to_json(stream, orient="records", lines=True)
@property
def name(self):
return FileTypeConstants.NDJSON
@staticmethod
def flatten(
normalize_config: Optional[dict], stream: io.TextIOWrapper
) -> pd.DataFrame:
"""
Flatten the nested ndjson/json.
:param normalize_config: parameters in dict format of pandas json_normalize() function.
https://pandas.pydata.org/docs/reference/api/pandas.json_normalize.html
:param stream: io.TextIOWrapper object for the file
:type normalize_config: dict
:type stream: io.TextIOWrapper
:return: return dataframe containing the loaded data
:rtype: `pandas.DataFrame`
"""
normalize_config = normalize_config or {}
df = None
rows = stream.readlines(DEFAULT_CHUNK_SIZE)
while len(rows) > 0:
if df is None:
df = pd.DataFrame(
pd.json_normalize(
[json.loads(row) for row in rows], **normalize_config
)
)
rows = stream.readlines(DEFAULT_CHUNK_SIZE)
return df
|
[
"json.loads"
] |
[((1834, 1849), 'json.loads', 'json.loads', (['row'], {}), '(row)\n', (1844, 1849), False, 'import json\n')]
|
import datetime
import json
import re
import os
import requests
import time
import threading
import pickle
from django.core.mail import send_mail
from django.db import connection
from django.http import JsonResponse
from django.shortcuts import render_to_response, render
from django.core.cache import cache
from ApiManager.utils import schedule
from ApiManager.utils.case_utils import run_case_by_id
from ApiManager.utils.forms import TaskModelForm
from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord
from frame.utils.common import get_ajax_msg, dataToJson
from ApiManager.utils.forms import get_validate_form_msg
from ApiManager.utils.utils import pagination_for_objects
from Joy_QA_Platform.settings import EMAIL_FROM
from Joy_QA_Platform.configs import AUTH_ADD_TASK, AUTH_DELETE, AUTH_UPDATE, AUTH_VIEW, EMAIL_SUFFIX
is_timer_start = False
run_task_list = []
run_job_dict = {}
def task_list(request):
if request.method == "GET":
return render(request, 'api/task_list.html')
elif request.method == "POST":
index = int(request.POST.get('index'))
env_name_dic = {}
project_name_dic = {}
module_name_dic = {}
results = filter_tasks_for_user(request.user, TaskInfo.objects.filter().order_by('-id'), AUTH_VIEW)
tasks = pagination_for_objects(results, index)
if tasks is not None and len(tasks) > 0:
for task in tasks:
append_env_dict(task, env_name_dic)
append_project_dict(task, project_name_dic)
append_module_dict(task, module_name_dic)
count = len(results)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '获取任务列表成功', {'tasks': data, 'count': count, 'currPage': index,
'envInfo': env_name_dic,
'proInfo': project_name_dic,
'moduleInfo': module_name_dic}))
def task_create(request):
if request.method == 'GET':
return render(request, 'api/task_new.html')
elif request.user.has_perm(AUTH_ADD_TASK):
if request.method == 'POST':
model_form = TaskModelForm(request.POST)
if model_form.is_valid():
task_name = request.POST.get('task_name')
env_id = request.POST.get('belong_env')
project_id = request.POST.get('belong_project')
module_id = request.POST.get('belong_module')
emails = request.POST.get('receiver_email')
start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
if request.POST.get('is_loop') == 'true':
is_loop = True
elif request.POST.get('is_loop') == 'false':
is_loop = False
interval_minute = request.POST.get('interval_minute')
error_msg = None
if not EnvInfo.objects.filter(id=env_id).exists():
error_msg = '此环境不存在'
elif not ProjectInfo.objects.filter(id=project_id).exists():
error_msg = '此项目不存在'
elif not ModuleInfo.objects.filter(id=module_id).exists():
error_msg = '此模块不存在'
elif TaskInfo.objects.filter(task_name=task_name, belong_module_id=module_id).exists():
error_msg = '已存在此任务'
elif start_time <= datetime.datetime.now():
error_msg = '任务开始时间早于当前时间'
elif is_loop and int(interval_minute) < 1:
error_msg = '任务开始循环间隔时间不能小于1分钟'
elif not validate_emails(emails.split(';')):
error_msg = '邮箱格式错误'
if error_msg is not None:
return JsonResponse(get_ajax_msg(0, 0, error_msg, {}))
model_form.instance.belong_env_id = env_id
model_form.instance.belong_project_id = project_id
model_form.instance.belong_module_id = module_id
model_form.instance.start_time = start_time
model_form.instance.receiver_email = deal_emails(emails.split(';'))
model_form.save()
for case_id in request.POST.get('case_list').split(','):
task = TaskInfo.objects.get(task_name=request.POST.get('task_name'))
case = TestCaseInfo.objects.get(id=case_id)
task.cases.add(case)
return JsonResponse(get_ajax_msg(1, 1, '添加任务成功', {}))
else:
msg = get_validate_form_msg(model_form)
return JsonResponse(get_ajax_msg(0, 0, msg))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有创建任务的权限'))
def task_search(request):
if request.method == 'POST':
index = int(request.POST.get('index'))
task_name = request.POST.get('task_name')
project_name = request.POST.get('project_name')
module_name = request.POST.get('module_name')
tasks = None
env_name_dic = {}
project_name_dic = {}
module_name_dic = {}
count = 0
if len(task_name) == 0 and len(project_name) == 0 and len(module_name) == 0:
return JsonResponse(get_ajax_msg(0, 0, '搜索条件无效'))
else:
tasks = TaskInfo.objects.all()
if len(module_name) != 0 and module_name != '模块名称':
tasks = tasks.filter(belong_module__module_name__contains=module_name)
if len(project_name) != 0 and project_name != '项目名称':
tasks = tasks.filter(belong_project__project_name__contains=project_name)
if len(task_name) != 0:
tasks = tasks.filter(task_name__contains=task_name)
if tasks == None:
return JsonResponse(get_ajax_msg(0, 0, '查询出错'))
if tasks != None and len(tasks) > 0:
tasks = filter_tasks_for_user(request.user, tasks.order_by('-id'), AUTH_VIEW) # 根据用户权限筛选模块
for task in tasks:
append_env_dict(task, env_name_dic)
append_project_dict(task, project_name_dic)
append_module_dict(task, module_name_dic)
count = len(tasks)
tasks = pagination_for_objects(tasks, index)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '搜索成功', {'tasks': data, 'count': count, 'currPage': index,
'envInfo': env_name_dic, 'proInfo': project_name_dic,
'moduleInfo': module_name_dic}))
def task_delete(request):
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
if check_perm(request.user, tasks[0], AUTH_DELETE):
tasks[0].delete()
return JsonResponse(get_ajax_msg(1, 1, '删除成功', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有删除该任务的权限'))
def task_query(request):
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
tasks = filter_tasks_for_user(request.user, tasks, AUTH_VIEW)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '获取任务成功', {'tasks': data}))
def task_update(request):
if request.method == 'POST':
task_form = TaskModelForm(request.POST)
if task_form.is_valid():
task_id = request.POST.get('id')
task_name = request.POST.get('task_name')
env_name = request.POST.get('env_name')
project_name = request.POST.get('project_name')
module_name = request.POST.get('module_name')
receiver_email = request.POST.get('receiver_email')
case_list = request.POST.get('case_list').split(',')
start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
interval_minute = request.POST.get('interval_minute')
if request.POST.get('is_loop') == 'true':
is_loop = True
if int(interval_minute) < 1:
return JsonResponse(get_ajax_msg(0, 0, '循环间隔时间不能小于1分钟', {}))
elif request.POST.get('is_loop') == 'false':
is_loop = False
if start_time <= datetime.datetime.now():
start_time = datetime.datetime.now()
# return JsonResponse(get_ajax_msg(0, 0, '任务开始时间早于当前时间', {}))
if not validate_emails(receiver_email.split(';')):
return JsonResponse(get_ajax_msg(0, 0, '邮箱格式错误'))
# print(deal_emails(receiver_email.split(';')))
try:
task = TaskInfo.objects.get(id=task_id)
if TaskInfo.objects.filter(task_name=task_name,belong_module_id=module_name).exclude(id=task_id).exists():
return JsonResponse(get_ajax_msg(0, 0, '已存在此任务名称', {}))
if not task.is_run:
if check_perm(request.user, TaskInfo.objects.get(id=task_id), AUTH_UPDATE):
if TaskInfo.objects.update_task(task_id, task_name=task_name, env_name=env_name,
project_name=project_name,
module_name=module_name, receiver_email=deal_emails(receiver_email.split(';')),
case_list=case_list,
start_time=start_time, is_loop=is_loop,
interval_minute=interval_minute):
return JsonResponse(get_ajax_msg(1, 1, '修改任务成功', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '修改任务失败', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有修改该任务的权限'))
else:
return JsonResponse(get_ajax_msg(0, 0, '请先停止任务', {}))
except:
return JsonResponse(get_ajax_msg(0, 0, '该任务不存在', {}))
else:
msg = get_validate_form_msg(task_form)
return JsonResponse(get_ajax_msg(0, 1, msg))
def task_run(request):
global is_timer_start
global run_task_list
global run_job_dict
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
task = tasks[0]
if not task.is_run:
if task.start_time > datetime.datetime.now(): # 任务开始时间必须大于当前时间
pass
else:
task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
# if not is_timer_start:
# is_timer_start = True
# start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
# start_task_timer.start()
run_task_list.append(task)
task.is_run = True
task.save()
connection.close()
return JsonResponse(get_ajax_msg(1, 1, '该任务成功运行'))
else:
connection.close()
return JsonResponse(get_ajax_msg(0, 0, '该任务正在运行'))
def task_stop(request):
global run_task_list
global run_job_dict
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
task = tasks[0]
if task.is_run:
task.is_run = False
task.fail_times = 0
task.save()
# if task in run_task_list:
# run_task_list.remove(task) # 从运行任务列表中删除该任务
try:
# jobs = run_job_dict[task.id]
# for job in jobs:
schedule.cancel_job(task.id)
except KeyError:
print('非循环任务')
return JsonResponse(get_ajax_msg(1, 1, '该任务成功停止'))
else:
return JsonResponse(get_ajax_msg(0, 0, '该任务没有运行'))
def task_monitor(request):
if request.method == 'GET':
return render(request, 'api/task_monitor.html')
if request.method == 'POST':
index = int(request.POST.get('index'))
search_task_name = request.POST.get('task_name')
start = (index - 1) * 10
res = requests.get('http://127.0.0.1:5555/api/tasks?limit=1000') # 控制查询最大数目为1000,以解决查询卡顿的问题
results = json.loads(res.content)
monitor_result_list = []
for result in results.values():
try:
task_dict = {}
args = result['args'].split(',')
# 获取任务信息
infos = args[1].split('-')
if '定时任务' in infos[0]:
task_name = infos[1]
case_name = infos[2]
report_uuid = args[4].split("'")[1]
task_dict['task_name'] = task_name
task_dict['case_name'] = case_name
task_dict['state'] = result['state']
task_dict['result'] = result['result']
task_dict['received'] = result['received']
task_dict['started'] = result['started']
task_dict['runtime'] = result['runtime']
task_dict['report_uuid'] = report_uuid
if search_task_name is not None:
if search_task_name in task_dict['task_name']:
monitor_result_list.append(task_dict)
else:
monitor_result_list.append(task_dict)
except Exception as e:
print('数据解析异常:' + e)
# 根据任务开始时间降序排列
for i in range(len(monitor_result_list) - 1):
for j in range(len(monitor_result_list) - i - 1):
if monitor_result_list[j]['received'] < monitor_result_list[j + 1]['received']:
monitor_result_list[j], monitor_result_list[j + 1] = monitor_result_list[j + 1], monitor_result_list[j]
data = dataToJson(monitor_result_list[start: start + 10])
return JsonResponse(get_ajax_msg(1, 1, '获取监控任务列表成功', {'monitors': data, 'count': len(monitor_result_list), 'currPage': index}))
def thread_run_case(**kwargs):
case_id = kwargs['case_id']
base_url = kwargs['base_url']
task_name = kwargs['task_name']
task_id = kwargs['task_id']
threading.Thread(target=run_case, args=(base_url, case_id, task_name, task_id)).start()
def run_case(base_url, case_id, task_name, task_id):
report_id = run_case_by_id(base_url, case_id, task_name,"定时任务",isTask=True)
time.sleep(5) # 等待报告信息写入数据库
reports = ReportInfo.objects.all().filter(report_id=report_id)
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) > 0:
task = tasks[0]
if len(reports) == 0:
# 若没有此条报告,则认为用例成功,不再需要后续操作
if len(tasks) > 0:
task.fail_times = 0
task.save()
else:
response_result = get_response_result(report_id)
if response_result != True:
task.fail_times += 1
task.save()
# 存失败记录
failRecord = TaskFailedRecord(task_id=task,report_id=reports[0].id,time=datetime.datetime.fromtimestamp(reports[0].test_time))
failRecord.save()
if task.fail_times % 2 == 0 and task.fail_times != 0:
receivers = task.receiver_email.split(';')
for receiver in receivers:
send_warn_mail(task_name, receiver, reports[0].id)
connection.close() # 避免造成mysql连接数过多的问题
def get_response_result(report_id):
response_result = True
try:
reports = ReportInfo.objects.all().filter(report_id=report_id)
if len(reports) > 0:
report = reports[0]
# print(report.result_data)
summury = json.loads(report.result_data)
stat = summury['stat']
if stat['successes'] != stat['testsRun']:
response_result = False
except Exception as e:
print('get_response_code e=====>', e)
return response_result
def send_warn_mail(task_name, receiver, report_id):
tips = task_name + ':监控到接口发生异常!查看报告地址:http://qa.15166.com/api/get_report/?id=' + str(report_id)
try:
email_title = "Joy_QA_Platform 定时任务监控接口"
email_body = tips
# 使用Django内置函数完成邮件发送。四个参数:主题,邮件内容,从哪里发,接受者list
send_status = send_mail(email_title, email_body, EMAIL_FROM, [receiver])
except Exception as e:
print(e)
def task2Dict(task):
task_dict = {}
task_dict["id"] = task.id
task_dict["task_name"] = task.task_name
task_dict["belong_env"] = task.belong_env_id
task_dict["belong_project"] = task.belong_project_id
task_dict["belong_module"] = task.belong_module_id
task_dict["receiver_email"] = task.receiver_email
task_dict["case_id_list"] = []
task_dict["case_name_list"] = []
task_dict["start_time"] = task.start_time
task_dict["is_loop"] = task.is_loop
task_dict["interval_minute"] = task.interval_minute
task_dict["is_run"] = task.is_run
task_dict["fail_times"] = task.fail_times
cases = task.cases.all()
for case in cases:
id = case.id
task_dict["case_id_list"].append(case.id)
task_dict["case_name_list"].append(case.name)
return task_dict
def append_env_dict(task, env_dict):
env_id = task.belong_env_id
env_name = task.belong_env.env_name
env_dict[str(env_id)] = env_name
def append_project_dict(task, project_dict):
project_id = task.belong_project_id
project_name = task.belong_project.project_name
project_dict[str(project_id)] = project_name
def append_module_dict(task, module_dict):
module_id = task.belong_module_id
module_name = task.belong_module.module_name
module_dict[str(module_id)] = module_name
def get_url_from_task(task):
envs = EnvInfo.objects.filter(id=task.belong_env_id)
env = envs[0]
return env.host_port
class StartTaskTimer(threading.Thread):
def __init__(self, run_task_list, run_job_dict):
threading.Thread.__init__(self)
self.run_task_list = run_task_list
self.run_job_dict = run_job_dict
def run(self):
while True:
# lst = self.run_task_list[::]
tasks = get_running_tasks()
for task in tasks:
now = datetime.datetime.now()
if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)):
if task.is_loop:
self.run_job_dict[task.id] = start_loop_task(task, thread_run_case)
else:
start_task(task, thread_run_case)
task.is_run = False
task.fail_times = 0
task.save()
# self.run_task_list.remove(task)
else:
pass
time.sleep(5)
mutex = threading.Lock()
def get_running_tasks():
global mutex
with mutex:
result = []
tasks = TaskInfo.objects.filter(is_run=True,is_loop=True)
now = datetime.datetime.now()
for task in tasks:
# 排除可能的重复执行
if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)) and (now - task.last_run_time > datetime.timedelta(seconds=5)):
result.append(task)
task.last_run_time = now
task.save()
# if datetime.datetime.now() - task.last_run_time > datetime.timedelta(seconds=task.interval_minute * 60 - 5):
# result.append(task)
connection.close()
if len(result) > 0:
for i in result:
print("获取到任务:",i.task_name)
return result
def start_loop_task(task, func):
base_url = get_url_from_task(task)
jobs = []
cases = task.cases.all()
for case in cases:
task_name = get_task_name(task, case)
func(case_id=case.id, base_url=base_url, task_name=task_name, task_id=task.id)
job = schedule.every(task.interval_minute).minutes.do(thread_run_case, case_id=case.id,
base_url=base_url, task_name=task_name, task_id=task.id)
cache.set("qa_paltform_loop_jobs_"+str(datetime.datetime.now()),pickle.dumps(job),timeout=None)
flag = cache.get("qa_test_platform_running_flag")
# print("flag==="+str(flag))
if flag != 1:
schedule.run_continuously()
# 一定要添加过期时间,否则当值过期时还会起新的线程(发现默认过期时间5分钟,这是django-redis组件和原生redis的区别)
cache.set("qa_test_platform_running_flag",1,timeout=None)
return jobs
def start_task(task, func):
base_url = get_url_from_task(task)
cases = task.cases.all()
for case in cases:
task_name = get_task_name(task, case)
func(case_id=case.id, base_url=base_url, task_name=task_name, task_id=task.id)
def get_task_name(task, case):
name = '定时任务' + '-' + task.task_name + '-' + case.name
return name
def filter_tasks_for_user(user, tasks, perm):
results = []
for task in tasks:
project = task.belong_project
if user.has_perm(perm, project):
results.append(task)
return results
def check_perm(user, task, perm):
project = task.belong_project
return user.has_perm(perm, project)
def restart_running_task():
# 清除redis中的任务缓存
cache.delete_pattern("qa_paltform_loop_jobs_*")
# 清除redis中的分布式锁,避免偶发的锁出现问题,任务会在执行器中的run_pending阻塞
cache.delete_pattern('*qa_test_platform_get')
# 增加是否已经启动了线程的标记,避免每增加一个执行任务就启动一次线程,可能导致任务重复执行
cache.delete_pattern('qa_test_platform_running_flag')
print("清除任务缓存、清除锁、清除线程启动标记")
start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
start_task_timer.start()
tasks = TaskInfo.objects.filter(is_run=True, is_loop=True)
count = 0
for task in tasks:
task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10*(count+1))
task.save()
count = count + 1
connection.close() # 避免造成mysql连接数过多的问题
def validate_emails(emails):
for email in emails:
if len(email) == 0:
continue
if re.match("^[A-Z0-9a-z._%+-]+" + EMAIL_SUFFIX, email) is None:
return False
return True
def deal_emails(emails):
result = []
for email in emails:
if email not in result:
result.append(email)
resultEmail = ""
for email in result:
resultEmail = resultEmail + ";" + email
return resultEmail[1:]
|
[
"django.core.mail.send_mail",
"django.core.cache.cache.delete_pattern",
"django.db.connection.close",
"threading.Thread.__init__",
"json.loads",
"ApiManager.models.TaskInfo.objects.get",
"ApiManager.utils.forms.get_validate_form_msg",
"threading.Lock",
"ApiManager.models.TaskInfo.objects.all",
"datetime.timedelta",
"ApiManager.utils.forms.TaskModelForm",
"requests.get",
"django.shortcuts.render",
"datetime.datetime.now",
"ApiManager.models.TaskInfo.objects.filter",
"pickle.dumps",
"frame.utils.common.get_ajax_msg",
"threading.Thread",
"ApiManager.models.TestCaseInfo.objects.get",
"re.match",
"django.core.cache.cache.get",
"time.sleep",
"ApiManager.models.EnvInfo.objects.filter",
"datetime.datetime.fromtimestamp",
"ApiManager.utils.schedule.run_continuously",
"ApiManager.models.ModuleInfo.objects.filter",
"ApiManager.utils.schedule.cancel_job",
"ApiManager.models.ProjectInfo.objects.filter",
"frame.utils.common.dataToJson",
"django.core.cache.cache.set",
"ApiManager.utils.utils.pagination_for_objects",
"ApiManager.models.ReportInfo.objects.all",
"ApiManager.utils.case_utils.run_case_by_id",
"ApiManager.utils.schedule.every"
] |
[((20053, 20069), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (20067, 20069), False, 'import threading\n'), ((15619, 15684), 'ApiManager.utils.case_utils.run_case_by_id', 'run_case_by_id', (['base_url', 'case_id', 'task_name', '"""定时任务"""'], {'isTask': '(True)'}), "(base_url, case_id, task_name, '定时任务', isTask=True)\n", (15633, 15684), False, 'from ApiManager.utils.case_utils import run_case_by_id\n'), ((15687, 15700), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (15697, 15700), False, 'import time\n'), ((15795, 15830), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'id': 'task_id'}), '(id=task_id)\n', (15818, 15830), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((16598, 16616), 'django.db.connection.close', 'connection.close', ([], {}), '()\n', (16614, 16616), False, 'from django.db import connection\n'), ((18967, 19012), 'ApiManager.models.EnvInfo.objects.filter', 'EnvInfo.objects.filter', ([], {'id': 'task.belong_env_id'}), '(id=task.belong_env_id)\n', (18989, 19012), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((21478, 21520), 'django.core.cache.cache.get', 'cache.get', (['"""qa_test_platform_running_flag"""'], {}), "('qa_test_platform_running_flag')\n", (21487, 21520), False, 'from django.core.cache import cache\n'), ((22511, 22558), 'django.core.cache.cache.delete_pattern', 'cache.delete_pattern', (['"""qa_paltform_loop_jobs_*"""'], {}), "('qa_paltform_loop_jobs_*')\n", (22531, 22558), False, 'from django.core.cache import cache\n'), ((22617, 22662), 'django.core.cache.cache.delete_pattern', 'cache.delete_pattern', (['"""*qa_test_platform_get"""'], {}), "('*qa_test_platform_get')\n", (22637, 22662), False, 'from django.core.cache import cache\n'), ((22718, 22771), 'django.core.cache.cache.delete_pattern', 'cache.delete_pattern', 
(['"""qa_test_platform_running_flag"""'], {}), "('qa_test_platform_running_flag')\n", (22738, 22771), False, 'from django.core.cache import cache\n'), ((22918, 22968), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'is_run': '(True)', 'is_loop': '(True)'}), '(is_run=True, is_loop=True)\n', (22941, 22968), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((23149, 23167), 'django.db.connection.close', 'connection.close', ([], {}), '()\n', (23165, 23167), False, 'from django.db import connection\n'), ((1023, 1060), 'django.shortcuts.render', 'render', (['request', '"""api/task_list.html"""'], {}), "(request, 'api/task_list.html')\n", (1029, 1060), False, 'from django.shortcuts import render_to_response, render\n'), ((2305, 2341), 'django.shortcuts.render', 'render', (['request', '"""api/task_new.html"""'], {}), "(request, 'api/task_new.html')\n", (2311, 2341), False, 'from django.shortcuts import render_to_response, render\n'), ((6543, 6579), 'ApiManager.utils.utils.pagination_for_objects', 'pagination_for_objects', (['tasks', 'index'], {}), '(tasks, index)\n', (6565, 6579), False, 'from ApiManager.utils.utils import pagination_for_objects\n'), ((6736, 6762), 'frame.utils.common.dataToJson', 'dataToJson', (['task_info_list'], {}), '(task_info_list)\n', (6746, 6762), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((7186, 7221), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'id': 'task_id'}), '(id=task_id)\n', (7209, 7221), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((7669, 7704), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'id': 'task_id'}), '(id=task_id)\n', (7692, 7704), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, 
TaskFailedRecord\n'), ((8024, 8050), 'frame.utils.common.dataToJson', 'dataToJson', (['task_info_list'], {}), '(task_info_list)\n', (8034, 8050), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((8207, 8234), 'ApiManager.utils.forms.TaskModelForm', 'TaskModelForm', (['request.POST'], {}), '(request.POST)\n', (8220, 8234), False, 'from ApiManager.utils.forms import TaskModelForm\n'), ((11299, 11334), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'id': 'task_id'}), '(id=task_id)\n', (11322, 11334), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((12350, 12385), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'id': 'task_id'}), '(id=task_id)\n', (12373, 12385), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((13139, 13179), 'django.shortcuts.render', 'render', (['request', '"""api/task_monitor.html"""'], {}), "(request, 'api/task_monitor.html')\n", (13145, 13179), False, 'from django.shortcuts import render_to_response, render\n'), ((13364, 13422), 'requests.get', 'requests.get', (['"""http://127.0.0.1:5555/api/tasks?limit=1000"""'], {}), "('http://127.0.0.1:5555/api/tasks?limit=1000')\n", (13376, 13422), False, 'import requests\n'), ((13469, 13492), 'json.loads', 'json.loads', (['res.content'], {}), '(res.content)\n', (13479, 13492), False, 'import json\n'), ((15102, 15151), 'frame.utils.common.dataToJson', 'dataToJson', (['monitor_result_list[start:start + 10]'], {}), '(monitor_result_list[start:start + 10])\n', (15112, 15151), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((17481, 17539), 'django.core.mail.send_mail', 'send_mail', (['email_title', 'email_body', 'EMAIL_FROM', '[receiver]'], {}), '(email_title, email_body, EMAIL_FROM, [receiver])\n', (17490, 17539), False, 'from django.core.mail 
import send_mail\n'), ((19164, 19195), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (19189, 19195), False, 'import threading\n'), ((20165, 20215), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'is_run': '(True)', 'is_loop': '(True)'}), '(is_run=True, is_loop=True)\n', (20188, 20215), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((20229, 20252), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20250, 20252), False, 'import datetime\n'), ((20733, 20751), 'django.db.connection.close', 'connection.close', ([], {}), '()\n', (20749, 20751), False, 'from django.db import connection\n'), ((21580, 21607), 'ApiManager.utils.schedule.run_continuously', 'schedule.run_continuously', ([], {}), '()\n', (21605, 21607), False, 'from ApiManager.utils import schedule\n'), ((21692, 21751), 'django.core.cache.cache.set', 'cache.set', (['"""qa_test_platform_running_flag"""', '(1)'], {'timeout': 'None'}), "('qa_test_platform_running_flag', 1, timeout=None)\n", (21701, 21751), False, 'from django.core.cache import cache\n'), ((1352, 1390), 'ApiManager.utils.utils.pagination_for_objects', 'pagination_for_objects', (['results', 'index'], {}), '(results, index)\n', (1374, 1390), False, 'from ApiManager.utils.utils import pagination_for_objects\n'), ((1826, 1852), 'frame.utils.common.dataToJson', 'dataToJson', (['task_info_list'], {}), '(task_info_list)\n', (1836, 1852), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((5630, 5652), 'ApiManager.models.TaskInfo.objects.all', 'TaskInfo.objects.all', ([], {}), '()\n', (5650, 5652), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((6791, 6962), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""搜索成功"""', "{'tasks': data, 'count': count, 'currPage': 
index, 'envInfo': env_name_dic,\n 'proInfo': project_name_dic, 'moduleInfo': module_name_dic}"], {}), "(1, 1, '搜索成功', {'tasks': data, 'count': count, 'currPage':\n index, 'envInfo': env_name_dic, 'proInfo': project_name_dic,\n 'moduleInfo': module_name_dic})\n", (6803, 6962), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((8079, 8124), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""获取任务成功"""', "{'tasks': data}"], {}), "(1, 1, '获取任务成功', {'tasks': data})\n", (8091, 8124), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((11019, 11051), 'ApiManager.utils.forms.get_validate_form_msg', 'get_validate_form_msg', (['task_form'], {}), '(task_form)\n', (11040, 11051), False, 'from ApiManager.utils.forms import get_validate_form_msg\n'), ((11995, 12013), 'django.db.connection.close', 'connection.close', ([], {}), '()\n', (12011, 12013), False, 'from django.db import connection\n'), ((12103, 12121), 'django.db.connection.close', 'connection.close', ([], {}), '()\n', (12119, 12121), False, 'from django.db import connection\n'), ((15460, 15539), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_case', 'args': '(base_url, case_id, task_name, task_id)'}), '(target=run_case, args=(base_url, case_id, task_name, task_id))\n', (15476, 15539), False, 'import threading\n'), ((15730, 15754), 'ApiManager.models.ReportInfo.objects.all', 'ReportInfo.objects.all', ([], {}), '()\n', (15752, 15754), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((16906, 16936), 'json.loads', 'json.loads', (['report.result_data'], {}), '(report.result_data)\n', (16916, 16936), False, 'import json\n'), ((20030, 20043), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (20040, 20043), False, 'import time\n'), ((21435, 21452), 'pickle.dumps', 'pickle.dumps', (['job'], {}), '(job)\n', (21447, 21452), False, 'import pickle\n'), ((23032, 23055), 
'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (23053, 23055), False, 'import datetime\n'), ((23058, 23102), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(10 * (count + 1))'}), '(seconds=10 * (count + 1))\n', (23076, 23102), False, 'import datetime\n'), ((23305, 23357), 're.match', 're.match', (["('^[A-Z0-9a-z._%+-]+' + EMAIL_SUFFIX)", 'email'], {}), "('^[A-Z0-9a-z._%+-]+' + EMAIL_SUFFIX, email)\n", (23313, 23357), False, 'import re\n'), ((1881, 2056), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""获取任务列表成功"""', "{'tasks': data, 'count': count, 'currPage': index, 'envInfo': env_name_dic,\n 'proInfo': project_name_dic, 'moduleInfo': module_name_dic}"], {}), "(1, 1, '获取任务列表成功', {'tasks': data, 'count': count, 'currPage':\n index, 'envInfo': env_name_dic, 'proInfo': project_name_dic,\n 'moduleInfo': module_name_dic})\n", (1893, 2056), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((2451, 2478), 'ApiManager.utils.forms.TaskModelForm', 'TaskModelForm', (['request.POST'], {}), '(request.POST)\n', (2464, 2478), False, 'from ApiManager.utils.forms import TaskModelForm\n'), ((5022, 5055), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""用户没有创建任务的权限"""'], {}), "(0, 0, '用户没有创建任务的权限')\n", (5034, 5055), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((5566, 5594), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""搜索条件无效"""'], {}), "(0, 0, '搜索条件无效')\n", (5578, 5594), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((6122, 6148), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""查询出错"""'], {}), "(0, 0, '查询出错')\n", (6134, 6148), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((7282, 7314), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""没有这条数据"""', '{}'], {}), "(0, 0, '没有这条数据', {})\n", (7294, 7314), False, 'from frame.utils.common import 
get_ajax_msg, dataToJson\n'), ((7438, 7468), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""删除成功"""', '{}'], {}), "(1, 1, '删除成功', {})\n", (7450, 7468), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((7516, 7550), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""用户没有删除该任务的权限"""'], {}), "(0, 0, '用户没有删除该任务的权限')\n", (7528, 7550), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((7765, 7797), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""没有这条数据"""', '{}'], {}), "(0, 0, '没有这条数据', {})\n", (7777, 7797), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((9163, 9186), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9184, 9186), False, 'import datetime\n'), ((9217, 9240), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9238, 9240), False, 'import datetime\n'), ((9550, 9582), 'ApiManager.models.TaskInfo.objects.get', 'TaskInfo.objects.get', ([], {'id': 'task_id'}), '(id=task_id)\n', (9570, 9582), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((11084, 11107), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(1)', 'msg'], {}), '(0, 1, msg)\n', (11096, 11107), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((11395, 11427), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""没有这条数据"""', '{}'], {}), "(0, 0, '没有这条数据', {})\n", (11407, 11427), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((11515, 11538), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11536, 11538), False, 'import datetime\n'), ((12046, 12075), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""该任务成功运行"""'], {}), "(1, 1, '该任务成功运行')\n", (12058, 12075), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((12154, 12183), 
'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""该任务正在运行"""'], {}), "(0, 0, '该任务正在运行')\n", (12166, 12183), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((12446, 12478), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""没有这条数据"""', '{}'], {}), "(0, 0, '没有这条数据', {})\n", (12458, 12478), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((12834, 12862), 'ApiManager.utils.schedule.cancel_job', 'schedule.cancel_job', (['task.id'], {}), '(task.id)\n', (12853, 12862), False, 'from ApiManager.utils import schedule\n'), ((12955, 12984), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""该任务成功停止"""'], {}), "(1, 1, '该任务成功停止')\n", (12967, 12984), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((13032, 13061), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""该任务没有运行"""'], {}), "(0, 0, '该任务没有运行')\n", (13044, 13061), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((16730, 16754), 'ApiManager.models.ReportInfo.objects.all', 'ReportInfo.objects.all', ([], {}), '()\n', (16752, 16754), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((19456, 19479), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19477, 19479), False, 'import datetime\n'), ((4889, 4922), 'ApiManager.utils.forms.get_validate_form_msg', 'get_validate_form_msg', (['model_form'], {}), '(model_form)\n', (4910, 4922), False, 'from ApiManager.utils.forms import get_validate_form_msg\n'), ((9419, 9447), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""邮箱格式错误"""'], {}), "(0, 0, '邮箱格式错误')\n", (9431, 9447), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((11631, 11654), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11652, 11654), False, 'import datetime\n'), ((11657, 11687), 
'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (11675, 11687), False, 'import datetime\n'), ((16286, 16339), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['reports[0].test_time'], {}), '(reports[0].test_time)\n', (16317, 16339), False, 'import datetime\n'), ((20427, 20456), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (20445, 20456), False, 'import datetime\n'), ((21162, 21198), 'ApiManager.utils.schedule.every', 'schedule.every', (['task.interval_minute'], {}), '(task.interval_minute)\n', (21176, 21198), False, 'from ApiManager.utils import schedule\n'), ((21410, 21433), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (21431, 21433), False, 'import datetime\n'), ((1282, 1307), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {}), '()\n', (1305, 1307), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((4700, 4736), 'ApiManager.models.TestCaseInfo.objects.get', 'TestCaseInfo.objects.get', ([], {'id': 'case_id'}), '(id=case_id)\n', (4724, 4736), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((4815, 4847), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""添加任务成功"""', '{}'], {}), "(1, 1, '添加任务成功', {})\n", (4827, 4847), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((4959, 4982), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', 'msg'], {}), '(0, 0, msg)\n', (4971, 4982), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((9003, 9042), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""循环间隔时间不能小于1分钟"""', '{}'], {}), "(0, 0, '循环间隔时间不能小于1分钟', {})\n", (9015, 9042), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((9746, 9780), 
'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""已存在此任务名称"""', '{}'], {}), "(0, 0, '已存在此任务名称', {})\n", (9758, 9780), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((9866, 9898), 'ApiManager.models.TaskInfo.objects.get', 'TaskInfo.objects.get', ([], {'id': 'task_id'}), '(id=task_id)\n', (9886, 9898), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((10863, 10895), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""请先停止任务"""', '{}'], {}), "(0, 0, '请先停止任务', {})\n", (10875, 10895), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((10953, 10985), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""该任务不存在"""', '{}'], {}), "(0, 0, '该任务不存在', {})\n", (10965, 10985), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((19544, 19573), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (19562, 19573), False, 'import datetime\n'), ((20364, 20393), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (20382, 20393), False, 'import datetime\n'), ((4105, 4138), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', 'error_msg', '{}'], {}), '(0, 0, error_msg, {})\n', (4117, 4138), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((10765, 10799), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""用户没有修改该任务的权限"""'], {}), "(0, 0, '用户没有修改该任务的权限')\n", (10777, 10799), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((3239, 3272), 'ApiManager.models.EnvInfo.objects.filter', 'EnvInfo.objects.filter', ([], {'id': 'env_id'}), '(id=env_id)\n', (3261, 3272), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((9602, 9676), 'ApiManager.models.TaskInfo.objects.filter', 
'TaskInfo.objects.filter', ([], {'task_name': 'task_name', 'belong_module_id': 'module_name'}), '(task_name=task_name, belong_module_id=module_name)\n', (9625, 9676), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((10549, 10581), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(1)', '(1)', '"""修改任务成功"""', '{}'], {}), "(1, 1, '修改任务成功', {})\n", (10561, 10581), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((10661, 10693), 'frame.utils.common.get_ajax_msg', 'get_ajax_msg', (['(0)', '(0)', '"""修改任务失败"""', '{}'], {}), "(0, 0, '修改任务失败', {})\n", (10673, 10693), False, 'from frame.utils.common import get_ajax_msg, dataToJson\n'), ((3349, 3390), 'ApiManager.models.ProjectInfo.objects.filter', 'ProjectInfo.objects.filter', ([], {'id': 'project_id'}), '(id=project_id)\n', (3375, 3390), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((3467, 3506), 'ApiManager.models.ModuleInfo.objects.filter', 'ModuleInfo.objects.filter', ([], {'id': 'module_id'}), '(id=module_id)\n', (3492, 3506), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((3579, 3651), 'ApiManager.models.TaskInfo.objects.filter', 'TaskInfo.objects.filter', ([], {'task_name': 'task_name', 'belong_module_id': 'module_id'}), '(task_name=task_name, belong_module_id=module_id)\n', (3602, 3651), False, 'from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord\n'), ((3738, 3761), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3759, 3761), False, 'import datetime\n')]
|
from dagster import check
from dagster.utils import single_item
from .builtin_enum import BuiltinEnum
from .config import ConfigType, List, Nullable
from .wrapping import WrappingListType, WrappingNullableType
class InputSchema:
    """Base class for input schemas.

    Pairs a config type (``schema_type``) with a hook that turns a
    validated config value into a runtime value.
    """

    @property
    def schema_type(self):
        """Config type handled by this schema; subclasses must override."""
        klass = type(self).__name__
        check.not_implemented('Must override schema_type in {klass}'.format(klass=klass))

    def construct_from_config_value(self, config_value):
        """Default construction hook: hand the config value back unchanged."""
        return config_value
def resolve_config_cls_arg(config_cls):
    """Normalize a user-supplied config class argument into a ConfigType instance.

    Accepts a BuiltinEnum, a wrapping List/Nullable marker (resolved
    recursively), or a bare ConfigType subclass.
    """
    if isinstance(config_cls, BuiltinEnum):
        return ConfigType.from_builtin_enum(config_cls)
    if isinstance(config_cls, WrappingListType):
        inner = resolve_config_cls_arg(config_cls.inner_type)
        return List(inner)
    if isinstance(config_cls, WrappingNullableType):
        inner = resolve_config_cls_arg(config_cls.inner_type)
        return Nullable(inner)
    # Bare class: must itself be a ConfigType subclass; return its singleton.
    check.type_param(config_cls, 'config_cls')
    check.param_invariant(issubclass(config_cls, ConfigType), 'config_cls')
    return config_cls.inst()
def make_bare_input_schema(config_cls):
    """Return an InputSchema for ``config_cls`` using the default passthrough hook."""
    config_type = resolve_config_cls_arg(config_cls)
    # Build the anonymous subclass dynamically; only schema_type is overridden.
    schema_cls = type(
        '_InputSchema',
        (InputSchema,),
        {'schema_type': property(lambda self: config_type)},
    )
    return schema_cls()
class OutputSchema:
    """Base class for output materialization schemas.

    Pairs a config type (``schema_type``) with a hook that materializes a
    runtime value according to a config value.
    """

    @property
    def schema_type(self):
        """Config type handled by this schema; subclasses must override."""
        klass = type(self).__name__
        check.not_implemented('Must override schema_type in {klass}'.format(klass=klass))

    def materialize_runtime_value(self, _config_value, _runtime_value):
        """Materialize a runtime value; subclasses must override."""
        check.not_implemented('Must implement')
def _create_input_schema(config_type, func):
    """Make an InputSchema bound to ``config_type`` whose construction hook
    delegates to ``func``."""

    def _construct(self, config_value):
        return func(config_value)

    # Build the anonymous subclass dynamically rather than with a class statement.
    schema_cls = type(
        '_InputSchema',
        (InputSchema,),
        {
            'schema_type': property(lambda self: config_type),
            'construct_from_config_value': _construct,
        },
    )
    return schema_cls()
def input_schema(config_cls):
    """Decorator factory: wrap a one-argument function into an InputSchema.

    The decorated function receives the validated config value and returns
    the constructed runtime value.
    """
    config_type = resolve_config_cls_arg(config_cls)

    def _decorator(func):
        return _create_input_schema(config_type, func)

    return _decorator
def input_selector_schema(config_cls):
    """Like :func:`input_schema`, but for selector config types.

    The decorated function is invoked as ``func(key, value)``, where
    ``(key, value)`` is the single entry selected in the config.
    """
    config_type = resolve_config_cls_arg(config_cls)
    check.param_invariant(config_type.is_selector, 'config_cls')

    def _wrap(func):
        def _unpack_and_call(config_value):
            key, value = single_item(config_value)
            return func(key, value)

        return _create_input_schema(config_type, _unpack_and_call)

    return _wrap
def _create_output_schema(config_type, func):
    """Make an OutputSchema bound to ``config_type`` whose materialization hook
    delegates to ``func``."""

    def _materialize(self, config_value, runtime_value):
        return func(config_value, runtime_value)

    # Build the anonymous subclass dynamically rather than with a class statement.
    schema_cls = type(
        '_OutputSchema',
        (OutputSchema,),
        {
            'schema_type': property(lambda self: config_type),
            'materialize_runtime_value': _materialize,
        },
    )
    return schema_cls()
def output_schema(config_cls):
    """Decorator factory: wrap a two-argument function into an OutputSchema.

    The decorated function is invoked as ``func(config_value, runtime_value)``
    and performs the materialization.
    """
    config_type = resolve_config_cls_arg(config_cls)
    # Bug fix: this previously delegated to _create_input_schema, which built
    # an InputSchema and would call the decorated two-argument materializer
    # with a single argument. Output schemas must wrap the function as an
    # OutputSchema, exactly as output_selector_schema does.
    return lambda func: _create_output_schema(config_type, func)
def output_selector_schema(config_cls):
    """Like :func:`output_schema`, but for selector config types.

    The decorated function is invoked as ``func(key, value, runtime_value)``,
    where ``(key, value)`` is the single entry selected in the config.
    """
    config_type = resolve_config_cls_arg(config_cls)
    check.param_invariant(config_type.is_selector, 'config_cls')

    def _wrap(func):
        def _unpack_and_call(config_value, runtime_value):
            key, value = single_item(config_value)
            return func(key, value, runtime_value)

        return _create_output_schema(config_type, _unpack_and_call)

    return _wrap
|
[
"dagster.check.not_implemented",
"dagster.utils.single_item",
"dagster.check.type_param",
"dagster.check.param_invariant"
] |
[((2133, 2193), 'dagster.check.param_invariant', 'check.param_invariant', (['config_type.is_selector', '"""config_cls"""'], {}), "(config_type.is_selector, 'config_cls')\n", (2154, 2193), False, 'from dagster import check\n'), ((3027, 3087), 'dagster.check.param_invariant', 'check.param_invariant', (['config_type.is_selector', '"""config_cls"""'], {}), "(config_type.is_selector, 'config_cls')\n", (3048, 3087), False, 'from dagster import check\n'), ((1555, 1594), 'dagster.check.not_implemented', 'check.not_implemented', (['"""Must implement"""'], {}), "('Must implement')\n", (1576, 1594), False, 'from dagster import check\n'), ((2296, 2321), 'dagster.utils.single_item', 'single_item', (['config_value'], {}), '(config_value)\n', (2307, 2321), False, 'from dagster.utils import single_item\n'), ((3205, 3230), 'dagster.utils.single_item', 'single_item', (['config_value'], {}), '(config_value)\n', (3216, 3230), False, 'from dagster.utils import single_item\n'), ((889, 931), 'dagster.check.type_param', 'check.type_param', (['config_cls', '"""config_cls"""'], {}), "(config_cls, 'config_cls')\n", (905, 931), False, 'from dagster import check\n')]
|
from django.conf.urls import url
from django.urls import path,include
from . import views
from .feeds import LatestPostsFeed
from .views import search, PostViewSet
from rest_framework import routers
from django.views.generic import TemplateView
# DRF router exposing PostViewSet; its routes are mounted via the
# include(router.urls) entry in urlpatterns below.
router = routers.DefaultRouter()
router.register(r'api', PostViewSet)

# URL namespace used when reversing, e.g. 'blog:post_list'.
app_name = 'blog'

urlpatterns = [
    # NOTE: Django matches patterns top-down, so '' resolves to most_viewed
    # before the router include further down is consulted.
    path('', views.most_viewed, name='most_viewed'),
    path('article/', views.post_list, name='post_list'),
    # Regex routes: tag filter and dated post detail (YYYY-MM-DD/slug).
    url(r'^tag/(?P<tag_slug>[-\w]+)/$', views.post_list, name='post_list_by_tag'),
    url(r'^blog/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})/(?P<post>[-\w]+)/$', views.post_detail, name='post_detail'),
    # RSS/Atom feed of the latest posts.
    path('feed/', LatestPostsFeed(), name='post_feed'),
    path('about/', views.about_page, name='about'),
    path('contact/', views.contact_page, name='contact_page'),
    url(r'^author/(?P<post_author>[-\w]+)/$',views.post_author, name='post_author'),
    path('search', search, name='search'),
    path('me/', views.me, name='me'),
    # REST API routes registered on the router above ('api/...').
    path('', include(router.urls)),
    path('api-auth/', include('rest_framework.urls')),
    # PWA support: offline fallback page, cache helpers, and service worker.
    path('offline/', views.offline, name='offline'),
    path('fill-dynamic-cache/<int:id>', views.fill_dynamic_cache, name='fill_dynamic_cache'),
    path('must-not-cache', views.must_not_cache, name='must_not_cache'),
    path(
        'sw.js',
        views.ServiceWorkerView.as_view(),
        name=views.ServiceWorkerView.name,
    ),
]
|
[
"django.urls.path",
"django.conf.urls.url",
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((257, 280), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (278, 280), False, 'from rest_framework import routers\n'), ((360, 407), 'django.urls.path', 'path', (['""""""', 'views.most_viewed'], {'name': '"""most_viewed"""'}), "('', views.most_viewed, name='most_viewed')\n", (364, 407), False, 'from django.urls import path, include\n'), ((413, 464), 'django.urls.path', 'path', (['"""article/"""', 'views.post_list'], {'name': '"""post_list"""'}), "('article/', views.post_list, name='post_list')\n", (417, 464), False, 'from django.urls import path, include\n'), ((470, 547), 'django.conf.urls.url', 'url', (['"""^tag/(?P<tag_slug>[-\\\\w]+)/$"""', 'views.post_list'], {'name': '"""post_list_by_tag"""'}), "('^tag/(?P<tag_slug>[-\\\\w]+)/$', views.post_list, name='post_list_by_tag')\n", (473, 547), False, 'from django.conf.urls import url\n'), ((553, 680), 'django.conf.urls.url', 'url', (['"""^blog/(?P<year>\\\\d{4})-(?P<month>\\\\d{2})-(?P<day>\\\\d{2})/(?P<post>[-\\\\w]+)/$"""', 'views.post_detail'], {'name': '"""post_detail"""'}), "('^blog/(?P<year>\\\\d{4})-(?P<month>\\\\d{2})-(?P<day>\\\\d{2})/(?P<post>[-\\\\w]+)/$'\n , views.post_detail, name='post_detail')\n", (556, 680), False, 'from django.conf.urls import url\n'), ((734, 780), 'django.urls.path', 'path', (['"""about/"""', 'views.about_page'], {'name': '"""about"""'}), "('about/', views.about_page, name='about')\n", (738, 780), False, 'from django.urls import path, include\n'), ((786, 843), 'django.urls.path', 'path', (['"""contact/"""', 'views.contact_page'], {'name': '"""contact_page"""'}), "('contact/', views.contact_page, name='contact_page')\n", (790, 843), False, 'from django.urls import path, include\n'), ((849, 934), 'django.conf.urls.url', 'url', (['"""^author/(?P<post_author>[-\\\\w]+)/$"""', 'views.post_author'], {'name': '"""post_author"""'}), "('^author/(?P<post_author>[-\\\\w]+)/$', views.post_author, name='post_author'\n )\n", (852, 934), False, 'from 
django.conf.urls import url\n'), ((934, 971), 'django.urls.path', 'path', (['"""search"""', 'search'], {'name': '"""search"""'}), "('search', search, name='search')\n", (938, 971), False, 'from django.urls import path, include\n'), ((977, 1009), 'django.urls.path', 'path', (['"""me/"""', 'views.me'], {'name': '"""me"""'}), "('me/', views.me, name='me')\n", (981, 1009), False, 'from django.urls import path, include\n'), ((1107, 1154), 'django.urls.path', 'path', (['"""offline/"""', 'views.offline'], {'name': '"""offline"""'}), "('offline/', views.offline, name='offline')\n", (1111, 1154), False, 'from django.urls import path, include\n'), ((1160, 1253), 'django.urls.path', 'path', (['"""fill-dynamic-cache/<int:id>"""', 'views.fill_dynamic_cache'], {'name': '"""fill_dynamic_cache"""'}), "('fill-dynamic-cache/<int:id>', views.fill_dynamic_cache, name=\n 'fill_dynamic_cache')\n", (1164, 1253), False, 'from django.urls import path, include\n'), ((1254, 1321), 'django.urls.path', 'path', (['"""must-not-cache"""', 'views.must_not_cache'], {'name': '"""must_not_cache"""'}), "('must-not-cache', views.must_not_cache, name='must_not_cache')\n", (1258, 1321), False, 'from django.urls import path, include\n'), ((1024, 1044), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (1031, 1044), False, 'from django.urls import path, include\n'), ((1070, 1100), 'django.urls.include', 'include', (['"""rest_framework.urls"""'], {}), "('rest_framework.urls')\n", (1077, 1100), False, 'from django.urls import path, include\n')]
|
import subprocess
import os, signal, time
def run_as_test(cmd=('python', './bot.py'), wait_seconds=10):
    """Start the bot as a subprocess, let it run briefly, then kill it.

    :param cmd: argv sequence for the subprocess (defaults to the bot script).
    :param wait_seconds: seconds to let the process run before killing it.
    :return: True once the process has been terminated and reaped.
    """
    # Pass argv as a list: without shell=True a plain string is treated as a
    # single executable name on POSIX, so 'python ./bot.py' would fail to start.
    proc = subprocess.Popen(list(cmd))
    try:
        time.sleep(wait_seconds)
    finally:
        # Popen.kill()/terminate() send the right signal per platform
        # (SIGKILL/SIGTERM on POSIX, TerminateProcess on Windows), so we no
        # longer need os.kill on a raw pid.
        if getattr(signal, 'SIGKILL', None):
            proc.kill()
        else:
            proc.terminate()
        proc.wait()  # reap the child so it does not linger as a zombie
    return True
# def run_as_live():
# proc = subprocess.Popen('python bot.py')
# try:
# while True:
# time.sleep(10)
# except KeyBoardInterrupt:
# os.kill(proc.pid, signal.SIGKILL)
|
[
"os.kill",
"subprocess.Popen",
"time.sleep"
] |
[((73, 108), 'subprocess.Popen', 'subprocess.Popen', (['"""python ./bot.py"""'], {}), "('python ./bot.py')\n", (89, 108), False, 'import subprocess\n'), ((113, 127), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (123, 127), False, 'import os, signal, time\n'), ((177, 210), 'os.kill', 'os.kill', (['proc.pid', 'signal.SIGKILL'], {}), '(proc.pid, signal.SIGKILL)\n', (184, 210), False, 'import os, signal, time\n'), ((229, 262), 'os.kill', 'os.kill', (['proc.pid', 'signal.SIGTERM'], {}), '(proc.pid, signal.SIGTERM)\n', (236, 262), False, 'import os, signal, time\n')]
|
import logging
from typing import List, Iterable, Dict, Union, Any, Optional, Iterator, Tuple
from presidio_analyzer import DictAnalyzerResult, RecognizerResult, AnalyzerEngine
from presidio_analyzer.nlp_engine import NlpArtifacts
logger = logging.getLogger("presidio-analyzer")
class BatchAnalyzerEngine:
"""
Batch analysis of documents (tables, lists, dicts).
Wrapper class to run Presidio Analyzer Engine on multiple values,
either lists/iterators of strings, or dictionaries.
:param: analyzer_engine: AnalyzerEngine instance to use
for handling the values in those collections.
"""
def __init__(self, analyzer_engine: Optional[AnalyzerEngine] = None):
self.analyzer_engine = analyzer_engine
if not analyzer_engine:
self.analyzer_engine = AnalyzerEngine()
def analyze_iterator(
self,
texts: Iterable[Union[str, bool, float, int]],
language: str,
**kwargs,
) -> List[List[RecognizerResult]]:
"""
Analyze an iterable of strings.
:param texts: An list containing strings to be analyzed.
:param language: Input language
:param kwargs: Additional parameters for the `AnalyzerEngine.analyze` method.
"""
# validate types
texts = self._validate_types(texts)
# Process the texts as batch for improved performance
nlp_artifacts_batch: Iterator[
Tuple[str, NlpArtifacts]
] = self.analyzer_engine.nlp_engine.process_batch(
texts=texts, language=language
)
list_results = []
for text, nlp_artifacts in nlp_artifacts_batch:
results = self.analyzer_engine.analyze(
text=str(text), nlp_artifacts=nlp_artifacts, language=language, **kwargs
)
list_results.append(results)
return list_results
def analyze_dict(
self,
input_dict: Dict[str, Union[Any, Iterable[Any]]],
language: str,
keys_to_skip: Optional[List[str]] = None,
**kwargs,
) -> Iterator[DictAnalyzerResult]:
"""
Analyze a dictionary of keys (strings) and values/iterable of values.
Non-string values are returned as is.
:param input_dict: The input dictionary for analysis
:param language: Input language
:param keys_to_skip: Keys to ignore during analysis
:param kwargs: Additional keyword arguments
for the `AnalyzerEngine.analyze` method.
Use this to pass arguments to the analyze method,
such as `ad_hoc_recognizers`, `context`, `return_decision_process`.
See `AnalyzerEngine.analyze` for the full list.
"""
context = []
if "context" in kwargs:
context = kwargs["context"]
del kwargs["context"]
if not keys_to_skip:
keys_to_skip = []
for key, value in input_dict.items():
if not value or key in keys_to_skip:
yield DictAnalyzerResult(key=key, value=value, recognizer_results=[])
continue # skip this key as requested
# Add the key as an additional context
specific_context = context[:]
specific_context.append(key)
if type(value) in (str, int, bool, float):
results: List[RecognizerResult] = self.analyzer_engine.analyze(
text=str(value), language=language, context=[key], **kwargs
)
elif isinstance(value, dict):
new_keys_to_skip = self._get_nested_keys_to_skip(key, keys_to_skip)
results = self.analyze_dict(
input_dict=value,
language=language,
context=specific_context,
keys_to_skip=new_keys_to_skip,
**kwargs,
)
elif isinstance(value, Iterable):
# Recursively iterate nested dicts
results: List[List[RecognizerResult]] = self.analyze_iterator(
texts=value,
language=language,
context=specific_context,
**kwargs,
)
else:
raise ValueError(f"type {type(value)} is unsupported.")
yield DictAnalyzerResult(key=key, value=value, recognizer_results=results)
@staticmethod
def _validate_types(value_iterator: Iterable[Any]) -> Iterator[Any]:
for val in value_iterator:
if not type(val) in (int, float, bool, str):
err_msg = (
"Analyzer.analyze_iterator only works "
"on primitive types (int, float, bool, str). "
"Lists of objects are not yet supported."
)
logger.error(err_msg)
raise ValueError(err_msg)
yield val
@staticmethod
def _get_nested_keys_to_skip(key, keys_to_skip):
new_keys_to_skip = [
k.replace(f"{key}.", "") for k in keys_to_skip if k.startswith(key)
]
return new_keys_to_skip
|
[
"presidio_analyzer.AnalyzerEngine",
"logging.getLogger",
"presidio_analyzer.DictAnalyzerResult"
] |
[((242, 280), 'logging.getLogger', 'logging.getLogger', (['"""presidio-analyzer"""'], {}), "('presidio-analyzer')\n", (259, 280), False, 'import logging\n'), ((810, 826), 'presidio_analyzer.AnalyzerEngine', 'AnalyzerEngine', ([], {}), '()\n', (824, 826), False, 'from presidio_analyzer import DictAnalyzerResult, RecognizerResult, AnalyzerEngine\n'), ((4355, 4423), 'presidio_analyzer.DictAnalyzerResult', 'DictAnalyzerResult', ([], {'key': 'key', 'value': 'value', 'recognizer_results': 'results'}), '(key=key, value=value, recognizer_results=results)\n', (4373, 4423), False, 'from presidio_analyzer import DictAnalyzerResult, RecognizerResult, AnalyzerEngine\n'), ((3022, 3085), 'presidio_analyzer.DictAnalyzerResult', 'DictAnalyzerResult', ([], {'key': 'key', 'value': 'value', 'recognizer_results': '[]'}), '(key=key, value=value, recognizer_results=[])\n', (3040, 3085), False, 'from presidio_analyzer import DictAnalyzerResult, RecognizerResult, AnalyzerEngine\n')]
|
#coding=utf-8
#调色板
import cv2
import numpy as np
img = np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('image')
def callback(x):
pass
#参数1:名称;参数2:作用窗口,参数3、4:最小值和最大值;参数5:值更改回调方法
cv2.createTrackbar('R', 'image', 0, 255, callback)
cv2.createTrackbar('G', 'image', 0, 255, callback)
cv2.createTrackbar('B', 'image', 0, 255, callback)
while (1):
cv2.imshow('image', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
r = cv2.getTrackbarPos('R', 'image')
g = cv2.getTrackbarPos('G', 'image')
b = cv2.getTrackbarPos('B', 'image')
img[:] = [b, g, r]
cv2.destroyAllWindows()
|
[
"cv2.createTrackbar",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.getTrackbarPos",
"cv2.imshow",
"cv2.namedWindow"
] |
[((56, 89), 'numpy.zeros', 'np.zeros', (['(300, 512, 3)', 'np.uint8'], {}), '((300, 512, 3), np.uint8)\n', (64, 89), True, 'import numpy as np\n'), ((90, 114), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (105, 114), False, 'import cv2\n'), ((188, 238), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""R"""', '"""image"""', '(0)', '(255)', 'callback'], {}), "('R', 'image', 0, 255, callback)\n", (206, 238), False, 'import cv2\n'), ((239, 289), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""G"""', '"""image"""', '(0)', '(255)', 'callback'], {}), "('G', 'image', 0, 255, callback)\n", (257, 289), False, 'import cv2\n'), ((290, 340), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""B"""', '"""image"""', '(0)', '(255)', 'callback'], {}), "('B', 'image', 0, 255, callback)\n", (308, 340), False, 'import cv2\n'), ((587, 610), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (608, 610), False, 'import cv2\n'), ((357, 381), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (367, 381), False, 'import cv2\n'), ((447, 479), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""R"""', '"""image"""'], {}), "('R', 'image')\n", (465, 479), False, 'import cv2\n'), ((488, 520), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""G"""', '"""image"""'], {}), "('G', 'image')\n", (506, 520), False, 'import cv2\n'), ((529, 561), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""B"""', '"""image"""'], {}), "('B', 'image')\n", (547, 561), False, 'import cv2\n'), ((389, 403), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (400, 403), False, 'import cv2\n')]
|
import os
import sys
# import inspect
def main():
lib_directory = None
# All Python Version that will be searched
lib_major_version = 'lib_{}'.format(sys.version_info.major)
lib_minor_version = '{}.{}'.format(lib_major_version, sys.version_info.minor)
lib_micro_version = '{}.{}'.format(lib_minor_version, sys.version_info.micro)
# get all "lib" directories
app_path = os.getcwd()
contents = os.listdir(app_path)
lib_directories = []
for c in contents:
# ensure content starts with lib, is directory, and is readable
if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):
lib_directories.append(c)
# find most appropriate FULL version
if lib_micro_version in lib_directories:
lib_directory = lib_micro_version
elif lib_minor_version in lib_directories:
lib_directory = lib_minor_version
elif lib_major_version in lib_directories:
lib_directory = lib_major_version
else:
# file most appropriate PARTIAL version
for ld in lib_directories:
if lib_micro_version in ld:
lib_directory = ld
elif lib_minor_version in ld:
lib_directory = ld
elif lib_major_version in ld:
lib_directory = ld
if lib_directory is None:
print('Failed to find lib directory ({}).'.format(lib_directories))
sys.exit(1)
# use this if you want to include modules from a subfolder
# lib_path = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], lib_directory)))
lib_path = os.path.join(app_path, lib_directory)
for root, directories, files in os.walk(lib_path):
while len(directories) > 0:
module = os.path.join(root, directories.pop(0))
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '{}{}{}'.format(module, os.pathsep, os.environ['PYTHONPATH'])
else:
os.environ['PYTHONPATH'] = '{}'.format(module)
# os.environ['LD_LIBRARY_PATH'] = ''
sys.argv[0] = sys.executable
sys.argv[1] = '{}.py'.format(sys.argv[1])
os.execv('{}'.format(sys.executable), sys.argv)
if __name__ == '__main__':
main()
|
[
"os.getcwd",
"os.path.isdir",
"os.walk",
"os.access",
"os.path.join",
"os.listdir",
"sys.exit"
] |
[((400, 411), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (409, 411), False, 'import os\n'), ((427, 447), 'os.listdir', 'os.listdir', (['app_path'], {}), '(app_path)\n', (437, 447), False, 'import os\n'), ((1661, 1698), 'os.path.join', 'os.path.join', (['app_path', 'lib_directory'], {}), '(app_path, lib_directory)\n', (1673, 1698), False, 'import os\n'), ((1735, 1752), 'os.walk', 'os.walk', (['lib_path'], {}), '(lib_path)\n', (1742, 1752), False, 'import os\n'), ((1431, 1442), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1439, 1442), False, 'import sys\n'), ((603, 619), 'os.path.isdir', 'os.path.isdir', (['c'], {}), '(c)\n', (616, 619), False, 'import os\n'), ((625, 646), 'os.access', 'os.access', (['c', 'os.R_OK'], {}), '(c, os.R_OK)\n', (634, 646), False, 'import os\n')]
|
import os
from irc_poker_data_set import IrcPokerData as ipd
basedir = os.path.abspath(os.path.dirname(__file__))
irc_poker_data = ipd()
irc_poker_data.open()
from irc_poker_db import db_session, PlayerRanking
def player_ranking():
if db_session.query(PlayerRanking).count() >= db_session.query(ipd.Player).count():
return
### travers all players
i = 0
players = ipd.db_session.query(ipd.Player).all()
for player in players:
i += 1
if i%100 == 0:
db_session.commit()
p = db_session.query(PlayerRanking).filter(PlayerRanking.player_id==player.id).first()
if p:
print(p.__repr__())
continue
total_pay = 0
total_win = 0
round_num = 0
rounds = ipd.db_session.query(ipd.Round).filter(ipd.Round.player_id==player.id).all()
for round in rounds:
total_pay += round.pay
total_win += round.win
round_num += 1
average_points = (total_win-total_pay)/round_num
player_ranking = PlayerRanking(player_id=player.id,
player_name=player.player_name,
average_points=average_points,
total_pay=total_pay,
total_win=total_win,
round_num=round_num)
print(player_ranking.__repr__())
db_session.add(player_ranking)
db_session.commit()
player_ranking()
|
[
"irc_poker_data_set.IrcPokerData.db_session.query",
"irc_poker_db.db_session.add",
"irc_poker_data_set.IrcPokerData",
"irc_poker_db.db_session.commit",
"os.path.dirname",
"irc_poker_db.db_session.query",
"irc_poker_db.PlayerRanking"
] |
[((133, 138), 'irc_poker_data_set.IrcPokerData', 'ipd', ([], {}), '()\n', (136, 138), True, 'from irc_poker_data_set import IrcPokerData as ipd\n'), ((88, 113), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import os\n'), ((1506, 1525), 'irc_poker_db.db_session.commit', 'db_session.commit', ([], {}), '()\n', (1523, 1525), False, 'from irc_poker_db import db_session, PlayerRanking\n'), ((1059, 1227), 'irc_poker_db.PlayerRanking', 'PlayerRanking', ([], {'player_id': 'player.id', 'player_name': 'player.player_name', 'average_points': 'average_points', 'total_pay': 'total_pay', 'total_win': 'total_win', 'round_num': 'round_num'}), '(player_id=player.id, player_name=player.player_name,\n average_points=average_points, total_pay=total_pay, total_win=total_win,\n round_num=round_num)\n', (1072, 1227), False, 'from irc_poker_db import db_session, PlayerRanking\n'), ((1471, 1501), 'irc_poker_db.db_session.add', 'db_session.add', (['player_ranking'], {}), '(player_ranking)\n', (1485, 1501), False, 'from irc_poker_db import db_session, PlayerRanking\n'), ((393, 425), 'irc_poker_data_set.IrcPokerData.db_session.query', 'ipd.db_session.query', (['ipd.Player'], {}), '(ipd.Player)\n', (413, 425), True, 'from irc_poker_data_set import IrcPokerData as ipd\n'), ((509, 528), 'irc_poker_db.db_session.commit', 'db_session.commit', ([], {}), '()\n', (526, 528), False, 'from irc_poker_db import db_session, PlayerRanking\n'), ((245, 276), 'irc_poker_db.db_session.query', 'db_session.query', (['PlayerRanking'], {}), '(PlayerRanking)\n', (261, 276), False, 'from irc_poker_db import db_session, PlayerRanking\n'), ((288, 316), 'irc_poker_db.db_session.query', 'db_session.query', (['ipd.Player'], {}), '(ipd.Player)\n', (304, 316), False, 'from irc_poker_db import db_session, PlayerRanking\n'), ((541, 572), 'irc_poker_db.db_session.query', 'db_session.query', (['PlayerRanking'], {}), '(PlayerRanking)\n', (557, 572), False, 'from irc_poker_db 
import db_session, PlayerRanking\n'), ((774, 805), 'irc_poker_data_set.IrcPokerData.db_session.query', 'ipd.db_session.query', (['ipd.Round'], {}), '(ipd.Round)\n', (794, 805), True, 'from irc_poker_data_set import IrcPokerData as ipd\n')]
|
import os, sys
from importlib import import_module
sys.path += [os.getcwd()]
def get_params(params_file, import_path=""):
"""Extract the params object from a given Python file.
:param str params_file: the Python file to get the params object from.
:returns: the params object from the given Python file.
"""
sys.path += [import_path]
module_name = params_file.replace(".py", "").replace("/", ".")
module = import_module(module_name)
return module.params
def format_manip_info(manip_info):
"""Format the ANSSI/LSC manip_info dictionary format into a more generic format compliant with the Manip class.
:param dict manip_info: the information about the a manip to format.
:returns: a dictionary containing all the attributes for creating a Manip class.
"""
result_file = manip_info["base_dir"] + "/devices/" + manip_info["device"] + "/manips/" + manip_info["manip_name"] + "/results/" + manip_info["result_name"] + "/main.csv"
analysis_params = get_params(manip_info["params_file"])
id_name = manip_info["id_name"]
ret = {
"result_file": result_file,
"analysis_params": analysis_params,
"id_name": id_name
}
return ret
|
[
"os.getcwd",
"importlib.import_module"
] |
[((66, 77), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (75, 77), False, 'import os, sys\n'), ((440, 466), 'importlib.import_module', 'import_module', (['module_name'], {}), '(module_name)\n', (453, 466), False, 'from importlib import import_module\n')]
|
from properties.models import AvailableLanguage
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist
class TestLanguage(TestCase):
fixtures = ['properties_data.yaml']
def setUp(self):
self.non_exist_lang = "never_exist_lang"
self.lang_python = "Python"
self.lang_ruby = "Ruby"
def test_get_all_available_language(self):
self.assertIsNotNone(AvailableLanguage.objects.all())
def test_get_python(self):
self.assertIsNotNone(AvailableLanguage.objects.get(lang=self.lang_python))
def test_get_python_value_must_be_equal_to_python(self):
language_object = AvailableLanguage.objects.get(lang=self.lang_python)
self.assertEqual(self.lang_python, language_object.lang)
def test_get_ruby(self):
self.assertIsNotNone(AvailableLanguage.objects.get(lang=self.lang_ruby))
def test_get_ruby_value_must_be_equal_to_ruby(self):
language_object = AvailableLanguage.objects.get(lang=self.lang_ruby)
self.assertEqual(self.lang_ruby, language_object.lang)
def test_get_non_exist_language_must_be_fail(self):
self.assertRaises(
ObjectDoesNotExist,
AvailableLanguage.objects.get,
lang = self.non_exist_lang,
)
|
[
"properties.models.AvailableLanguage.objects.get",
"properties.models.AvailableLanguage.objects.all"
] |
[((657, 709), 'properties.models.AvailableLanguage.objects.get', 'AvailableLanguage.objects.get', ([], {'lang': 'self.lang_python'}), '(lang=self.lang_python)\n', (686, 709), False, 'from properties.models import AvailableLanguage\n'), ((970, 1020), 'properties.models.AvailableLanguage.objects.get', 'AvailableLanguage.objects.get', ([], {'lang': 'self.lang_ruby'}), '(lang=self.lang_ruby)\n', (999, 1020), False, 'from properties.models import AvailableLanguage\n'), ((421, 452), 'properties.models.AvailableLanguage.objects.all', 'AvailableLanguage.objects.all', ([], {}), '()\n', (450, 452), False, 'from properties.models import AvailableLanguage\n'), ((515, 567), 'properties.models.AvailableLanguage.objects.get', 'AvailableLanguage.objects.get', ([], {'lang': 'self.lang_python'}), '(lang=self.lang_python)\n', (544, 567), False, 'from properties.models import AvailableLanguage\n'), ((834, 884), 'properties.models.AvailableLanguage.objects.get', 'AvailableLanguage.objects.get', ([], {'lang': 'self.lang_ruby'}), '(lang=self.lang_ruby)\n', (863, 884), False, 'from properties.models import AvailableLanguage\n')]
|
# -*- coding: utf-8 -*-
from .energydiagram import ED
import matplotlib.pyplot as plt
import re
ADJUSTEDCOEFFICIENT=0.02
def GetFrontIndex(orbSign):
# In: HOMO/LUMO/HOMO-1/LUMO+1
# Out: {'hoLu': 'HOMO', 'num': -1}
for matchString in [r'HOMO(.*)', r'LUMO(.*)']:
matchObj = re.match(matchString, orbSign)
if matchObj:
hoLu = re.sub(r'(.[0-9]+)', "", matchObj.group())
num = re.sub(r'([a-zA-Z]+)', "", matchObj.group())
if num:
return {'hoLu': hoLu, 'num': num}
else:
return {'hoLu': hoLu, 'num': 0}
def AdjustEnergy(orbList, orbEnergylistsorted):
def ModifyMOName(MO):
# Taking MO such as HOMO LUMO HOMO-1 LUMO+1, convert into H L L+1 H-1
if MO == 'HOMO':
newMO = -0.5
elif MO == 'LUMO':
# "L need a extra empty space, which is weired"
newMO = 0.5
else:
newMO = int(MO[-2:])
return newMO
orbListnew = sorted(orbList, key=ModifyMOName)
sortedEnergylist = []
for orb in orbList:
for orbSort, energy in zip(orbListnew, orbEnergylistsorted):
if orbSort == orb:
sortedEnergylist.append(energy)
return sortedEnergylist
def ModifyOrbLevel(orbEnergylist, energyGap, orbList):
orbEnergylistsorted = sorted(orbEnergylist)
# adjust the energy of the orbital levels
for index, i in enumerate(orbEnergylistsorted):
if index != 0:
for j in orbEnergylistsorted[:index]:
if abs(orbEnergylistsorted[index] - j) < energyGap:
orbEnergylistsorted[index] = orbEnergylistsorted[index] + energyGap
sortedEnergylist = AdjustEnergy(orbList, orbEnergylistsorted)
return sortedEnergylist
def NewdataSummarylist(dataSummarylist, energyGap=0.1):
frag1Energylist = []
frag1Orblist = []
frag2Energylist = []
frag2Orblist = []
complexEnergylist = []
complexOrblist = []
frag1Countertotal = 0
frag2Countertotal = 0
complexCountertotal = 0
for orbList in dataSummarylist:
for key, val in list(orbList['compOrb1'].items()):
if val['fragType'] == 'frag1':
frag1Energylist.append(val['fragEnergy'])
frag1Orblist.append(val['fragOrb'])
else:
frag2Energylist.append(val['fragEnergy'])
frag2Orblist.append(val['fragOrb'])
for key, val in list(orbList.items()):
complexEnergylist.append(val['fragOrb1']['compEnergy'])
complexOrblist.append(val['fragOrb1']['compOrb'])
frag1EnergylistModified = ModifyOrbLevel(frag1Energylist, energyGap, frag1Orblist)
frag2EnergylistModified = ModifyOrbLevel(frag2Energylist, energyGap, frag2Orblist)
complexEnergylistModified = ModifyOrbLevel(complexEnergylist, energyGap, complexOrblist)
for orbList in dataSummarylist:
for i in orbList:
frag1Counter = 0
frag2Counter = 0
for j in orbList[i]:
orbList[i][j]["complexModifiedenergy"]=complexEnergylistModified[complexCountertotal]
if orbList[i][j]['fragType'] == "frag1":
orbList[i][j]["fragModifiedenergy"]=frag1EnergylistModified[frag1Countertotal]
frag1Countertotal += 1
frag1Counter += 1
else:
orbList[i][j]["fragModifiedenergy"]=frag2EnergylistModified[frag2Countertotal]
frag2Countertotal += 1
frag2Counter += 1
frag1Countertotal = frag1Countertotal - frag1Counter
frag2Countertotal = frag2Countertotal - frag2Counter
complexCountertotal += 1
frag1Countertotal = frag1Countertotal + frag1Counter
frag2Countertotal = frag2Countertotal + frag2Counter
return dataSummarylist
def OrbMixing(orbital, mixlist, color='b', times=0):
def ModifyMOName(MO):
# Taking MO such as HOMO LUMO HOMO-1 LUMO+1, convert into H L L+1 H-1
if MO == 'HOMO':
newMO = 'H' + ' '
elif MO == 'LUMO':
# "L need a extra empty space, which is weired"
newMO = 'L' + ' '
else:
newMO = MO[0:1] + MO[-2:]
return newMO
# add levels
message = ''
orbNumber = 1
mix1Frag1number = 0
# add empty space
orbital.AddLevel(-2, color='w')
# add frag1 level
# check if there are no frag1 orbital invloved
frag1Orbnum = 0
for key, val in list(mixlist['compOrb1'].items()):
if val['fragType'] == 'frag1':
frag1Orbnum = frag1Orbnum+1
if frag1Orbnum == 0:
orbital.AddLevel(-2, color='w')
orbNumber = 2
else:
i = 0
for key, val in list(mixlist['compOrb1'].items()):
if val['fragType'] == 'frag1':
mix1Frag1number+=1
label = val['fragOrb']
energy = val['fragEnergy']
population= val['population']
name = val['fragName']
message = '{0:.3f}'.format(population) + 'e'+ ' ' + '{0:.1f}'.format(energy) + ' ' + str(name)+ ' ' + str(ModifyMOName(val['fragOrb']))
# modify the degenerated levels to the slightly different energy to show on the diagram
energy = val['fragModifiedenergy']
if i == 0:
orbital.AddLevel(energy, leftText=message, color=color)
else:
orbital.AddLevel(energy, position='last', leftText=message, color=color)
i+=1
if GetFrontIndex(label)['hoLu'] == 'HOMO':
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=2)
else:
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=0)
orbNumber+=1
# add complex level
i = 0
for key, val in list(mixlist.items()):
label = val['fragOrb1']['compOrb']
energy = val['fragOrb1']['compEnergy']
name = val['fragOrb1']['compName']
message = str(ModifyMOName(val['fragOrb1']['compOrb'])) + ' ' + str(name) + ' ' + '{0:.1f}'.format(energy)
# modify the degenerated levels to the slightly different energy to show on the diagram
energy = val['fragOrb1']['complexModifiedenergy']
if times == 0:
if i == 0:
orbital.AddLevel(energy, rightText=message, color=color)
else:
orbital.AddLevel(energy, position='last', rightText=message, color=color)
else:
if i == 0:
orbital.AddLevel(energy, leftText=message, color=color)
else:
orbital.AddLevel(energy, position='last', leftText=message, color=color)
i+=1
if GetFrontIndex(label)['hoLu'] == 'HOMO':
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=2)
else:
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=0)
orbNumber+=1
# add frag2 levels
i = 0
for key, val in list(mixlist['compOrb1'].items()):
if val['fragType'] == 'frag2':
label = val['fragOrb']
energy = val['fragEnergy']
population= val['population']
name = val['fragName']
message = str(ModifyMOName(val['fragOrb'])) + ' ' + str(name) + ' ' + '{0:.1f}'.format(energy) + ' ' + '{0:.3f}'.format(population) + 'e'
# modify the degenerated levels to the slightly different energy to show on the diagram
energy = val['fragModifiedenergy']
# move one step right
if i == 0:
orbital.AddLevel(energy, rightText=message, color=color)
else:
orbital.AddLevel(energy, position='last', rightText=message, color=color)
i+=1
if GetFrontIndex(label)['hoLu'] == 'HOMO':
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=2)
else:
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=0)
orbNumber+=1
# add frag1 and complex link
i = 1
for frag1Orb, frag1Val in list(mixlist['compOrb1'].items()):
if frag1Val['fragType'] == 'frag1':
k = 1
for compOrb, compVal in list(mixlist.items()):
coefi = compVal[frag1Orb]['fragCoef']
orbital.AddLink(i, k+mix1Frag1number, color=color, coeficient=coefi, reverse=True)
k+=1
i+=1
# add complex and frag2 link
if frag1Orbnum == 0:
mix1Frag1number = 1
i = 1
for compOrb, compVal in list(mixlist.items()):
k = 1
for frag2Orb, frag2Val in list(mixlist['compOrb1'].items()):
if frag2Val['fragType'] == 'frag2':
coefi = compVal[frag2Orb]['fragCoef']
orbital.AddLink(i+mix1Frag1number, k+mix1Frag1number+len(mixlist), color=color, coeficient=coefi, reverse=True, fragment2complex=False)
k+=1
i+=1
orbital.AddLevel(-2, color='w')
return orbital
def DrawOrbLevels(dataSummarylist, fileName):
energyList = []
for orbList in dataSummarylist:
for key, val in list(orbList['compOrb1'].items()):
energyList.append(val['fragEnergy'])
for key, val in list(orbList.items()):
energyList.append(val['fragOrb1']['compEnergy'])
energyGap = abs(max(energyList) - min(energyList))
dataSummarylistNew = NewdataSummarylist(dataSummarylist, energyGap=energyGap*ADJUSTEDCOEFFICIENT)
orbital = ED(energyGap=energyGap*0.8)
colorList = ['red', 'green', 'black', 'tan', 'red', 'black', 'red', 'green', 'darkorange', 'tan', 'black' ]
orbital = OrbMixing(orbital, dataSummarylistNew[0], color='b', times = 0)
orbital.Plot(showIDs=True)
i = 1
for orbList in dataSummarylistNew[1:]:
orbital.ReInit()
evenOdd= i - (i//2)*2
orbital = OrbMixing(orbital, orbList, color=colorList[i-1], times = evenOdd)
orbital.Plot(showIDs=True)
i = i+1
plt.savefig(fileName+'.png',dpi=300)
# plt.show()
plt.close()
|
[
"matplotlib.pyplot.close",
"re.match",
"matplotlib.pyplot.savefig"
] |
[((9364, 9403), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fileName + '.png')"], {'dpi': '(300)'}), "(fileName + '.png', dpi=300)\n", (9375, 9403), True, 'import matplotlib.pyplot as plt\n'), ((9418, 9429), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9427, 9429), True, 'import matplotlib.pyplot as plt\n'), ((286, 316), 're.match', 're.match', (['matchString', 'orbSign'], {}), '(matchString, orbSign)\n', (294, 316), False, 'import re\n')]
|
from os import getenv
from pathlib import Path
import dj_database_url
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = getenv("DJANGO_SECRET_KEY")
DEBUG = getenv("DEBUG") == "true"
ALLOWED_HOSTS = ["*"]
AUTH_USER_MODEL = "users_app.CustomUser"
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
# disable Django’s static file handling during development so that whitenoise can take over
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"parking_permits_app",
"users_app",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
# WhiteNoiseMiddleware should be above all and just below SecurityMiddleware
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "project.wsgi.application"
DATABASE_URL = getenv("DATABASE_URL")
DATABASES = {"default": dj_database_url.parse(DATABASE_URL)}
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "static-files"
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
|
[
"pathlib.Path",
"os.getenv",
"dj_database_url.parse"
] |
[((137, 164), 'os.getenv', 'getenv', (['"""DJANGO_SECRET_KEY"""'], {}), "('DJANGO_SECRET_KEY')\n", (143, 164), False, 'from os import getenv\n'), ((1776, 1798), 'os.getenv', 'getenv', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (1782, 1798), False, 'from os import getenv\n'), ((174, 189), 'os.getenv', 'getenv', (['"""DEBUG"""'], {}), "('DEBUG')\n", (180, 189), False, 'from os import getenv\n'), ((1823, 1858), 'dj_database_url.parse', 'dj_database_url.parse', (['DATABASE_URL'], {}), '(DATABASE_URL)\n', (1844, 1858), False, 'import dj_database_url\n'), ((83, 97), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'from pathlib import Path\n')]
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.common import Configuration
from pyflink.table import EnvironmentSettings
from pyflink.testing.test_case_utils import PyFlinkTestCase, get_private_field
class EnvironmentSettingsTests(PyFlinkTestCase):
def test_planner_selection(self):
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.check_blink_planner(environment_settings)
# test use_blink_planner
environment_settings = EnvironmentSettings.new_instance().use_blink_planner().build()
self.check_blink_planner(environment_settings)
# test use_any_planner
environment_settings = builder.use_any_planner().build()
self.check_any_planner(environment_settings)
def test_mode_selection(self):
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_streaming_mode
environment_settings = builder.in_streaming_mode().build()
self.assertTrue(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_streaming_mode()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_batch_mode
environment_settings = builder.in_batch_mode().build()
self.assertFalse(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_batch_mode()
self.assertFalse(environment_settings.is_streaming_mode())
def test_with_built_in_catalog_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_CATALOG = gateway.jvm.EnvironmentSettings.DEFAULT_BUILTIN_CATALOG
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), DEFAULT_BUILTIN_CATALOG)
environment_settings = builder.with_built_in_catalog_name("my_catalog").build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), "my_catalog")
def test_with_built_in_database_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_DATABASE = gateway.jvm.EnvironmentSettings.DEFAULT_BUILTIN_DATABASE
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_database_name(),
DEFAULT_BUILTIN_DATABASE)
environment_settings = builder.with_built_in_database_name("my_database").build()
self.assertEqual(environment_settings.get_built_in_database_name(), "my_database")
def test_to_configuration(self):
expected_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
config = expected_settings.to_configuration()
self.assertEqual("BATCH", config.get_string("execution.runtime-mode", "stream"))
def test_from_configuration(self):
config = Configuration()
config.set_string("execution.runtime-mode", "batch")
actual_setting = EnvironmentSettings.from_configuration(config)
self.assertFalse(actual_setting.is_streaming_mode(), "Use batch mode.")
def check_blink_planner(self, settings: EnvironmentSettings):
gateway = get_gateway()
CLASS_NAME = gateway.jvm.EnvironmentSettings.CLASS_NAME
builder = EnvironmentSettings.new_instance()
BLINK_PLANNER_FACTORY = get_private_field(builder._j_builder, "BLINK_PLANNER_FACTORY")
self.assertEqual(
settings._j_environment_settings.toPlannerProperties()[CLASS_NAME],
BLINK_PLANNER_FACTORY)
def check_any_planner(self, settings: EnvironmentSettings):
    """Assert that *settings* pins no specific planner class."""
    class_name_key = get_gateway().jvm.EnvironmentSettings.CLASS_NAME
    planner_props = settings._j_environment_settings.toPlannerProperties()
    self.assertTrue(class_name_key not in planner_props)
|
[
"pyflink.table.EnvironmentSettings.in_batch_mode",
"pyflink.common.Configuration",
"pyflink.testing.test_case_utils.get_private_field",
"pyflink.table.EnvironmentSettings.in_streaming_mode",
"pyflink.table.EnvironmentSettings.from_configuration",
"pyflink.table.EnvironmentSettings.new_instance",
"pyflink.java_gateway.get_gateway"
] |
[((1279, 1313), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (1311, 1313), False, 'from pyflink.table import EnvironmentSettings\n'), ((1895, 1929), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (1927, 1929), False, 'from pyflink.table import EnvironmentSettings\n'), ((2330, 2369), 'pyflink.table.EnvironmentSettings.in_streaming_mode', 'EnvironmentSettings.in_streaming_mode', ([], {}), '()\n', (2367, 2369), False, 'from pyflink.table import EnvironmentSettings\n'), ((2628, 2663), 'pyflink.table.EnvironmentSettings.in_batch_mode', 'EnvironmentSettings.in_batch_mode', ([], {}), '()\n', (2661, 2663), False, 'from pyflink.table import EnvironmentSettings\n'), ((2798, 2811), 'pyflink.java_gateway.get_gateway', 'get_gateway', ([], {}), '()\n', (2809, 2811), False, 'from pyflink.java_gateway import get_gateway\n'), ((2922, 2956), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (2954, 2956), False, 'from pyflink.table import EnvironmentSettings\n'), ((3440, 3453), 'pyflink.java_gateway.get_gateway', 'get_gateway', ([], {}), '()\n', (3451, 3453), False, 'from pyflink.java_gateway import get_gateway\n'), ((3566, 3600), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (3598, 3600), False, 'from pyflink.table import EnvironmentSettings\n'), ((4375, 4390), 'pyflink.common.Configuration', 'Configuration', ([], {}), '()\n', (4388, 4390), False, 'from pyflink.common import Configuration\n'), ((4478, 4524), 'pyflink.table.EnvironmentSettings.from_configuration', 'EnvironmentSettings.from_configuration', (['config'], {}), '(config)\n', (4516, 4524), False, 'from pyflink.table import EnvironmentSettings\n'), ((4690, 4703), 'pyflink.java_gateway.get_gateway', 'get_gateway', ([], {}), '()\n', (4701, 4703), False, 'from pyflink.java_gateway import 
get_gateway\n'), ((4787, 4821), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (4819, 4821), False, 'from pyflink.table import EnvironmentSettings\n'), ((4854, 4916), 'pyflink.testing.test_case_utils.get_private_field', 'get_private_field', (['builder._j_builder', '"""BLINK_PLANNER_FACTORY"""'], {}), "(builder._j_builder, 'BLINK_PLANNER_FACTORY')\n", (4871, 4916), False, 'from pyflink.testing.test_case_utils import PyFlinkTestCase, get_private_field\n'), ((5142, 5155), 'pyflink.java_gateway.get_gateway', 'get_gateway', ([], {}), '()\n', (5153, 5155), False, 'from pyflink.java_gateway import get_gateway\n'), ((1570, 1604), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (1602, 1604), False, 'from pyflink.table import EnvironmentSettings\n'), ((4114, 4148), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (4146, 4148), False, 'from pyflink.table import EnvironmentSettings\n')]
|
from typing import Tuple, Any
from enum import Enum, IntFlag
from datetime import datetime
from collections import namedtuple
from collections.abc import Callable
# GeoCom return-code subsystem offsets: each subsystem owns a 0x100-wide
# block of result codes; the low byte carries the subsystem-specific code.
GRC_TPS = 0x0000  # main return codes (identical to RC_SUP!!)
GRC_SUP = 0x0000  # supervisor task (identical to RCBETA!!)
GRC_ANG = 0x0100  # angle- and inclination
GRC_ATA = 0x0200  # automatic target acquisition
GRC_EDM = 0x0300  # electronic distance meter
GRC_GMF = 0x0400  # geodesy mathematics & formulas
GRC_TMC = 0x0500  # measurement & calc
GRC_MEM = 0x0600  # memory management
GRC_MOT = 0x0700  # motorization
GRC_LDR = 0x0800  # program loader
GRC_BMM = 0x0900  # basics of man machine interface
GRC_TXT = 0x0A00  # text management
GRC_MMI = 0x0B00  # man machine interface
GRC_COM = 0x0C00  # communication
GRC_DBM = 0x0D00  # data base management
GRC_DEL = 0x0E00  # dynamic event logging
GRC_FIL = 0x0F00  # file system
GRC_CSV = 0x1000  # central services
GRC_CTL = 0x1100  # controlling task
GRC_STP = 0x1200  # start + stop task
GRC_DPL = 0x1300  # data pool
GRC_WIR = 0x1400  # wi registration
GRC_USR = 0x2000  # user task
GRC_ALT = 0x2100  # alternate user task
GRC_AUT = 0x2200  # automatization
GRC_AUS = 0x2300  # alternative user
GRC_BAP = 0x2400  # basic applications
GRC_SAP = 0x2500  # system applications
GRC_COD = 0x2600  # standard code function
GRC_BAS = 0x2700  # GeoBasic interpreter
GRC_IOS = 0x2800  # Input-/ output- system
GRC_CNF = 0x2900  # configuration facilities
GRC_XIT = 0x2E00  # XIT subsystem (Excite-Level LIS)
GRC_DNA = 0x2F00  # DNA2 subsystem
GRC_ICD = 0x3000  # cal data management
GRC_KDM = 0x3100  # keyboard display module
GRC_LOD = 0x3200  # firmware loader
GRC_FTR = 0x3300  # file transfer
GRC_VNF = 0x3F00  # reserved for new TPS1200 subsystem
GRC_GPS = 0x4000  # GPS subsystem
GRC_TST = 0x4100  # Test subsystem
GRC_PTF = 0x4F00  # reserved for new GPS1200 subsystem
GRC_APP = 0x5000  # offset for all applications
GRC_RES = 0x7000  # reserved code range
class ReturnCode(Enum):
    """GeoCom return codes (GRC): a subsystem offset plus a subsystem-specific code."""
    GRC_OK = GRC_TPS + 0  # Function successfully completed.
    GRC_UNDEFINED = GRC_TPS + 1  # Unknown error result unspecified.
    GRC_IVPARAM = GRC_TPS + 2  # Invalid parameter detected.\nResult unspecified.
    GRC_IVRESULT = GRC_TPS + 3  # Invalid result.
    GRC_FATAL = GRC_TPS + 4  # Fatal error.
    GRC_NOT_IMPL = GRC_TPS + 5  # Not implemented yet.
    GRC_TIME_OUT = GRC_TPS + 6  # Function execution timed out.\nResult unspecified.
    GRC_SET_INCOMPL = GRC_TPS + 7  # Parameter setup for subsystem is incomplete.
    GRC_ABORT = GRC_TPS + 8  # Function execution has been aborted.
    GRC_NOMEMORY = GRC_TPS + 9  # Fatal error - not enough memory.
    GRC_NOTINIT = GRC_TPS + 10  # Fatal error - subsystem not initialized.
    GRC_SHUT_DOWN = GRC_TPS + 12  # Subsystem is down.
    GRC_SYSBUSY = GRC_TPS + 13  # System busy/already in use of another process.\nCannot execute function.
    GRC_HWFAILURE = GRC_TPS + 14  # Fatal error - hardware failure.
    GRC_ABORT_APPL = GRC_TPS + 15  # Execution of application has been aborted (SHIFT-ESC).
    GRC_LOW_POWER = GRC_TPS + 16  # Operation aborted - insufficient power supply level.
    GRC_IVVERSION = GRC_TPS + 17  # Invalid version of file ...
    GRC_BATT_EMPTY = GRC_TPS + 18  # Battery empty
    GRC_NO_EVENT = GRC_TPS + 20  # no event pending.
    GRC_OUT_OF_TEMP = GRC_TPS + 21  # out of temperature range
    GRC_INSTRUMENT_TILT = GRC_TPS + 22  # intrument tilting out of range
    GRC_COM_SETTING = GRC_TPS + 23  # communication error
    GRC_NO_ACTION = GRC_TPS + 24  # GRC_TYPE Input 'do no action'
    GRC_SLEEP_MODE = GRC_TPS + 25  # Instr. run into the sleep mode
    GRC_NOTOK = GRC_TPS + 26  # Function not successfully completed.
    GRC_NA = GRC_TPS + 27  # Not available
    GRC_OVERFLOW = GRC_TPS + 28  # Overflow error
    GRC_STOPPED = GRC_TPS + 29  # System or subsystem has been stopped
    GRC_COM_ERO = GRC_COM + 0  # Initiate Extended Runtime Operation (ERO).
    GRC_COM_CANT_ENCODE = GRC_COM + 1  # Cannot encode arguments in client.
    GRC_COM_CANT_DECODE = GRC_COM + 2  # Cannot decode results in client.
    GRC_COM_CANT_SEND = GRC_COM + 3  # Hardware error while sending.
    GRC_COM_CANT_RECV = GRC_COM + 4  # Hardware error while receiving.
    GRC_COM_TIMEDOUT = GRC_COM + 5  # Request timed out.
    GRC_COM_WRONG_FORMAT = GRC_COM + 6  # Packet format error.
    GRC_COM_VER_MISMATCH = GRC_COM + 7  # Version mismatch between client and server.
    GRC_COM_CANT_DECODE_REQ = GRC_COM + 8  # Cannot decode arguments in server.
    GRC_COM_PROC_UNAVAIL = GRC_COM + 9  # Unknown RPC, procedure ID invalid.
    GRC_COM_CANT_ENCODE_REP = GRC_COM + 10  # Cannot encode results in server.
    GRC_COM_SYSTEM_ERR = GRC_COM + 11  # Unspecified generic system error.
    GRC_COM_UNKNOWN_HOST = GRC_COM + 12  # (Unused error code)
    GRC_COM_FAILED = GRC_COM + 13  # Unspecified error.
    GRC_COM_NO_BINARY = GRC_COM + 14  # Binary protocol not available.
    GRC_COM_INTR = GRC_COM + 15  # Call interrupted.
    GRC_COM_UNKNOWN_ADDR = GRC_COM + 16  # (Unused error code)
    GRC_COM_NO_BROADCAST = GRC_COM + 17  # (Unused error code)
    GRC_COM_REQUIRES_8DBITS = GRC_COM + 18  # Protocol needs 8bit encoded chararacters.
    GRC_COM_UD_ERROR = GRC_COM + 19  # (Unused error code)
    GRC_COM_LOST_REQ = GRC_COM + 20  # (Unused error code)
    GRC_COM_TR_ID_MISMATCH = GRC_COM + 21  # Transacation ID mismatch error.
    GRC_COM_NOT_GEOCOM = GRC_COM + 22  # Protocol not recognizeable.
    GRC_COM_UNKNOWN_PORT = GRC_COM + 23  # (WIN) Invalid port address.
    GRC_COM_ILLEGAL_TRPT_SELECTOR = GRC_COM + 24  # (Unused error code)
    GRC_COM_TRPT_SELECTOR_IN_USE = GRC_COM + 25  # (Unused error code)
    GRC_COM_INACTIVE_TRPT_SELECTOR = GRC_COM + 26  # (Unused error code)
    GRC_COM_ERO_END = GRC_COM + 27  # ERO is terminating.
    GRC_COM_OVERRUN = GRC_COM + 28  # Internal error: data buffer overflow.
    GRC_COM_SRVR_RX_CHECKSUM_ERROR = GRC_COM + 29  # Invalid checksum on server side received.
    GRC_COM_CLNT_RX_CHECKSUM_ERROR = GRC_COM + 30  # Invalid checksum on client side received.
    GRC_COM_PORT_NOT_AVAILABLE = GRC_COM + 31  # (WIN) Port not available.
    GRC_COM_PORT_NOT_OPEN = GRC_COM + 32  # (WIN) Port not opened.
    GRC_COM_NO_PARTNER = GRC_COM + 33  # (WIN) Unable to find TPS.
    GRC_COM_ERO_NOT_STARTED = GRC_COM + 34  # Extended Runtime Operation could not be started.
    GRC_COM_CONS_REQ = GRC_COM + 35  # Att to send cons reqs
    GRC_COM_SRVR_IS_SLEEPING = GRC_COM + 36  # TPS has gone to sleep. Wait and try again.
    GRC_COM_SRVR_IS_OFF = GRC_COM + 37  # TPS has shut down. Wait and try again.
    GRC_EDM_SYSTEM_ERR = GRC_EDM + 1  # Fatal EDM sensor error. See for the exact reason the original EDM sensor error number. In the most cases a service problem.
    # Sensor user errors
    GRC_EDM_INVALID_COMMAND = GRC_EDM + 2  # Invalid command or unknown command, see command syntax.
    GRC_EDM_BOOM_ERR = GRC_EDM + 3  # Boomerang error.
    GRC_EDM_SIGN_LOW_ERR = GRC_EDM + 4  # Received signal to low, prisma to far away, or natural barrier, bad environment, etc.
    GRC_EDM_DIL_ERR = GRC_EDM + 5  # Obsolete
    GRC_EDM_SIGN_HIGH_ERR = GRC_EDM + 6  # Received signal to strong, prism too near, stranger light effect.
    # New TPS1200 sensor user errors
    GRC_EDM_TIMEOUT = GRC_EDM + 7  # Timeout, measuring time exceeded (signal too weak, beam interrupted,..)
    GRC_EDM_FLUKT_ERR = GRC_EDM + 8  # Too much turbulences or distractions
    GRC_EDM_FMOT_ERR = GRC_EDM + 9  # Filter motor defective
    # Subsystem errors
    GRC_EDM_DEV_NOT_INSTALLED = GRC_EDM + 10  # Device like EGL, DL is not installed.
    GRC_EDM_NOT_FOUND = GRC_EDM + 11  # Search result invalid. For the exact explanation \nsee in the description of the called function.
    GRC_EDM_ERROR_RECEIVED = GRC_EDM + 12  # Communication ok, but an error\nreported from the EDM sensor.
    GRC_EDM_MISSING_SRVPWD = GRC_EDM + 13  # No service password is set.
    GRC_EDM_INVALID_ANSWER = GRC_EDM + 14  # Communication ok, but an unexpected\nanswer received.
    GRC_EDM_SEND_ERR = GRC_EDM + 15  # Data send error, sending buffer is full.
    GRC_EDM_RECEIVE_ERR = GRC_EDM + 16  # Data receive error, like\nparity buffer overflow.
    GRC_EDM_INTERNAL_ERR = GRC_EDM + 17  # Internal EDM subsystem error.
    GRC_EDM_BUSY = GRC_EDM + 18  # Sensor is working already,\nabort current measuring first.
    GRC_EDM_NO_MEASACTIVITY = GRC_EDM + 19  # No measurement activity started.
    GRC_EDM_CHKSUM_ERR = GRC_EDM + 20  # Calculated checksum, resp. received data wrong\n(only in binary communication mode possible).
    GRC_EDM_INIT_OR_STOP_ERR = GRC_EDM + 21  # During start up or shut down phase an\nerror occured. It is saved in the DEL buffer.
    GRC_EDM_SRL_NOT_AVAILABLE = GRC_EDM + 22  # Red laser not available on this sensor HW.
    GRC_EDM_MEAS_ABORTED = GRC_EDM + 23  # Measurement will be aborted (will be used for the lasersecurity)
    # New TPS1200 sensor user error
    GRC_EDM_SLDR_TRANSFER_PENDING = GRC_EDM + 30  # Multiple OpenTransfer calls.
    GRC_EDM_SLDR_TRANSFER_ILLEGAL = GRC_EDM + 31  # No opentransfer happened.
    GRC_EDM_SLDR_DATA_ERROR = GRC_EDM + 32  # Unexpected data format received.
    GRC_EDM_SLDR_CHK_SUM_ERROR = GRC_EDM + 33  # Checksum error in transmitted data.
    GRC_EDM_SLDR_ADDR_ERROR = GRC_EDM + 34  # Address out of valid range.
    GRC_EDM_SLDR_INV_LOADFILE = GRC_EDM + 35  # Firmware file has invalid format.
    GRC_EDM_SLDR_UNSUPPORTED = GRC_EDM + 36  # Current (loaded) firmware doesn't support upload.
    GRC_EDM_UNKNOW_ERR = GRC_EDM + 40  # Undocumented error from the\nEDM sensor, should not occur.
    GRC_EDM_DISTRANGE_ERR = GRC_EDM + 50  # Out of distance range (dist too small or large)
    GRC_EDM_SIGNTONOISE_ERR = GRC_EDM + 51  # Signal to noise ratio too small
    GRC_EDM_NOISEHIGH_ERR = GRC_EDM + 52  # Noise to high
    GRC_EDM_PWD_NOTSET = GRC_EDM + 53  # Password is not set
    GRC_EDM_ACTION_NO_MORE_VALID = GRC_EDM + 54  # Elapsed time between prepare und start fast measurement for ATR to long
    GRC_EDM_MULTRG_ERR = GRC_EDM + 55  # Possibly more than one target (also a sensor error)
    GRC_MOT_UNREADY = GRC_MOT + 0  # motorization is not ready (1792)
    GRC_MOT_BUSY = GRC_MOT + 1  # motorization is handling another task (1793)
    GRC_MOT_NOT_OCONST = GRC_MOT + 2  # motorization is not in velocity mode (1794)
    GRC_MOT_NOT_CONFIG = GRC_MOT + 3  # motorization is in the wrong mode or busy (1795)
    GRC_MOT_NOT_POSIT = GRC_MOT + 4  # motorization is not in posit mode (1796)
    GRC_MOT_NOT_SERVICE = GRC_MOT + 5  # motorization is not in service mode (1797)
    GRC_MOT_NOT_BUSY = GRC_MOT + 6  # motorization is handling no task (1798)
    GRC_MOT_NOT_LOCK = GRC_MOT + 7  # motorization is not in tracking mode (1799)
    GRC_MOT_NOT_SPIRAL = GRC_MOT + 8  # motorization is not in spiral mode (1800)
    GRC_TMC_NO_FULL_CORRECTION = GRC_TMC + 3  # Warning: measurment without full correction
    GRC_TMC_ACCURACY_GUARANTEE = GRC_TMC + 4  # Info : accuracy can not be guarantee
    GRC_TMC_ANGLE_OK = GRC_TMC + 5  # Warning: only angle measurement valid
    GRC_TMC_ANGLE_NOT_FULL_CORR = GRC_TMC + 8  # Warning: only angle measurement valid but without full correction
    GRC_TMC_ANGLE_NO_ACC_GUARANTY = GRC_TMC + 9  # Info : only angle measurement valid but accuracy can not be guarantee
    GRC_TMC_ANGLE_ERROR = GRC_TMC + 10  # Error : no angle measurement
    GRC_TMC_DIST_PPM = GRC_TMC + 11  # Error : wrong setting of PPM or MM on EDM
    GRC_TMC_DIST_ERROR = GRC_TMC + 12  # Error : distance measurement not done (no aim, etc.)
    GRC_TMC_BUSY = GRC_TMC + 13  # Error : system is busy (no measurement done)
    GRC_TMC_SIGNAL_ERROR = GRC_TMC + 14  # Error : no signal on EDM (only in signal mode)
    GRC_BMM_XFER_PENDING = GRC_BMM + 1  # Loading process already opened
    GRC_BMM_NO_XFER_OPEN = GRC_BMM + 2  # Transfer not opened
    GRC_BMM_UNKNOWN_CHARSET = GRC_BMM + 3  # Unknown character set
    GRC_BMM_NOT_INSTALLED = GRC_BMM + 4  # Display module not present
    GRC_BMM_ALREADY_EXIST = GRC_BMM + 5  # Character set already exists
    GRC_BMM_CANT_DELETE = GRC_BMM + 6  # Character set cannot be deleted
    GRC_BMM_MEM_ERROR = GRC_BMM + 7  # Memory cannot be allocated
    GRC_BMM_CHARSET_USED = GRC_BMM + 8  # Character set still used
    GRC_BMM_CHARSET_SAVED = GRC_BMM + 9  # Charset cannot be deleted or is protected
    GRC_BMM_INVALID_ADR = GRC_BMM + 10  # Attempt to copy a character block\noutside the allocated memory
    GRC_BMM_CANCELANDADR_ERROR = GRC_BMM + 11  # Error during release of allocated memory
    GRC_BMM_INVALID_SIZE = GRC_BMM + 12  # Number of bytes specified in header\ndoes not match the bytes read
    GRC_BMM_CANCELANDINVSIZE_ERROR = GRC_BMM + 13  # Allocated memory could not be released
    GRC_BMM_ALL_GROUP_OCC = GRC_BMM + 14  # Max. number of character sets already loaded
    GRC_BMM_CANT_DEL_LAYERS = GRC_BMM + 15  # Layer cannot be deleted
    GRC_BMM_UNKNOWN_LAYER = GRC_BMM + 16  # Required layer does not exist
    GRC_BMM_INVALID_LAYERLEN = GRC_BMM + 17  # Layer length exceeds maximum
    AUT_RC_TIMEOUT = GRC_AUT + 4  # Timeout, no target found
    AUT_RC_DETENT_ERROR = GRC_AUT + 5  #
    AUT_RC_ANGLE_ERROR = GRC_AUT + 6  #
    AUT_RC_MOTOR_ERROR = GRC_AUT + 7  # Motorisation error
    AUT_RC_INCACC = GRC_AUT + 8  #
    AUT_RC_DEV_ERROR = GRC_AUT + 9  # Deviation measurement error
    AUT_RC_NO_TARGET = GRC_AUT + 10  # No target detected
    AUT_RC_MULTIPLE_TARGETS = GRC_AUT + 11  # Multiple targets detected
    AUT_RC_BAD_ENVIRONMENT = GRC_AUT + 12  # Bad environment conditions
    AUT_RC_DETECTOR_ERROR = GRC_AUT + 13  #
    AUT_RC_NOT_ENABLED = GRC_AUT + 14  #
    AUT_RC_CALACC = GRC_AUT + 15  #
    AUT_RC_ACCURACY = GRC_AUT + 16  # Position not exactly reached
class byte(int):
    """An int restricted to 0..255, parseable from GeoCom quoted-hex notation.

    Accepts a plain integer, or a str/bytes of the form 'XX' (hex digits
    wrapped in single quotes) as transmitted by the GeoCom ASCII protocol.
    Raises ValueError when the resulting value falls outside 0..255.
    """

    def __new__(cls, value, *args, **kwargs):
        # GeoCom sends byte parameters as quoted hex, e.g. "'1F'" -> 0x1F.
        # isinstance() replaces the exact type() comparison so str/bytes
        # subclasses are accepted as well (backward compatible).
        if isinstance(value, str):
            value = int(value.strip("'"), 16)
        elif isinstance(value, bytes):
            value = int(value.strip(b"'"), 16)
        if value < 0:
            raise ValueError("byte types must not be less than zero")
        if value > 255:
            raise ValueError("byte types must not be more than 255 (0xff)")
        return super().__new__(cls, value)
class PrismType(Enum):
    """Reflector prism models selectable on the instrument."""
    LEICA_ROUND = 0  # Prism type: Leica circular prism
    LEICA_MINI = 1  # Prism type: Leica mini prism
    LEICA_TAPE = 2  # Prism type: Leica reflective tape
    LEICA_360 = 3  # Prism type: Leica 360° prism
    USER1 = 4  # Prism type: User defined 1
    USER2 = 5  # Prism type: User defined 2
    USER3 = 6  # Prism type: User defined 3
    LEICA_360_MINI = 7  # Prism type: Leica 360° mini
    LEICA_MINI_ZERO = 8  # Prism type: Leica mini zero
    LEICA_USER = 9  # Prism type: user???
    LEICA_HDS_TAPE = 10  # Prism type: tape cyra???
    LEICA_GRZ121_ROUND = 11  # Prism type: Leica GRZ121 round for machine guidance
class ReflectorType(Enum):
    """Physical kind of reflector a prism definition refers to."""
    UNDEFINED = 0
    PRISM = 1
    TAPE = 2
class TargetType(Enum):
    """Whether the EDM measures to a reflector or reflectorless."""
    REFLECTOR = 0
    REFLECTORLESS = 1
class InclinationSensorProgram(Enum):
    """How inclination corrections are obtained during a measurement."""
    TMC_MEA_INC = 0  # Use sensor (apriori sigma)
    TMC_AUTO_INC = 1  # Automatic mode (sensor/plane)
    TMC_PLANE_INC = 2  # Use plane (apriori sigma)
class EDMMode(Enum):
    """Distance-meter measuring modes."""
    EDM_MODE_NOT_USED = 0  # Init value
    EDM_SINGLE_TAPE = 1  # Single measurement with tape
    EDM_SINGLE_STANDARD = 2  # Standard single measurement
    EDM_SINGLE_FAST = 3  # Fast single measurement
    EDM_SINGLE_LRANGE = 4  # Long range single measurement
    EDM_SINGLE_SRANGE = 5  # Short range single measurement
    EDM_CONT_STANDARD = 6  # Standard repeated measurement
    EDM_CONT_DYNAMIC = 7  # Dynamic repeated measurement
    EDM_CONT_REFLESS = 8  # Reflectorless repeated measurement
    EDM_CONT_FAST = 9  # Fast repeated measurement
    EDM_AVERAGE_IR = 10  # Standard average measurement
    EDM_AVERAGE_SR = 11  # Short range average measurement
    EDM_AVERAGE_LR = 12  # Long range average measurement
class DeviceClass(Enum):
    """Instrument accuracy class, grouped by TPS product family."""
    # TPS1000 Family ------------------------ accuracy
    TPS_CLASS_1100 = 0  # TPS1000 family member, 1 mgon, 3"
    TPS_CLASS_1700 = 1  # TPS1000 family member, 0.5 mgon, 1.5"
    TPS_CLASS_1800 = 2  # TPS1000 family member, 0.3 mgon, 1"
    TPS_CLASS_5000 = 3  # TPS2000 family member
    TPS_CLASS_6000 = 4  # TPS2000 family member
    TPS_CLASS_1500 = 5  # TPS1000 family member
    TPS_CLASS_2003 = 6  # TPS2000 family member
    TPS_CLASS_5005 = 7  # TPS5000 "
    TPS_CLASS_5100 = 8  # TPS5000 "
    # TPS1100 Family ------------------------ accuracy
    TPS_CLASS_1102 = 100  # TPS1000 family member, 2"
    TPS_CLASS_1103 = 101  # TPS1000 family member, 3"
    TPS_CLASS_1105 = 102  # TPS1000 family member, 5"
    TPS_CLASS_1101 = 103  # TPS1000 family member, 1."
    # TPS1200 Family ------------------------ accuracy
    TPS_CLASS_1202 = 200  # TPS1200 family member, 2"
    TPS_CLASS_1203 = 201  # TPS1200 family member, 3"
    TPS_CLASS_1205 = 202  # TPS1200 family member, 5"
    TPS_CLASS_1201 = 203  # TPS1200 family member, 1"
class DeviceType(IntFlag):
    """Hardware capability bitmask; members combine with | (hence IntFlag)."""
    # TPS1x00 common
    TPS_DEVICE_T = 0x00000  # Theodolite without built-in EDM
    TPS_DEVICE_MOT = 0x00004  # Motorized device
    TPS_DEVICE_ATR = 0x00008  # Automatic Target Recognition
    TPS_DEVICE_EGL = 0x00010  # Electronic Guide Light
    TPS_DEVICE_DB = 0x00020  # reserved (Database, not GSI)
    TPS_DEVICE_DL = 0x00040  # Diode laser
    TPS_DEVICE_LP = 0x00080  # Laser plumbed
    # TPS1000 specific
    TPS_DEVICE_TC1 = 0x00001  # tachymeter (TCW1)
    TPS_DEVICE_TC2 = 0x00002  # tachymeter (TCW2)
    # TPS1100/TPS1200 specific
    TPS_DEVICE_TC = 0x00001  # tachymeter (TCW3)
    TPS_DEVICE_TCR = 0x00002  # tachymeter (TCW3 with red laser)
    TPS_DEVICE_ATC = 0x00100  # Autocollimation lamp (used only PMU)
    TPS_DEVICE_LPNT = 0x00200  # Laserpointer
    TPS_DEVICE_RL_EXT = 0x00400  # Reflectorless EDM with extended range (Pinpoint R100,R300)
    TPS_DEVICE_PS = 0x00800  # Power Search
    # TPSSim specific
    TPS_DEVICE_SIM = 0x04000  # runs on Simulation, no Hardware
class PowerPath(Enum):
    """Power source selector reported by check_power()."""
    CURRENT_POWER = 0
    EXTERNAL_POWER = 1
    INTERNAL_POWER = 2
class RecordFormat(Enum):
    """GSI data recording format width (8 or 16 character blocks)."""
    GSI_8 = 0
    GSI_16 = 1
class TPSStatus(Enum):
    """Overall instrument power/communication state."""
    OFF = 0
    SLEEPING = 1
    ONLINE = 2
    LOCAL = 3
    UNKNOWN = 4
class OnOff(Enum):
    """Generic two-state switch used by several commands."""
    OFF = 0
    ON = 1
class EGLIntensity(Enum):
    """Electronic Guide Light brightness levels."""
    OFF = 0
    LOW = 1
    MID = 2
    HIGH = 3
class ControllerMode(Enum):
    """Motor controller operating modes for start_controller()."""
    RELATIVE_POSITIONING = 0
    CONSTANT_SPEED = 1
    MANUAL_POSITIONING = 2
    LOCK_IN = 3
    BRAKE = 4
    TERMINATE = 7
class ControllerStopMode(Enum):
    """How stop_controller() halts the motors."""
    NORMAL = 0
    SHUTDOWN = 1
class LockInStatus(Enum):
    """Target-lock state reported by get_motor_lock_status()."""
    LOCKED_OUT = 0
    LOCKED_IN = 1
    PREDICTION = 2
class MeasurementMode(Enum):
    """High-level measurement requests for measure_distance_and_angles()."""
    NO_MEASUREMENTS = 0  # No measurements, take last one
    NO_DISTANCE = 1  # No distance measurement, angles only
    DEFAULT_DISTANCE = 2  # Default distance measurements, pre-defined using MeasurementProgram
    CLEAR_DISTANCE = 5  # Clear distances
    STOP_TRACKING = 6  # Stop tracking laser
class MeasurementProgram(Enum):
    """Distance measurement programs selectable via set_measurement_program()."""
    SINGLE_REF_STANDARD = 0  # standard single IR distance with reflector
    SINGLE_REF_FAST = 1  # fast single IR distance with reflector
    SINGLE_REF_VISIBLE = 2  # long range distance with reflector (red laser)
    SINGLE_RLESS_VISIBLE = 3  # single RL distance reflector free (red laser)
    CONT_REF_STANDARD = 4  # tracking IR distance with reflector
    CONT_REF_FAST = 5  # fast tracking IR distance with reflector
    CONT_RLESS_VISIBLE = 6  # fast tracking RL distance reflector free (red)
    AVG_REF_STANDARD = 7  # Average IR distance with reflector
    AVG_REF_VISIBLE = 8  # Average long range dist. with reflector (red)
    AVG_RLESS_VISIBLE = 9  # Average RL distance reflector free (red laser)
class PositionMode(Enum):
    """Speed/accuracy trade-off for telescope positioning."""
    NORMAL = 0
    PRECISE = 1
class FineAdjustPositionMode(Enum):
    """Tolerance model used by the ATR fine-adjust routine."""
    NORM = 0  # Angle tolerance
    POINT = 1  # Point tolerance
    DEFINE = 2  # System independent positioning tolerance; set wit PyGeoCom.set_tolerance()
class ATRRecognitionMode(Enum):
    """Whether positioning goes to raw angles or to a detected target."""
    POSITION = 0  # Positioning to the horizontal and vertical angle
    TARGET = 1  # Positioning to a target in the environment of the horizontal and vertical angle
class TMCInclinationMode(Enum):
    """Inclination correction source for TMC measurements."""
    USE_SENSOR = 0
    AUTOMATIC = 1
    USE_PLANE = 2
class TMCMeasurementMode(Enum):
    """Low-level TMC measurement program control codes."""
    STOP = 0  # Stop measurement program
    DEFAULT_DISTANCE = 1  # Default DIST-measurement program
    DISTANCE_TRACKING = 2  # Distance-TRK measurement program
    STOP_AND_CLEAR = 3  # TMC_STOP and clear data
    SIGNAL = 4  # Signal measurement (test function)
    RESTART = 6  # (Re)start measurement task
    DISTANCE_RAPID_TRACKING = 8  # Distance-TRK measurement program
    RED_LASER_TRACKING = 10  # Red laser tracking
    TESTING_FREQUENCY = 11  # Frequency measurement (test)
class EDMMeasurementMode(Enum):
    """EDM sensor measuring modes (parallel to EDMMode)."""
    MODE_NOT_USER = 0
    SINGLE_TAPE = 1
    SINGLE_STANDARD = 2
    SINGLE_FAST = 3
    SINGLE_LRANGE = 4
    SINGLE_SRANGE = 5
    CONTINUOUS_STANDARD = 6
    CONTINUOUS_DYNAMIC = 7
    CONTINUOUS_REFLECTORLESS = 8
    CONTINUOUS_FAST = 9
    AVERAGE_IR = 10
    AVERAGE_SR = 11
    AVERAGE_LR = 12
class FacePosition(Enum):
    """Telescope face orientation (normal or turned/plunged)."""
    NORMAL = 0
    TURNED = 1
class ActualFace(Enum):
    """Which telescope face (I or II) is currently active."""
    FACE_1 = 0
    FACE_2 = 1
# Lightweight result records for positions and directions.
Coordinate = namedtuple('Coordinate', 'east north head')  # NOTE(review): 'head' presumably means height -- confirm
Angles = namedtuple('Angles', 'hz, v')  # horizontal and vertical angle
def decode_string(data: bytes) -> str:
    """Turn a raw GeoCom reply field into a plain string (quotes removed)."""
    text = data.decode('unicode_escape')
    return text.strip('"')
def default_return_code_handler(return_code: ReturnCode):
    """Raise a generic Exception for any RPC result other than GRC_OK.

    Note: _request passes a ReturnCode enum member here (the previous
    ``int`` annotation was inaccurate).
    """
    if (return_code != ReturnCode.GRC_OK):
        raise Exception(return_code)
def noop_return_code_handler(return_code: int):
    """Accept every RPC result without raising; use to ignore protocol errors."""
    pass
class PyGeoCom:
def __init__(self, stream, debug: bool = False):
    """Wrap *stream* as a GeoCom ASCII connection.

    stream -- object with write()/readline(); presumably a serial port
    handle speaking the GeoCom ASCII protocol (TODO confirm expected type).
    debug  -- when True, every request/reply line is printed.
    """
    self._stream = stream
    # Leading newline: presumably clears any partially-typed command on
    # the instrument side before the first request -- verify against manual.
    self._stream.write(b'\n')
    self._debug = debug
def _request(self, rpc_id: int, args: Tuple[Any, ...] = (), return_code_handler: Callable[[int], None] = default_return_code_handler) -> Tuple[Any, ...]:
    """Send one GeoCom ASCII RPC and return the parsed reply.

    Returns (geocom_return_code, rpc_return_code, *raw_parameters); the
    trailing parameters are undecoded ``bytes`` fields that the calling
    wrapper converts. ``return_code_handler`` is invoked with the RPC
    ReturnCode and may raise (the default raises on anything != GRC_OK).
    """
    def encode(arg) -> str:
        # Serialize one argument in GeoCom ASCII notation.
        # NOTE(review): an unsupported argument type falls through and
        # yields None, which would break the join below -- callers must
        # pass only str/int/float/bool/byte.
        if (type(arg) == str):
            return '"{}"'.format(arg)
        elif (type(arg) == int):
            return '{}'.format(arg)
        elif (type(arg) == float):
            return '{}'.format(arg)
        elif (type(arg) == bool):
            return '1' if arg == True else '0'
        elif (type(arg) == byte):
            return "'{:02X}'".format(arg)
    # Request line format: %R1Q,<rpc_id>:<comma-separated args>
    d = '\n%R1Q,{}:{}\r\n'.format(rpc_id, ','.join([encode(a) for a in args])).encode('ascii')
    if self._debug: print(b'>> ' + d)
    self._stream.write(d)
    d = self._stream.readline()
    if self._debug: print(b'<< ' + d)
    # Reply line format: %R1P,<geocom_rc>,<transaction_id>:<rpc_rc>[,<p1>,...]
    header, parameters = d.split(b':', 1)
    reply_type, geocom_return_code, transaction_id = header.split(b',')
    assert reply_type == b'%R1P'
    geocom_return_code = int(geocom_return_code)
    transaction_id = int(transaction_id)
    parameters = parameters.rstrip()
    rpc_return_code, *p = parameters.split(b',')
    rpc_return_code = ReturnCode(int(rpc_return_code))
    return_code_handler(rpc_return_code)
    return (geocom_return_code, rpc_return_code) + tuple(p)
def get_instrument_number(self) -> int:
    """Read the instrument's serial number."""
    _, _, serial_number, = self._request(5003)
    return int(serial_number)
def get_instrument_name(self) -> str:
    """Read the instrument's model name."""
    _, _, raw_name = self._request(5004)
    return decode_string(raw_name)
def get_device_config(self) -> Tuple[DeviceClass, DeviceType]:
    """Read the device's accuracy class and capability flags.

    Note: returns a 2-tuple; the previous ``DeviceType`` annotation was
    inaccurate.
    """
    _, _, device_class, device_type = self._request(5035)
    return DeviceClass(int(device_class)), DeviceType(int(device_type))
def get_date_time(self) -> datetime:
    """Read the instrument's real-time clock as a datetime."""
    _, _, year, month, day, hour, minute, second = self._request(5008)
    # The year arrives as a plain integer; the remaining fields come as
    # GeoCom quoted-hex bytes, hence the byte() conversions.
    return datetime(int(year), byte(month), byte(day),
                    byte(hour), byte(minute), byte(second))
def set_date_time(self, dt: datetime):
    """Set the instrument's real-time clock from *dt* (second resolution)."""
    self._request(5007, (dt.year, byte(dt.month), byte(dt.day), byte(dt.hour), byte(dt.minute), byte(dt.second)))
def get_software_version(self) -> Tuple[int, int, int]:
    """Read the onboard software version as (release, version, subversion)."""
    _, _, release_no, version_no, subversion_no = self._request(5034)
    return int(release_no), int(version_no), int(subversion_no)
def check_power(self) -> Tuple[int, PowerPath, PowerPath]:
    """Read remaining capacity plus the active and suggested power source."""
    _, _, capacity, active, suggested = self._request(5039)
    return int(capacity), PowerPath(int(active)), PowerPath(int(suggested))
def get_memory_voltage(self) -> float:
    """Read the memory-backup voltage reported by the instrument."""
    _, _, raw_voltage = self._request(5010)
    return float(raw_voltage)
def get_internal_temperature(self) -> float:
    """Read the instrument's internal temperature (units per instrument spec)."""
    _, _, raw_temperature = self._request(5011)
    return float(raw_temperature)
def get_up_counter(self) -> Tuple[int, int]:
    """Read the (power-on, wake-up) event counters."""
    _, _, power_on_count, wake_up_count = self._request(12003)
    return int(power_on_count), int(wake_up_count)
def get_binary_available(self) -> bool:
    """Report whether the binary GeoCom protocol is available.

    Bug fix: the reply field is raw bytes (b'0' or b'1'); ``bool()`` on a
    non-empty bytes object is always True, so the value must be converted
    through ``int()`` first.
    """
    _, _, binary_available, = self._request(113)
    return bool(int(binary_available))
def get_record_format(self) -> RecordFormat:
    """Read the currently selected GSI record format.

    Bug fix: a stray trailing comma made the original return a 1-tuple
    ``(RecordFormat,)`` instead of the bare RecordFormat the annotation
    declares.
    """
    _, _, record_format, = self._request(8011)
    return RecordFormat(int(record_format))
def set_record_format(self, record_format: RecordFormat):
    """Select the GSI record format (GSI-8 or GSI-16)."""
    self._request(8012, (record_format.value,))
def get_double_precision_setting(self) -> int:
    """Read how many decimal digits the server transmits for doubles."""
    _, _, digit_count, = self._request(108)
    return int(digit_count)
def set_double_precision_setting(self, number_of_digits: int):
    """Set the number of decimal digits (0..15) used for doubles in replies.

    Raises ValueError when *number_of_digits* is outside 0..15.
    """
    if number_of_digits < 0:
        raise ValueError("Number of digits must be greater than or equal to 0")
    if number_of_digits > 15:
        raise ValueError("Number of digits must be lesser than or equal to 15")
    self._request(107, (number_of_digits,))
def laser_pointer(self, state: OnOff):
    """Switch the visible laser pointer on or off."""
    self._request(1004, (state.value,))
def laser_pointer_on(self):
    """Convenience wrapper: switch the laser pointer on."""
    self.laser_pointer(OnOff.ON)
def laser_pointer_off(self):
    """Convenience wrapper: switch the laser pointer off."""
    self.laser_pointer(OnOff.OFF)
# Not tested as I don't have a device with an EGL
def get_egl_intensity(self) -> EGLIntensity:
    """Read the Electronic Guide Light brightness (EGL hardware required)."""
    _, _, level, = self._request(1058)
    return EGLIntensity(int(level))
# Not tested as I don't have a device with an EGL
def set_egl_intensity(self, intensity: EGLIntensity):
    """Set the Electronic Guide Light brightness (EGL hardware required)."""
    self._request(1059, (intensity.value,))
def get_motor_lock_status(self) -> LockInStatus:
    """Read the current target-lock state of the motorization."""
    _, _, lock_state, = self._request(6021)
    return LockInStatus(int(lock_state))
def start_controller(self, controller_mode: ControllerMode):
    """Start the motor controller in the given mode."""
    self._request(6001, (controller_mode.value,))
def stop_controller(self, controller_stop_mode: ControllerStopMode):
    """Stop the motor controller (normal stop or shutdown)."""
    self._request(6002, (controller_stop_mode.value,))
# Speed is in radians/second, with a maximum of ±0.79rad/s each
def set_velocity(self, hoziontal_speed: float, vertical_speed: float):
    """Turn continuously at the given angular velocities (rad/s, |v| <= 0.79).

    Bug fix: the vertical-speed check previously raised with a message
    saying "Horizontal".

    Raises ValueError when either speed magnitude exceeds 0.79 rad/s.
    """
    # NOTE: parameter name 'hoziontal_speed' (sic) is kept so existing
    # keyword-argument callers don't break.
    MAX_SPEED = 0.79  # rad/s
    if abs(hoziontal_speed) > MAX_SPEED:
        raise ValueError("Horizontal speed exceeds the ±0.79 range")
    if abs(vertical_speed) > MAX_SPEED:
        raise ValueError("Vertical speed exceeds the ±0.79 range")
    self._request(6004, (hoziontal_speed, vertical_speed))
def get_target_type(self) -> TargetType:
    """Read whether the EDM measures to a reflector or reflectorless."""
    _, _, raw_target_type, = self._request(17022)
    return TargetType(int(raw_target_type))
def set_target_type(self, target_type: TargetType):
    """Select reflector vs reflectorless EDM measurement."""
    self._request(17021, (target_type.value,))
def get_prism_type(self) -> PrismType:
    """Read the currently selected prism type."""
    _, _, raw_prism_type, = self._request(17009)
    return PrismType(int(raw_prism_type))
def set_prism_type(self, prism_type: PrismType):
    """Select the prism type used for measurements."""
    self._request(17008, (prism_type.value,))
def get_prism_definition(self, prism_type: PrismType) -> Tuple[str, float, ReflectorType]:
    """Read (name, additive correction, reflector kind) for a prism type."""
    _, _, raw_name, raw_correction, raw_reflector = self._request(17023, (prism_type.value,))
    return (decode_string(raw_name),
            float(raw_correction),
            ReflectorType(int(raw_reflector)))
def set_prism_definition(self, prism_type: PrismType, name: str, correction: float, reflector_type: ReflectorType):
    """Define a (user) prism type: name, additive correction, reflector kind."""
    self._request(17024, (prism_type.value, name, correction, reflector_type.value))
def get_measurement_program(self) -> MeasurementProgram:
    """Read the active distance measurement program."""
    _, _, raw_program, = self._request(17018)
    return MeasurementProgram(int(raw_program))
def set_measurement_program(self, measurement_program: MeasurementProgram):
    """Select the distance measurement program."""
    self._request(17019, (measurement_program.value,))
def measure_distance_and_angles(self, measurement_mode: MeasurementMode) -> Tuple[MeasurementMode, float, float, float]:
    """Measure and return (mode actually used, horizontal, vertical, distance)."""
    _, _, raw_hz, raw_v, raw_dist, raw_mode = self._request(17017, (measurement_mode.value,))
    return (MeasurementMode(int(raw_mode)),
            float(raw_hz),
            float(raw_v),
            float(raw_dist))
def search_target(self):
    """Start an ATR target search.

    The single 0 argument appears to be a dummy/reserved parameter --
    TODO confirm against the GeoCom manual.
    """
    self._request(17020, (0,))
def get_server_software_version(self) -> Tuple[int, int, int]:
    """Read the GeoCom server version as (release, version, subversion)."""
    _, _, release_no, version_no, subversion_no = self._request(110)
    return int(release_no), int(version_no), int(subversion_no)
def set_send_delay(self, delay_ms: int):
    """Set the reply transmission delay in milliseconds."""
    self._request(109, (delay_ms,))
def local_mode(self):
    """Switch the instrument from online (remote) to local operation."""
    self._request(1)
def get_user_atr_state(self) -> OnOff:
    """Read whether Automatic Target Recognition is enabled."""
    _, _, raw_state, = self._request(18006)
    return OnOff(int(raw_state))
def set_user_atr_state(self, atr_state: OnOff):
    """Enable or disable Automatic Target Recognition."""
    self._request(18005, (atr_state.value,))
def user_atr_state_on(self):
    """Convenience wrapper: enable ATR."""
    self.set_user_atr_state(OnOff.ON)
def user_atr_state_off(self):
    """Convenience wrapper: disable ATR."""
    self.set_user_atr_state(OnOff.OFF)
def get_user_lock_state(self) -> OnOff:
    """Read whether target lock (tracking) is enabled."""
    _, _, raw_state, = self._request(18008)
    return OnOff(int(raw_state))
def set_user_lock_state(self, lock_state: OnOff):
    """Enable or disable target lock (tracking)."""
    self._request(18007, (lock_state.value,))
def user_lock_state_on(self):
    """Convenience wrapper: enable target lock."""
    self.set_user_lock_state(OnOff.ON)
def user_lock_state_off(self):
    """Convenience wrapper: disable target lock."""
    self.set_user_lock_state(OnOff.OFF)
def get_rcs_search_switch(self) -> OnOff:
    """Read the RCS-Searching mode switch.

    If RCS style searching is enabled, extended searching for
    BAP_SearchTarget or after a loss of lock is activated. Valid for TCA
    instruments only.

    :returns: state of the RCS searching switch
    :rtype: OnOff
    """
    _, _, search_switch, = self._request(18010)
    return OnOff(int(search_switch))
def switch_rcs_search(self, search_switch: OnOff):
    """Enable or disable the RCS-style extended searching mode."""
    self._request(18009, (search_switch.value,))
def get_tolerance(self) -> Tuple[float, float]:
    """Read the positioning tolerances as (horizontal, vertical)."""
    _, _, hz_tolerance, v_tolerance = self._request(9008)
    return float(hz_tolerance), float(v_tolerance)
def set_tolerance(self, horizontal_tolerance: float, vertical_tolerance: float):
    """Set the positioning tolerances (see FineAdjustPositionMode.DEFINE)."""
    self._request(9007, (horizontal_tolerance, vertical_tolerance))
    def get_positioning_timeout(self) -> Tuple[float, float]:
        """Return the (horizontal, vertical) positioning timeouts (request 9012)."""
        _, _, horizontal_timeout, vertical_timeout = self._request(9012)
        return float(horizontal_timeout), float(vertical_timeout)
    def set_positioning_timeout(self, horizontal_timeout: float, vertical_timeout: float):
        """Set the (horizontal, vertical) positioning timeouts (request 9011)."""
        self._request(9011, (horizontal_timeout, vertical_timeout))
    def position(self, horizontal: float, vertical: float, position_mode: PositionMode = PositionMode.NORMAL, atr_mode: ATRRecognitionMode = ATRRecognitionMode.POSITION):
        """Turn the telescope to the given horizontal/vertical angles (request 9027)."""
        self._request(9027, (horizontal, vertical, position_mode.value, atr_mode.value, False))
    def change_face(self, position_mode: PositionMode = PositionMode.NORMAL, atr_mode: ATRRecognitionMode = ATRRecognitionMode.POSITION):
        """Turn the telescope to the other face (request 9028)."""
        self._request(9028, (position_mode.value, atr_mode.value, False))
    def fine_adjust(self, horizontal_search_range: float, vertical_search_range: float):
        """Precisely position onto a target within the given search ranges (request 9037)."""
        self._request(9037, (horizontal_search_range, vertical_search_range, False))
    def search(self, horizontal_search_range: float, vertical_search_range: float):
        """Search for a target within the given horizontal/vertical ranges (request 9029)."""
        self._request(9029, (horizontal_search_range, vertical_search_range, False))
def get_fine_adjust_mode(self) -> FineAdjustPositionMode:
_, _, fine_adjust_mode, = self._request(9030)
return FineAdjustPositionMode(float(fine_adjust_mode))
    def set_fine_adjust_mode(self, fine_adjust_mode: FineAdjustPositionMode):
        """Set the fine-adjust positioning mode (request 9031)."""
        self._request(9031, (fine_adjust_mode.value,))
    def lock_in(self):
        """Start target tracking / lock onto the current target (request 9013)."""
        self._request(9013)
def get_search_area(self) -> Tuple[float, float, float, float, bool]:
_, _, horizontal_centre, vertical_centre, horizontal_range, vertical_range, enabled = self._request(9042)
horizontal_centre = float(horizontal_centre)
vertical_centre = float(vertical_centre)
horizontal_range = float(horizontal_range)
vertical_range = float(vertical_range)
enabled = bool(enabled)
return horizontal_centre, vertical_centre, horizontal_range, vertical_range, enabled
    def set_search_area(self, horizontal_centre: float, vertical_centre: float, horizontal_range: float, vertical_range: float, enabled: bool):
        """Configure the search area: centre, extent and enabled flag (request 9043)."""
        self._request(9043, (horizontal_centre, vertical_centre, horizontal_range, vertical_range, enabled))
    def get_search_spiral(self) -> Tuple[float, float]:
        """Return the search spiral dimensions as (horizontal, vertical) range (request 9040)."""
        _, _, horizontal_range, vertical_range = self._request(9040)
        return float(horizontal_range), float(vertical_range)
    def set_search_spiral(self, horizontal_range: float, vertical_range: float):
        """Set the search spiral dimensions (request 9041)."""
        self._request(9041, (horizontal_range, vertical_range))
    def get_coordinate(self, inclination_mode: TMCInclinationMode, wait_time: int = 1000) -> Tuple[Coordinate, int, Coordinate, int]:
        """Request a coordinate measurement (request 2082).

        Return the measured coordinate and its timestamp, plus the
        continuously-computed coordinate and its timestamp.
        wait_time is presumably in milliseconds — TODO confirm against the
        instrument protocol documentation.
        """
        _, _, e, n, h, measure_time, e_cont, n_cont, h_cont, measure_time_cont = self._request(2082, (wait_time, inclination_mode.value), return_code_handler=noop_return_code_handler)
        coordinate = Coordinate(float(e), float(n), float(h))
        coordinate_cont = Coordinate(float(e_cont), float(n_cont), float(h_cont))
        measure_time = int(measure_time)
        measure_time_cont = int(measure_time_cont)
        return coordinate, measure_time, coordinate_cont, measure_time_cont
    def get_simple_measurement(self, inclination_mode: TMCInclinationMode, wait_time: int = 1000) -> Tuple[Angles, float]:
        """Return the measured angles and slope distance (request 2108)."""
        _, _, horizontal, vertical, slope_distance = self._request(2108, (wait_time, inclination_mode.value,))
        angles = Angles(float(horizontal), float(vertical))
        slope_distance = float(slope_distance)
        return angles, slope_distance
    def get_angles_simple(self, inclination_mode: TMCInclinationMode) -> Angles:
        """Return the current horizontal/vertical angles (request 2107)."""
        _, _, horizontal, vertical = self._request(2107, (inclination_mode.value,))
        return Angles(float(horizontal), float(vertical))
    def get_angles_complete(self, inclination_mode: TMCInclinationMode) -> Tuple[Angles, float, int, float, float, float, int, FacePosition]:
        """Return a complete angle measurement (request 2003).

        Includes the angles, their accuracy and measurement time, the cross
        and length inclination with accuracy and measurement time, and the
        telescope face position.
        """
        _, _, horizontal, vertical, angle_accuracy, angle_measure_time, cross_inclincation, length_inclination, incline_accuracy, incline_measurement_time, face_position = self._request(2003, (inclination_mode.value,))
        angles = Angles(float(horizontal), float(vertical))
        angle_accuracy = float(angle_accuracy)
        angle_measure_time = float(angle_measure_time)
        cross_inclincation = float(cross_inclincation)
        length_inclination = float(length_inclination)
        incline_accuracy = float(incline_accuracy)
        incline_measurement_time = int(incline_measurement_time)
        face_position = FacePosition(int(face_position))
        return angles, angle_accuracy, angle_measure_time, cross_inclincation, length_inclination, incline_accuracy, incline_measurement_time, face_position
    def do_measure(self, measurement_mode: TMCMeasurementMode, inclination_mode: TMCInclinationMode):
        """Start a distance/angle measurement with the given modes (request 2008)."""
        self._request(2008, (measurement_mode.value, inclination_mode.value,))
|
[
"collections.namedtuple",
"datetime.datetime"
] |
[((23265, 23308), 'collections.namedtuple', 'namedtuple', (['"""Coordinate"""', '"""east north head"""'], {}), "('Coordinate', 'east north head')\n", (23275, 23308), False, 'from collections import namedtuple\n'), ((23318, 23347), 'collections.namedtuple', 'namedtuple', (['"""Angles"""', '"""hz, v"""'], {}), "('Angles', 'hz, v')\n", (23328, 23347), False, 'from collections import namedtuple\n'), ((25943, 25991), 'datetime.datetime', 'datetime', (['year', 'month', 'day', 'hour', 'minute', 'second'], {}), '(year, month, day, hour, minute, second)\n', (25951, 25991), False, 'from datetime import datetime\n')]
|
'''Code from python notebook by simoninithomas
available at https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/master/Q%20learning/Q%20Learning%20with%20FrozenLake.ipynb
'''
import numpy as np
import gym
import random
env = gym.make("FrozenLake-v0")

action_size = env.action_space.n
state_size = env.observation_space.n

# Q-table: one row per state, one column per action, initialised to zero.
qtable = np.zeros((state_size, action_size))

total_episodes = 10000        # Total training episodes
learning_rate = 0.5           # Learning rate (alpha)
max_steps = 50                # Max steps per episode
gamma = 0.95                  # Discount factor

# Epsilon-greedy exploration parameters
epsilon = 1.0                 # Current exploration rate
max_epsilon = 1.0             # Exploration probability at start
min_epsilon = 0.01            # Minimum exploration probability
decay_rate = 0.001            # Exponential decay rate for exploration prob

# Total reward collected in each episode
rewards = []

for episode in range(total_episodes):
    state = env.reset()
    done = False
    total_rewards = 0
    print("EPISODE", episode)

    for step in range(max_steps):
        # Epsilon-greedy action selection: exploit with probability 1 - epsilon.
        exp_exp_tradeoff = random.uniform(0, 1)
        if exp_exp_tradeoff > epsilon:
            action = np.argmax(qtable[state, :])
            env.render()
        else:
            # Explore: random action
            action = env.action_space.sample()
            env.render()

        # Take the action (a) and observe the outcome state (s') and reward (r)
        new_state, reward, done, info = env.step(action)
        print("NEW STATE:", new_state, "REWARD:", reward)

        # Q-learning update:
        # Q(s,a) := Q(s,a) + lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
        qtable[state, action] += learning_rate * (
            reward + gamma * np.max(qtable[new_state, :]) - qtable[state, action]
        )
        print("QTABLE AT", state, qtable[state])

        total_rewards += reward
        state = new_state

        if done:
            print("GAME OVER.\n\n")
            break

    # BUG FIX: the original incremented the loop variable here ("episode += 1"),
    # which never affects a for-loop but silently made the decay below use
    # episode + 1.  Decay on the true episode index instead.
    epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * episode)
    print(epsilon)
    rewards.append(total_rewards)

print("Score over time: " + str(sum(rewards) / total_episodes))
print(qtable)

env.reset()

# Evaluation loop (intentionally disabled: range(0) runs zero episodes).
for episode in range(0):
    state = env.reset()
    done = False
    print("****************************************************")
    print("EPISODE ", episode)

    for step in range(max_steps):
        env.render()
        # Greedy action from the learned Q-table.
        action = np.argmax(qtable[state, :])
        new_state, reward, done, info = env.step(action)
        if done:
            break
        state = new_state

env.close()
|
[
"gym.make",
"numpy.argmax",
"random.uniform",
"numpy.zeros",
"numpy.max",
"numpy.exp"
] |
[((252, 277), 'gym.make', 'gym.make', (['"""FrozenLake-v0"""'], {}), "('FrozenLake-v0')\n", (260, 277), False, 'import gym\n'), ((358, 393), 'numpy.zeros', 'np.zeros', (['(state_size, action_size)'], {}), '((state_size, action_size))\n', (366, 393), True, 'import numpy as np\n'), ((1299, 1319), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1313, 1319), False, 'import random\n'), ((3237, 3264), 'numpy.argmax', 'np.argmax', (['qtable[state, :]'], {}), '(qtable[state, :])\n', (3246, 3264), True, 'import numpy as np\n'), ((1499, 1526), 'numpy.argmax', 'np.argmax', (['qtable[state, :]'], {}), '(qtable[state, :])\n', (1508, 1526), True, 'import numpy as np\n'), ((2714, 2743), 'numpy.exp', 'np.exp', (['(-decay_rate * episode)'], {}), '(-decay_rate * episode)\n', (2720, 2743), True, 'import numpy as np\n'), ((2217, 2245), 'numpy.max', 'np.max', (['qtable[new_state, :]'], {}), '(qtable[new_state, :])\n', (2223, 2245), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
#
# Play a game.
#
import raehutils
import sys, os, argparse, logging
class PlayPy(raehutils.RaehBaseClass):
    """Match a query string against a table of known games and launch the
    matched game on a dedicated i3 workspace."""

    # Exit status used when the query matches no game or is ambiguous.
    ERR_MATCH = 1

    def __init__(self):
        retroarch_cores_dir = os.environ.get("HOME") + "/.config/retroarch/cores"
        games_dir = os.environ.get("HOME") + "/media/games-local"

        # Game table: key = query identifier, value = display name + launch command.
        self.games = {
            "tome4": {
                "name": "Tales of Maj'Eyal",
                "cmd": ["tome4"]
            },
            "pokemon-emerald-jp": {
                "name": "Pokemon Emerald (JP)",
                "cmd": ["retroarch","-L",retroarch_cores_dir+"/vbam_libretro.so",games_dir+"/gba/official/Pocket Monsters - Emerald (Japan).gba"]
            },
            "melee": {
                "name": "Super Smash Bros. Melee (20XX) [UFC]",
                "cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.8.7-ucf-v0.73/dolphin-emu"]
            },
            "melee-no-ufc": {
                "name": "Super Smash Bros. Melee (20XX)",
                "cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.8.7/dolphin-emu"]
            },
            "melee-smashladder": {
                "name": "Super Smash Bros. Melee [Netplay/Smashladder]",
                "cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.9-fresh/dolphin-emu"]
            },
            "melee-uk-melee": {
                "name": "Super Smash Bros. Melee [Netplay/UK Melee]",
                "cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.8.7-fresh-uk-melee-ucf-v0.73/dolphin-emu"]
            },
            "retroarch": {
                "name": "RetroArch (general)",
                "cmd": ["retroarch"]
            },
            "mario-and-luigi-rpg": {
                "name": "Mario & Luigi RPG (JP)",
                "cmd": ["retroarch","-L",retroarch_cores_dir+"/vbam_libretro.so",games_dir+"/gba/official/mario-and-luigi-rpg-jp/1283 - Mario and Luigi RPG (J)(Rising Sun).gba"]
            },
            "elite-nes-pal": {
                "name": "Elite (NES) (PAL)",
                "cmd": ["retroarch","-L",retroarch_cores_dir+"/fceumm_libretro.so",games_dir+"/nes/official/elite/elite-pal.nes"]
            },
        }

        # i3 workspace that games are launched on.
        self.workspace_num = "10"

    ## CLI-related {{{
    def _parse_args(self):
        """Define and parse the command-line interface."""
        self.parser = argparse.ArgumentParser(description="Play a game.")
        self.parser.add_argument("-v", "--verbose", help="be verbose", action="count", default=0)
        self.parser.add_argument("-q", "--quiet", help="be quiet (overrides -v)", action="count", default=0)
        self.parser.add_argument("game", help="unique string of game to play")

        self.args = self.parser.parse_args()

        self._parse_verbosity()
    ## }}}

    def main(self):
        """Main entrypoint after program initialisation."""
        # Prefix-match the query against every known game identifier.
        matches = [k for k, v in self.games.items() if k.startswith(self.args.game)]

        if len(matches) < 1:
            self.fail("no matching games for query: {}".format(self.args.game), PlayPy.ERR_MATCH)
        if len(matches) > 1:
            # if we found an exact match, override
            exact_match = list(filter(lambda x: x == self.args.game, matches))
            if len(exact_match) == 1:
                matches = exact_match
            else:
                # BUG FIX: a misplaced closing parenthesis previously passed
                # PlayPy.ERR_MATCH to str.format() instead of fail(), so the
                # process exited with the wrong status on ambiguous queries.
                self.fail(
                    "query matches multiple games with no exact match: {}".format(", ".join(matches)),
                    PlayPy.ERR_MATCH,
                )

        game = self.games[matches[0]]
        self.logger.info("matched game: {}".format(game["name"]))
        self.logger.info("game cmd: {}".format(" ".join(game["cmd"])))
        self.start_game(game)

    def start_game(self, game):
        """Start a game: switch workspace, then launch its command."""
        self.switch_workspace(self.workspace_num)
        self.run_game_cmd(game["cmd"])
        #self.float_game_window()

    def switch_workspace(self, workspace_num):
        """Switch i3 to the given workspace."""
        cmd_switch_workspace = ["i3-msg", "workspace", workspace_num]
        raehutils.get_shell(cmd_switch_workspace)
        # sleep a TINY bit (Dolphin comes up before we switch, somehow??)
        #raehutils.get_shell(["sleep", "0.1"])

    def float_game_window(self):
        """Float the game window (i3)."""
        cmd_float_window = ["i3-msg", "floating", "enable"]
        # sleep for a bit first to wait for the window to come up
        raehutils.get_shell(["sleep", "1"])
        raehutils.get_shell(cmd_float_window)

    def run_game_cmd(self, cmd):
        """Run a shell command to start a game and detach."""
        raehutils.run_shell_detached(cmd)
        # alternative: don't detach, return return code
        # maybe useful as a switch
        #return raehutils.drop_to_shell(cmd)
if __name__ == "__main__":
    # Script entry point: build the CLI app and hand control to its run loop.
    program = PlayPy()
    program.run()
|
[
"raehutils.run_shell_detached",
"raehutils.get_shell",
"argparse.ArgumentParser",
"os.environ.get"
] |
[((2443, 2494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Play a game."""'}), "(description='Play a game.')\n", (2466, 2494), False, 'import sys, os, argparse, logging\n'), ((4154, 4195), 'raehutils.get_shell', 'raehutils.get_shell', (['cmd_switch_workspace'], {}), '(cmd_switch_workspace)\n', (4173, 4195), False, 'import raehutils\n'), ((4529, 4564), 'raehutils.get_shell', 'raehutils.get_shell', (["['sleep', '1']"], {}), "(['sleep', '1'])\n", (4548, 4564), False, 'import raehutils\n'), ((4573, 4610), 'raehutils.get_shell', 'raehutils.get_shell', (['cmd_float_window'], {}), '(cmd_float_window)\n', (4592, 4610), False, 'import raehutils\n'), ((4715, 4748), 'raehutils.run_shell_detached', 'raehutils.run_shell_detached', (['cmd'], {}), '(cmd)\n', (4743, 4748), False, 'import raehutils\n'), ((207, 229), 'os.environ.get', 'os.environ.get', (['"""HOME"""'], {}), "('HOME')\n", (221, 229), False, 'import sys, os, argparse, logging\n'), ((279, 301), 'os.environ.get', 'os.environ.get', (['"""HOME"""'], {}), "('HOME')\n", (293, 301), False, 'import sys, os, argparse, logging\n'), ((821, 843), 'os.environ.get', 'os.environ.get', (['"""HOME"""'], {}), "('HOME')\n", (835, 843), False, 'import sys, os, argparse, logging\n'), ((1060, 1082), 'os.environ.get', 'os.environ.get', (['"""HOME"""'], {}), "('HOME')\n", (1074, 1082), False, 'import sys, os, argparse, logging\n'), ((1309, 1331), 'os.environ.get', 'os.environ.get', (['"""HOME"""'], {}), "('HOME')\n", (1323, 1331), False, 'import sys, os, argparse, logging\n'), ((1556, 1578), 'os.environ.get', 'os.environ.get', (['"""HOME"""'], {}), "('HOME')\n", (1570, 1578), False, 'import sys, os, argparse, logging\n')]
|
import tensorflow as tf
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU
from modules import *
from tensorflow import keras
from tensorflow.keras.models import Model
# api functional model
def get_sasrec(maxlen, item_fea_col, embed_dim, embed_reg, dropout, num_heads, blocks, ffn_hidden_unit,
               use_norm = True, causality = True):
    """Build a SASRec-style two-tower Keras model.

    The user tower embeds a 150-item behaviour sequence (plus positional
    embeddings), runs it through `blocks` self-attention encoder layers and
    concatenates the last position's output with 20 user features.  The item
    tower embeds the sampled item id and concatenates 35 item features.  The
    score is the sigmoid of the dot product of the two tower outputs.

    :param maxlen: length of the user behaviour sequence
    :param item_fea_col: feature column dict; its 'feat_num' sizes the item
        embedding vocabulary
    :param embed_dim: embedding dimension shared by items and positions
    :param embed_reg: L2 regularisation factor for the embedding tables
    :param dropout: dropout rate applied to the sequence embeddings
    :param num_heads: attention heads per encoder layer
    :param blocks: number of stacked encoder layers
    :param ffn_hidden_unit: hidden size of each encoder's feed-forward net
    :param use_norm: whether the LayerNormalization layers are trainable
    :param causality: whether the encoder uses causal attention masking
    :return: a compiled-ready tf.keras Model with user/item in/out attributes
    """
    # train_inputs = Input(shape=(206,), dtype=tf.float32, name = 'model_inputs') # (None, 206)
    user_inputs = Input(shape=(170,), dtype=tf.float32, name='user_inputs') # (None, 170)
    item_inputs = Input(shape=(36,), dtype=tf.float32, name='item_inputs') # (None, 36)
    # split: 20 user features + 150-item behaviour sequence
    tmp = tf.split(user_inputs, axis=1, num_or_size_splits=[20, 150])
    user_feat_inputs, user_seq_inputs= tmp # (None,20) (None,150)
    # split: 1 sampled item id + 35 item features
    tmp = tf.split(item_inputs, axis=1, num_or_size_splits=[1, 35])
    sample_cspuidx_inputs, item_feat_inputs = tmp # (None,1) (None,35)
    ### ********** ###
    # user part
    ### ********** ###
    new_seq_inputs = tf.cast(user_seq_inputs, dtype = tf.int32)
    # mask out padding positions (id 0)
    mask = tf.expand_dims(tf.cast(tf.not_equal(new_seq_inputs, 0), dtype=tf.float32), axis=-1) # (None, maxlen, 1)
    item_embedding = Embedding(input_dim=item_fea_col['feat_num'],
                               input_length=1,
                               output_dim=embed_dim,
                               mask_zero=True,
                               embeddings_initializer='random_uniform',
                               embeddings_regularizer=l2(embed_reg))
    seq_embed = item_embedding(new_seq_inputs) # (None, 150, dim=50)
    pos_embedding = Embedding(input_dim=maxlen,
                              input_length=1,
                              output_dim=embed_dim,
                              mask_zero=False,
                              embeddings_initializer='random_uniform',
                              embeddings_regularizer=l2(embed_reg))
    pos_encoding = tf.expand_dims(pos_embedding(tf.range(maxlen)), axis=0)
    seq_embed += pos_encoding
    seq_embed = Dropout(dropout)(seq_embed)
    att_outputs = seq_embed # (None, maxlen, dim)
    att_outputs *= mask
    # stacked self-attention encoder blocks (EncoderLayer from modules.py)
    encoder_layer = [EncoderLayer(embed_dim, num_heads, ffn_hidden_unit,
                                  dropout, use_norm, causality) for _ in range(blocks)]
    for block in encoder_layer:
        att_outputs = block([att_outputs, mask])  # (None, maxlen, dim)
        att_outputs *= mask # (None, maxlen, dim)
    seq_outputs = att_outputs[:, -1] # (None, dim) remain the embedding of the last item
    # concat
    user_feat_vec = tf.concat([user_feat_inputs, seq_outputs], -1) # (None,20+50)
    # MLP
    ffn_1 = Dense(units=60, activation='relu', use_bias=True, kernel_initializer=keras.initializers.he_uniform())
    ffn_2 = Dense(units=50, activation='relu', use_bias=True, kernel_initializer=keras.initializers.he_uniform())
    norm1 = LayerNormalization(epsilon=1e-6, trainable=use_norm)
    norm2 = LayerNormalization(epsilon=1e-6, trainable=use_norm)
    feat_vec = ffn_1(user_feat_vec)
    feat_vec = norm1(feat_vec)
    feat_vec = ffn_2(feat_vec) # (None,50)
    user_vec = norm2(feat_vec)
    ### ********** ###
    # item part
    ### ********** ###
    item_info = item_embedding(sample_cspuidx_inputs) # (None, 1, dim)
    item_emb_vec = item_info[:, -1] # (None, dim)
    item_feat_vec = tf.concat([item_emb_vec, item_feat_inputs], -1) #(None, dim+35)
    ffn_3 = Dense(units=50, activation='relu', use_bias=True, kernel_initializer=keras.initializers.he_uniform())
    norm3 = LayerNormalization(epsilon=1e-6, trainable=use_norm)
    item_vec = ffn_3(item_feat_vec) #(None, 50)
    item_vec = norm3(item_vec)
    # compute logits: dot product of the two tower outputs, then sigmoid
    logits = tf.reduce_sum(user_vec * item_vec, axis=-1, keepdims=True) # (None, 1)
    logits = tf.nn.sigmoid(logits)
    model = Model(inputs=[user_inputs, item_inputs], outputs=[logits])
    # expose the tower endpoints so embeddings can be exported separately
    model.__setattr__("user_inputs", user_inputs)
    model.__setattr__("user_outputs", user_vec)
    model.__setattr__("item_inputs", item_inputs)
    model.__setattr__("item_outputs", item_vec)
    return model
|
[
"tensorflow.range",
"tensorflow.reduce_sum",
"tensorflow.not_equal",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.initializers.he_uniform",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.keras.models.Model",
"tensorflow.cast",
"tensorflow.keras.layers.Input",
"tensorflow.split",
"tensorflow.keras.regularizers.l2"
] |
[((559, 616), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(170,)', 'dtype': 'tf.float32', 'name': '"""user_inputs"""'}), "(shape=(170,), dtype=tf.float32, name='user_inputs')\n", (564, 616), False, 'from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU\n'), ((650, 706), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(36,)', 'dtype': 'tf.float32', 'name': '"""item_inputs"""'}), "(shape=(36,), dtype=tf.float32, name='item_inputs')\n", (655, 706), False, 'from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU\n'), ((744, 803), 'tensorflow.split', 'tf.split', (['user_inputs'], {'axis': '(1)', 'num_or_size_splits': '[20, 150]'}), '(user_inputs, axis=1, num_or_size_splits=[20, 150])\n', (752, 803), True, 'import tensorflow as tf\n'), ((881, 938), 'tensorflow.split', 'tf.split', (['item_inputs'], {'axis': '(1)', 'num_or_size_splits': '[1, 35]'}), '(item_inputs, axis=1, num_or_size_splits=[1, 35])\n', (889, 938), True, 'import tensorflow as tf\n'), ((1097, 1137), 'tensorflow.cast', 'tf.cast', (['user_seq_inputs'], {'dtype': 'tf.int32'}), '(user_seq_inputs, dtype=tf.int32)\n', (1104, 1137), True, 'import tensorflow as tf\n'), ((2728, 2774), 'tensorflow.concat', 'tf.concat', (['[user_feat_inputs, seq_outputs]', '(-1)'], {}), '([user_feat_inputs, seq_outputs], -1)\n', (2737, 2774), True, 'import tensorflow as tf\n'), ((3042, 3095), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {'epsilon': '(1e-06)', 'trainable': 'use_norm'}), '(epsilon=1e-06, trainable=use_norm)\n', (3060, 3095), False, 'from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU\n'), ((3107, 3160), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {'epsilon': '(1e-06)', 'trainable': 'use_norm'}), '(epsilon=1e-06, trainable=use_norm)\n', (3125, 3160), False, 'from tensorflow.keras.layers import 
Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU\n'), ((3511, 3558), 'tensorflow.concat', 'tf.concat', (['[item_emb_vec, item_feat_inputs]', '(-1)'], {}), '([item_emb_vec, item_feat_inputs], -1)\n', (3520, 3558), True, 'import tensorflow as tf\n'), ((3702, 3755), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {'epsilon': '(1e-06)', 'trainable': 'use_norm'}), '(epsilon=1e-06, trainable=use_norm)\n', (3720, 3755), False, 'from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU\n'), ((3870, 3928), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(user_vec * item_vec)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(user_vec * item_vec, axis=-1, keepdims=True)\n', (3883, 3928), True, 'import tensorflow as tf\n'), ((3954, 3975), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (3967, 3975), True, 'import tensorflow as tf\n'), ((3989, 4047), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[user_inputs, item_inputs]', 'outputs': '[logits]'}), '(inputs=[user_inputs, item_inputs], outputs=[logits])\n', (3994, 4047), False, 'from tensorflow.keras.models import Model\n'), ((2182, 2198), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (2189, 2198), False, 'from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU\n'), ((1174, 1205), 'tensorflow.not_equal', 'tf.not_equal', (['new_seq_inputs', '(0)'], {}), '(new_seq_inputs, 0)\n', (1186, 1205), True, 'import tensorflow as tf\n'), ((1641, 1654), 'tensorflow.keras.regularizers.l2', 'l2', (['embed_reg'], {}), '(embed_reg)\n', (1643, 1654), False, 'from tensorflow.keras.regularizers import l2\n'), ((2045, 2058), 'tensorflow.keras.regularizers.l2', 'l2', (['embed_reg'], {}), '(embed_reg)\n', (2047, 2058), False, 'from tensorflow.keras.regularizers import l2\n'), ((2108, 2124), 'tensorflow.range', 'tf.range', (['maxlen'], {}), '(maxlen)\n', 
(2116, 2124), True, 'import tensorflow as tf\n'), ((2883, 2914), 'tensorflow.keras.initializers.he_uniform', 'keras.initializers.he_uniform', ([], {}), '()\n', (2912, 2914), False, 'from tensorflow import keras\n'), ((2997, 3028), 'tensorflow.keras.initializers.he_uniform', 'keras.initializers.he_uniform', ([], {}), '()\n', (3026, 3028), False, 'from tensorflow import keras\n'), ((3657, 3688), 'tensorflow.keras.initializers.he_uniform', 'keras.initializers.he_uniform', ([], {}), '()\n', (3686, 3688), False, 'from tensorflow import keras\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import logging
import unittest
import weakref
import mock
import pytest
from hikari import events
from hikari import iterators
from hikari.impl import bot
from hikari.utilities import event_stream
from tests.hikari import hikari_test_helpers
class TestStreamer:
    # Tests for the abstract Streamer base: context-manager protocol behaviour.
    @pytest.fixture(scope="module")
    def stub_streamer(self):
        # Module-scoped stub so the mocked namespace is shared across tests.
        return hikari_test_helpers.mock_class_namespace(event_stream.Streamer)
    @pytest.mark.asyncio
    async def test___aenter___and___aexit__(self, stub_streamer):
        # Entering the async context must open the streamer; only exiting closes it.
        async with stub_streamer():
            stub_streamer.open.assert_awaited_once()
            stub_streamer.close.assert_not_called()
        stub_streamer.open.assert_awaited_once()
        stub_streamer.close.assert_awaited_once()
    def test___enter__(self, stub_streamer):
        # Synchronous "with" must be rejected with a helpful TypeError.
        # flake8 gets annoyed if we use "with" here so here's a hacky alternative
        with pytest.raises(TypeError, match=" is async-only, did you mean 'async with'?"):
            stub_streamer().__enter__()
    def test___exit__(self, stub_streamer):
        # __exit__ must exist (be a no-op) rather than raise AttributeError.
        # NOTE(review): pytest.fail expects a str message; passing the exception
        # object relies on implicit coercion — confirm against the pinned pytest.
        try:
            stub_streamer().__exit__(None, None, None)
        except AttributeError as exc:
            pytest.fail(exc)
@pytest.fixture()
def mock_app():
    # Fixture: a Mock spec'd against BotApp, used as the streamer's app.
    return mock.Mock(bot.BotApp)
class TestEventStream:
    @pytest.mark.asyncio
    async def test__listener_when_filter_returns_false(self, mock_app):
        # An event rejected by the filter must not be queued.
        stream = event_stream.EventStream(mock_app, events.Event, timeout=None)
        stream.filter(lambda _: False)
        mock_event = object()
        assert await stream._listener(mock_event) is None
        assert stream._queue.qsize() == 0
@pytest.mark.asyncio
async def test__listener_when_filter_passes_and_queue_full(self):
stream = event_stream.EventStream(mock_app, events.Event, timeout=None, limit=2)
stream._queue.put_nowait(object())
stream._queue.put_nowait(object())
stream.filter(lambda _: True)
mock_event = object()
assert await stream._listener(mock_event) is None
assert stream._queue.qsize() == 2
assert stream._queue.get_nowait() is not mock_event
assert stream._queue.get_nowait() is not mock_event
@pytest.mark.asyncio
async def test__listener_when_filter_passes_and_queue_not_full(self):
stream = event_stream.EventStream(mock_app, events.Event, timeout=None, limit=None)
stream._queue.put_nowait(object())
stream._queue.put_nowait(object())
stream.filter(lambda _: True)
mock_event = object()
assert await stream._listener(mock_event) is None
assert stream._queue.qsize() == 3
assert stream._queue.get_nowait() is not mock_event
assert stream._queue.get_nowait() is not mock_event
assert stream._queue.get_nowait() is mock_event
    @pytest.mark.asyncio
    async def test___anext___when_stream_closed(self):
        # Iterating an inactive (closed) streamer must raise TypeError.
        streamer = hikari_test_helpers.stub_class(event_stream.EventStream, _active=False)
        # flake8 gets annoyed if we use "with" here so here's a hacky alternative
        with pytest.raises(TypeError):
            await streamer.__anext__()
    @pytest.mark.asyncio
    @hikari_test_helpers.timeout()
    async def test___anext___times_out(self):
        # With an empty queue and a tiny timeout, iteration must end without yielding.
        streamer = hikari_test_helpers.stub_class(
            event_stream.EventStream,
            _active=True,
            _queue=asyncio.Queue(),
            _timeout=hikari_test_helpers.REASONABLE_QUICK_RESPONSE_TIME,
        )
        async for _ in streamer:
            pytest.fail("streamer shouldn't have yielded anything")
        else:
            # Ensure we don't get a warning or error on del
            streamer._active = False
    @pytest.mark.asyncio
    @hikari_test_helpers.timeout()
    async def test___anext___waits_for_next_event(self):
        # An event queued *after* iteration starts must still be yielded,
        # i.e. __anext__ blocks until the queue is fed or the timeout elapses.
        mock_event = object()
        streamer = hikari_test_helpers.stub_class(
            event_stream.EventStream,
            _active=True,
            _queue=asyncio.Queue(),
            _timeout=hikari_test_helpers.REASONABLE_SLEEP_TIME * 3,
        )
        async def add_event():
            # Feed the queue from a background task after a short delay.
            await asyncio.sleep(hikari_test_helpers.REASONABLE_SLEEP_TIME)
            streamer._queue.put_nowait(mock_event)
        asyncio.create_task(add_event())
        async for event in streamer:
            assert event is mock_event
            # Ensure we don't get a warning or error on del
            streamer._active = False
            return
        pytest.fail("streamer should've yielded something")
    @pytest.mark.asyncio
    @hikari_test_helpers.timeout()
    async def test___anext__(self):
        # An already-queued event must be yielded immediately.
        mock_event = object()
        streamer = hikari_test_helpers.stub_class(
            event_stream.EventStream,
            _active=True,
            _queue=asyncio.Queue(),
            _timeout=hikari_test_helpers.REASONABLE_QUICK_RESPONSE_TIME,
        )
        streamer._queue.put_nowait(mock_event)
        async for event in streamer:
            assert event is mock_event
            # Ensure we don't get a warning or error on del
            streamer._active = False
            return
        pytest.fail("streamer should've yielded something")
    @pytest.mark.asyncio
    async def test___await__(self):
        # Awaiting the streamer should open it, drain __anext__ and close it,
        # returning the collected events as a list in yield order.
        mock_event_0 = object()
        mock_event_1 = object()
        mock_event_2 = object()
        streamer = hikari_test_helpers.mock_class_namespace(
            event_stream.EventStream,
            close=mock.AsyncMock(),
            open=mock.AsyncMock(),
            init=False,
            __anext__=mock.AsyncMock(side_effect=[mock_event_0, mock_event_1, mock_event_2]),
        )()
        streamer._active = False
        assert await streamer == [mock_event_0, mock_event_1, mock_event_2]
        streamer.open.assert_awaited_once()
        streamer.close.assert_awaited_once()
    def test___del___for_active_stream(self):
        # Deleting a still-active streamer must schedule close() on the loop
        # and emit a warning about the streamer falling out of scope.
        mock_coroutine = object()
        close_method = mock.Mock(return_value=mock_coroutine)
        streamer = hikari_test_helpers.mock_class_namespace(event_stream.EventStream, close=close_method, init=False)()
        streamer._event_type = events.Event
        streamer._active = True
        with mock.patch.object(asyncio, "ensure_future", return_value=mock_coroutine):
            with unittest.TestCase().assertLogs("hikari", level=logging.WARNING) as logging_watcher:
                del streamer
                assert logging_watcher.output == [
                    "WARNING:hikari:active 'Event' streamer fell out of scope before being closed"
                ]
            asyncio.ensure_future.assert_called_once_with(mock_coroutine)
        close_method.assert_called_once_with()
    def test___del___for_inactive_stream(self):
        # Deleting an inactive streamer must not try to schedule close().
        close_method = mock.Mock()
        streamer = hikari_test_helpers.mock_class_namespace(event_stream.EventStream, close=close_method, init=False)()
        streamer._event_type = events.Event
        streamer._active = False
        with mock.patch.object(asyncio, "ensure_future"):
            del streamer
            asyncio.ensure_future.assert_not_called()
        close_method.assert_not_called()
    @pytest.mark.asyncio
    async def test_close_for_inactive_stream(self, mock_app):
        # Closing an inactive stream must not unsubscribe anything.
        stream = event_stream.EventStream(mock_app, events.Event, timeout=None, limit=None)
        await stream.close()
        mock_app.dispatcher.unsubscribe.assert_not_called()
    @pytest.mark.asyncio
    async def test_close_for_active_stream(self, mock_app):
        # Closing an active stream must unsubscribe its listener and reset state.
        mock_registered_listener = object()
        stream = hikari_test_helpers.stub_class(
            event_stream.EventStream,
            _app=mock_app,
            _event_type=events.Event,
            _active=True,
            _registered_listener=mock_registered_listener,
        )
        await stream.close()
        mock_app.dispatcher.unsubscribe.assert_called_once_with(events.Event, mock_registered_listener)
        assert stream._active is False
        assert stream._registered_listener is None
    @pytest.mark.asyncio
    async def test_close_for_active_stream_handles_value_error(self, mock_app):
        # close() must swallow the ValueError raised when the listener is
        # already unsubscribed, still resetting the stream's state.
        mock_registered_listener = object()
        mock_app.dispatcher.unsubscribe.side_effect = ValueError
        stream = hikari_test_helpers.stub_class(
            event_stream.EventStream,
            _app=mock_app,
            _event_type=events.Event,
            _active=True,
            _registered_listener=mock_registered_listener,
        )
        await stream.close()
        mock_app.dispatcher.unsubscribe.assert_called_once_with(events.Event, mock_registered_listener)
        assert stream._active is False
        assert stream._registered_listener is None
    def test_filter_for_inactive_stream(self):
        # On an inactive stream, filter() must AND the new predicate and
        # attribute checks into the stream's own _filters aggregate.
        stream = hikari_test_helpers.stub_class(event_stream.EventStream, _filters=iterators.All(()), _active=False)
        first_pass = mock.Mock(attr=True)
        second_pass = mock.Mock(attr=True)
        first_fails = mock.Mock(attr=True)
        second_fail = mock.Mock(attr=False)
        def predicate(obj):
            # Only the two "pass" mocks satisfy the predicate.
            return obj in (first_pass, second_pass)
        stream.filter(predicate, attr=True)
        assert stream._filters(first_pass) is True
        assert stream._filters(first_fails) is False
        assert stream._filters(second_pass) is True
        assert stream._filters(second_fail) is False
@pytest.mark.asyncio
async def test_filter_for_active_stream(self):
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_active=True,
)
mock_wrapping_iterator = object()
predicate = object()
with mock.patch.object(iterators.LazyIterator, "filter", return_value=mock_wrapping_iterator):
assert stream.filter(predicate, name="OK") is mock_wrapping_iterator
iterators.LazyIterator.filter.assert_called_once_with(predicate, name="OK")
# Ensure we don't get a warning or error on del
stream._active = False
@pytest.mark.asyncio
async def test_open_for_inactive_stream(self, mock_app):
mock_listener = object()
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_app=mock_app,
_event_type=events.Event,
_active=True,
_registered_listener=mock_listener,
)
with mock.patch.object(event_stream, "_generate_weak_listener"):
with mock.patch.object(weakref, "WeakMethod"):
await stream.open()
weakref.WeakMethod.assert_not_called()
event_stream._generate_weak_listener.assert_not_called()
mock_app.dispatcher.subscribe.assert_not_called()
assert stream._active is True
assert stream._registered_listener is mock_listener
# Ensure we don't get a warning or error on del
stream._active = False
@pytest.mark.asyncio
async def test_open_for_active_stream(self, mock_app):
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_app=mock_app,
_event_type=events.Event,
_active=False,
)
mock_listener = object()
mock_listener_ref = object()
with mock.patch.object(event_stream, "_generate_weak_listener", return_value=mock_listener):
with mock.patch.object(weakref, "WeakMethod", return_value=mock_listener_ref):
await stream.open()
weakref.WeakMethod.assert_called_once_with(stream._listener)
event_stream._generate_weak_listener.assert_called_once_with(mock_listener_ref)
mock_app.dispatcher.subscribe.assert_called_once_with(events.Event, mock_listener)
assert stream._active is True
assert stream._registered_listener is mock_listener
# Ensure we don't get a warning or error on del
stream._active = False
|
[
"hikari.utilities.event_stream._generate_weak_listener.assert_not_called",
"weakref.WeakMethod.assert_not_called",
"unittest.TestCase",
"tests.hikari.hikari_test_helpers.mock_class_namespace",
"hikari.iterators.LazyIterator.filter.assert_called_once_with",
"hikari.iterators.All",
"tests.hikari.hikari_test_helpers.timeout",
"mock.AsyncMock",
"asyncio.ensure_future.assert_called_once_with",
"weakref.WeakMethod.assert_called_once_with",
"pytest.fail",
"pytest.raises",
"hikari.utilities.event_stream._generate_weak_listener.assert_called_once_with",
"mock.patch.object",
"hikari.utilities.event_stream.EventStream",
"asyncio.sleep",
"pytest.fixture",
"tests.hikari.hikari_test_helpers.stub_class",
"asyncio.ensure_future.assert_not_called",
"mock.Mock",
"asyncio.Queue"
] |
[((2311, 2327), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2325, 2327), False, 'import pytest\n'), ((1397, 1427), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1411, 1427), False, 'import pytest\n'), ((2355, 2376), 'mock.Mock', 'mock.Mock', (['bot.BotApp'], {}), '(bot.BotApp)\n', (2364, 2376), False, 'import mock\n'), ((4296, 4325), 'tests.hikari.hikari_test_helpers.timeout', 'hikari_test_helpers.timeout', ([], {}), '()\n', (4323, 4325), False, 'from tests.hikari import hikari_test_helpers\n'), ((4850, 4879), 'tests.hikari.hikari_test_helpers.timeout', 'hikari_test_helpers.timeout', ([], {}), '()\n', (4877, 4879), False, 'from tests.hikari import hikari_test_helpers\n'), ((5682, 5711), 'tests.hikari.hikari_test_helpers.timeout', 'hikari_test_helpers.timeout', ([], {}), '()\n', (5709, 5711), False, 'from tests.hikari import hikari_test_helpers\n'), ((1472, 1535), 'tests.hikari.hikari_test_helpers.mock_class_namespace', 'hikari_test_helpers.mock_class_namespace', (['event_stream.Streamer'], {}), '(event_stream.Streamer)\n', (1512, 1535), False, 'from tests.hikari import hikari_test_helpers\n'), ((2516, 2578), 'hikari.utilities.event_stream.EventStream', 'event_stream.EventStream', (['mock_app', 'events.Event'], {'timeout': 'None'}), '(mock_app, events.Event, timeout=None)\n', (2540, 2578), False, 'from hikari.utilities import event_stream\n'), ((2862, 2933), 'hikari.utilities.event_stream.EventStream', 'event_stream.EventStream', (['mock_app', 'events.Event'], {'timeout': 'None', 'limit': '(2)'}), '(mock_app, events.Event, timeout=None, limit=2)\n', (2886, 2933), False, 'from hikari.utilities import event_stream\n'), ((3426, 3500), 'hikari.utilities.event_stream.EventStream', 'event_stream.EventStream', (['mock_app', 'events.Event'], {'timeout': 'None', 'limit': 'None'}), '(mock_app, events.Event, timeout=None, limit=None)\n', (3450, 3500), False, 'from hikari.utilities import event_stream\n'), ((4032, 4103), 
'tests.hikari.hikari_test_helpers.stub_class', 'hikari_test_helpers.stub_class', (['event_stream.EventStream'], {'_active': '(False)'}), '(event_stream.EventStream, _active=False)\n', (4062, 4103), False, 'from tests.hikari import hikari_test_helpers\n'), ((5599, 5650), 'pytest.fail', 'pytest.fail', (['"""streamer should\'ve yielded something"""'], {}), '("streamer should\'ve yielded something")\n', (5610, 5650), False, 'import pytest\n'), ((6262, 6313), 'pytest.fail', 'pytest.fail', (['"""streamer should\'ve yielded something"""'], {}), '("streamer should\'ve yielded something")\n', (6273, 6313), False, 'import pytest\n'), ((7074, 7112), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'mock_coroutine'}), '(return_value=mock_coroutine)\n', (7083, 7112), False, 'import mock\n'), ((7891, 7902), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (7900, 7902), False, 'import mock\n'), ((8385, 8459), 'hikari.utilities.event_stream.EventStream', 'event_stream.EventStream', (['mock_app', 'events.Event'], {'timeout': 'None', 'limit': 'None'}), '(mock_app, events.Event, timeout=None, limit=None)\n', (8409, 8459), False, 'from hikari.utilities import event_stream\n'), ((8696, 8863), 'tests.hikari.hikari_test_helpers.stub_class', 'hikari_test_helpers.stub_class', (['event_stream.EventStream'], {'_app': 'mock_app', '_event_type': 'events.Event', '_active': '(True)', '_registered_listener': 'mock_registered_listener'}), '(event_stream.EventStream, _app=mock_app,\n _event_type=events.Event, _active=True, _registered_listener=\n mock_registered_listener)\n', (8726, 8863), False, 'from tests.hikari import hikari_test_helpers\n'), ((9381, 9548), 'tests.hikari.hikari_test_helpers.stub_class', 'hikari_test_helpers.stub_class', (['event_stream.EventStream'], {'_app': 'mock_app', '_event_type': 'events.Event', '_active': '(True)', '_registered_listener': 'mock_registered_listener'}), '(event_stream.EventStream, _app=mock_app,\n _event_type=events.Event, _active=True, _registered_listener=\n 
mock_registered_listener)\n', (9411, 9548), False, 'from tests.hikari import hikari_test_helpers\n'), ((10020, 10040), 'mock.Mock', 'mock.Mock', ([], {'attr': '(True)'}), '(attr=True)\n', (10029, 10040), False, 'import mock\n'), ((10063, 10083), 'mock.Mock', 'mock.Mock', ([], {'attr': '(True)'}), '(attr=True)\n', (10072, 10083), False, 'import mock\n'), ((10106, 10126), 'mock.Mock', 'mock.Mock', ([], {'attr': '(True)'}), '(attr=True)\n', (10115, 10126), False, 'import mock\n'), ((10149, 10170), 'mock.Mock', 'mock.Mock', ([], {'attr': '(False)'}), '(attr=False)\n', (10158, 10170), False, 'import mock\n'), ((10601, 10671), 'tests.hikari.hikari_test_helpers.stub_class', 'hikari_test_helpers.stub_class', (['event_stream.EventStream'], {'_active': '(True)'}), '(event_stream.EventStream, _active=True)\n', (10631, 10671), False, 'from tests.hikari import hikari_test_helpers\n'), ((11277, 11428), 'tests.hikari.hikari_test_helpers.stub_class', 'hikari_test_helpers.stub_class', (['event_stream.EventStream'], {'_app': 'mock_app', '_event_type': 'events.Event', '_active': '(True)', '_registered_listener': 'mock_listener'}), '(event_stream.EventStream, _app=mock_app,\n _event_type=events.Event, _active=True, _registered_listener=mock_listener)\n', (11307, 11428), False, 'from tests.hikari import hikari_test_helpers\n'), ((12137, 12253), 'tests.hikari.hikari_test_helpers.stub_class', 'hikari_test_helpers.stub_class', (['event_stream.EventStream'], {'_app': 'mock_app', '_event_type': 'events.Event', '_active': '(False)'}), '(event_stream.EventStream, _app=mock_app,\n _event_type=events.Event, _active=False)\n', (12167, 12253), False, 'from tests.hikari import hikari_test_helpers\n'), ((2010, 2086), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '""" is async-only, did you mean \'async with\'?"""'}), '(TypeError, match=" is async-only, did you mean \'async with\'?")\n', (2023, 2086), False, 'import pytest\n'), ((4200, 4224), 'pytest.raises', 'pytest.raises', 
(['TypeError'], {}), '(TypeError)\n', (4213, 4224), False, 'import pytest\n'), ((4652, 4707), 'pytest.fail', 'pytest.fail', (['"""streamer shouldn\'t have yielded anything"""'], {}), '("streamer shouldn\'t have yielded anything")\n', (4663, 4707), False, 'import pytest\n'), ((7132, 7235), 'tests.hikari.hikari_test_helpers.mock_class_namespace', 'hikari_test_helpers.mock_class_namespace', (['event_stream.EventStream'], {'close': 'close_method', 'init': '(False)'}), '(event_stream.EventStream, close=\n close_method, init=False)\n', (7172, 7235), False, 'from tests.hikari import hikari_test_helpers\n'), ((7323, 7395), 'mock.patch.object', 'mock.patch.object', (['asyncio', '"""ensure_future"""'], {'return_value': 'mock_coroutine'}), "(asyncio, 'ensure_future', return_value=mock_coroutine)\n", (7340, 7395), False, 'import mock\n'), ((7709, 7770), 'asyncio.ensure_future.assert_called_once_with', 'asyncio.ensure_future.assert_called_once_with', (['mock_coroutine'], {}), '(mock_coroutine)\n', (7754, 7770), False, 'import asyncio\n'), ((7922, 8025), 'tests.hikari.hikari_test_helpers.mock_class_namespace', 'hikari_test_helpers.mock_class_namespace', (['event_stream.EventStream'], {'close': 'close_method', 'init': '(False)'}), '(event_stream.EventStream, close=\n close_method, init=False)\n', (7962, 8025), False, 'from tests.hikari import hikari_test_helpers\n'), ((8114, 8157), 'mock.patch.object', 'mock.patch.object', (['asyncio', '"""ensure_future"""'], {}), "(asyncio, 'ensure_future')\n", (8131, 8157), False, 'import mock\n'), ((8196, 8237), 'asyncio.ensure_future.assert_not_called', 'asyncio.ensure_future.assert_not_called', ([], {}), '()\n', (8235, 8237), False, 'import asyncio\n'), ((10792, 10885), 'mock.patch.object', 'mock.patch.object', (['iterators.LazyIterator', '"""filter"""'], {'return_value': 'mock_wrapping_iterator'}), "(iterators.LazyIterator, 'filter', return_value=\n mock_wrapping_iterator)\n", (10809, 10885), False, 'import mock\n'), ((10976, 11051), 
'hikari.iterators.LazyIterator.filter.assert_called_once_with', 'iterators.LazyIterator.filter.assert_called_once_with', (['predicate'], {'name': '"""OK"""'}), "(predicate, name='OK')\n", (11029, 11051), False, 'from hikari import iterators\n'), ((11510, 11568), 'mock.patch.object', 'mock.patch.object', (['event_stream', '"""_generate_weak_listener"""'], {}), "(event_stream, '_generate_weak_listener')\n", (11527, 11568), False, 'import mock\n'), ((11733, 11789), 'hikari.utilities.event_stream._generate_weak_listener.assert_not_called', 'event_stream._generate_weak_listener.assert_not_called', ([], {}), '()\n', (11787, 11789), False, 'from hikari.utilities import event_stream\n'), ((12393, 12484), 'mock.patch.object', 'mock.patch.object', (['event_stream', '"""_generate_weak_listener"""'], {'return_value': 'mock_listener'}), "(event_stream, '_generate_weak_listener', return_value=\n mock_listener)\n", (12410, 12484), False, 'import mock\n'), ((12698, 12777), 'hikari.utilities.event_stream._generate_weak_listener.assert_called_once_with', 'event_stream._generate_weak_listener.assert_called_once_with', (['mock_listener_ref'], {}), '(mock_listener_ref)\n', (12758, 12777), False, 'from hikari.utilities import event_stream\n'), ((2291, 2307), 'pytest.fail', 'pytest.fail', (['exc'], {}), '(exc)\n', (2302, 2307), False, 'import pytest\n'), ((4506, 4521), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (4519, 4521), False, 'import asyncio\n'), ((5101, 5116), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (5114, 5116), False, 'import asyncio\n'), ((5246, 5302), 'asyncio.sleep', 'asyncio.sleep', (['hikari_test_helpers.REASONABLE_SLEEP_TIME'], {}), '(hikari_test_helpers.REASONABLE_SLEEP_TIME)\n', (5259, 5302), False, 'import asyncio\n'), ((5912, 5927), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (5925, 5927), False, 'import asyncio\n'), ((9965, 9982), 'hikari.iterators.All', 'iterators.All', (['()'], {}), '(())\n', (9978, 9982), False, 'from hikari import 
iterators\n'), ((11587, 11627), 'mock.patch.object', 'mock.patch.object', (['weakref', '"""WeakMethod"""'], {}), "(weakref, 'WeakMethod')\n", (11604, 11627), False, 'import mock\n'), ((11682, 11720), 'weakref.WeakMethod.assert_not_called', 'weakref.WeakMethod.assert_not_called', ([], {}), '()\n', (11718, 11720), False, 'import weakref\n'), ((12498, 12570), 'mock.patch.object', 'mock.patch.object', (['weakref', '"""WeakMethod"""'], {'return_value': 'mock_listener_ref'}), "(weakref, 'WeakMethod', return_value=mock_listener_ref)\n", (12515, 12570), False, 'import mock\n'), ((12625, 12685), 'weakref.WeakMethod.assert_called_once_with', 'weakref.WeakMethod.assert_called_once_with', (['stream._listener'], {}), '(stream._listener)\n', (12667, 12685), False, 'import weakref\n'), ((6589, 6605), 'mock.AsyncMock', 'mock.AsyncMock', ([], {}), '()\n', (6603, 6605), False, 'import mock\n'), ((6624, 6640), 'mock.AsyncMock', 'mock.AsyncMock', ([], {}), '()\n', (6638, 6640), False, 'import mock\n'), ((6688, 6758), 'mock.AsyncMock', 'mock.AsyncMock', ([], {'side_effect': '[mock_event_0, mock_event_1, mock_event_2]'}), '(side_effect=[mock_event_0, mock_event_1, mock_event_2])\n', (6702, 6758), False, 'import mock\n'), ((7414, 7433), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (7431, 7433), False, 'import unittest\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AzureInternalMonitoringPipelineSinkDescriptionArgs',
'ContainerCodePackagePropertiesArgs',
'ContainerLabelArgs',
'ContainerVolumeArgs',
'DiagnosticsDescriptionArgs',
'DiagnosticsRefArgs',
'EndpointPropertiesArgs',
'EnvironmentVariableArgs',
'ImageRegistryCredentialArgs',
'IngressConfigArgs',
'Layer4IngressConfigArgs',
'NetworkRefArgs',
'ResourceLimitsArgs',
'ResourceRequestsArgs',
'ResourceRequirementsArgs',
'ServiceResourceDescriptionArgs',
'SettingArgs',
'VolumeProviderParametersAzureFileArgs',
]
@pulumi.input_type
class AzureInternalMonitoringPipelineSinkDescriptionArgs:
    # NOTE: generated by the Pulumi SDK Generator. @pulumi.input_type introspects
    # this class's __init__ signature and @pulumi.getter properties, so the
    # structure must not be hand-edited.
    def __init__(__self__, *,
                 kind: pulumi.Input[str],
                 account_name: Optional[pulumi.Input[str]] = None,
                 auto_key_config_url: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 fluentd_config_url: Optional[Any] = None,
                 ma_config_url: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace: Optional[pulumi.Input[str]] = None):
        """
        Diagnostics settings for Geneva.
        :param pulumi.Input[str] kind: The kind of DiagnosticsSink.
               Expected value is 'AzureInternalMonitoringPipeline'.
        :param pulumi.Input[str] account_name: Azure Internal monitoring pipeline account.
        :param pulumi.Input[str] auto_key_config_url: Azure Internal monitoring pipeline autokey associated with the certificate.
        :param pulumi.Input[str] description: A description of the sink.
        :param Any fluentd_config_url: Azure Internal monitoring agent fluentd configuration.
        :param pulumi.Input[str] ma_config_url: Azure Internal monitoring agent configuration.
        :param pulumi.Input[str] name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription
        :param pulumi.Input[str] namespace: Azure Internal monitoring pipeline account namespace.
        """
        # The discriminator is hard-coded: the caller-supplied ``kind`` value is
        # intentionally ignored and replaced with this sink type's constant.
        pulumi.set(__self__, "kind", 'AzureInternalMonitoringPipeline')
        if account_name is not None:
            pulumi.set(__self__, "account_name", account_name)
        if auto_key_config_url is not None:
            pulumi.set(__self__, "auto_key_config_url", auto_key_config_url)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if fluentd_config_url is not None:
            pulumi.set(__self__, "fluentd_config_url", fluentd_config_url)
        if ma_config_url is not None:
            pulumi.set(__self__, "ma_config_url", ma_config_url)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Input[str]:
        """
        The kind of DiagnosticsSink.
        Expected value is 'AzureInternalMonitoringPipeline'.
        """
        return pulumi.get(self, "kind")
    @kind.setter
    def kind(self, value: pulumi.Input[str]):
        pulumi.set(self, "kind", value)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> Optional[pulumi.Input[str]]:
        """
        Azure Internal monitoring pipeline account.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="autoKeyConfigUrl")
    def auto_key_config_url(self) -> Optional[pulumi.Input[str]]:
        """
        Azure Internal monitoring pipeline autokey associated with the certificate.
        """
        return pulumi.get(self, "auto_key_config_url")
    @auto_key_config_url.setter
    def auto_key_config_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "auto_key_config_url", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the sink.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="fluentdConfigUrl")
    def fluentd_config_url(self) -> Optional[Any]:
        """
        Azure Internal monitoring agent fluentd configuration.
        """
        return pulumi.get(self, "fluentd_config_url")
    @fluentd_config_url.setter
    def fluentd_config_url(self, value: Optional[Any]):
        pulumi.set(self, "fluentd_config_url", value)
    @property
    @pulumi.getter(name="maConfigUrl")
    def ma_config_url(self) -> Optional[pulumi.Input[str]]:
        """
        Azure Internal monitoring agent configuration.
        """
        return pulumi.get(self, "ma_config_url")
    @ma_config_url.setter
    def ma_config_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ma_config_url", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the sink. This value is referenced by DiagnosticsReferenceDescription
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Azure Internal monitoring pipeline account namespace.
        """
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
@pulumi.input_type
class ContainerCodePackagePropertiesArgs:
    # NOTE: generated by the Pulumi SDK Generator. @pulumi.input_type introspects
    # this class's __init__ signature and @pulumi.getter properties, so the
    # structure must not be hand-edited.
    def __init__(__self__, *,
                 image: pulumi.Input[str],
                 name: pulumi.Input[str],
                 resources: pulumi.Input['ResourceRequirementsArgs'],
                 commands: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 diagnostics: Optional[pulumi.Input['DiagnosticsRefArgs']] = None,
                 endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]]] = None,
                 entrypoint: Optional[pulumi.Input[str]] = None,
                 environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]]] = None,
                 image_registry_credential: Optional[pulumi.Input['ImageRegistryCredentialArgs']] = None,
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]]] = None,
                 settings: Optional[pulumi.Input[Sequence[pulumi.Input['SettingArgs']]]] = None,
                 volume_refs: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]]] = None):
        """
        Describes a container and its runtime properties.
        :param pulumi.Input[str] image: The Container image to use.
        :param pulumi.Input[str] name: The name of the code package.
        :param pulumi.Input['ResourceRequirementsArgs'] resources: This type describes the resource requirements for a container or a service.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] commands: Command array to execute within the container in exec form.
        :param pulumi.Input['DiagnosticsRefArgs'] diagnostics: Reference to sinks in DiagnosticsDescription.
        :param pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]] endpoints: The endpoints exposed by this container.
        :param pulumi.Input[str] entrypoint: Override for the default entry point in the container.
        :param pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]] environment_variables: The environment variables to set in this container
        :param pulumi.Input['ImageRegistryCredentialArgs'] image_registry_credential: Image registry credential.
        :param pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]] labels: The labels to set in this container.
        :param pulumi.Input[Sequence[pulumi.Input['SettingArgs']]] settings: The settings to set in this container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\\secrets". The path for Linux container is "/var/secrets".
        :param pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]] volume_refs: The volumes to be attached to the container.
        """
        # Required fields are always stored; optional fields only when provided.
        pulumi.set(__self__, "image", image)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resources", resources)
        if commands is not None:
            pulumi.set(__self__, "commands", commands)
        if diagnostics is not None:
            pulumi.set(__self__, "diagnostics", diagnostics)
        if endpoints is not None:
            pulumi.set(__self__, "endpoints", endpoints)
        if entrypoint is not None:
            pulumi.set(__self__, "entrypoint", entrypoint)
        if environment_variables is not None:
            pulumi.set(__self__, "environment_variables", environment_variables)
        if image_registry_credential is not None:
            pulumi.set(__self__, "image_registry_credential", image_registry_credential)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if settings is not None:
            pulumi.set(__self__, "settings", settings)
        if volume_refs is not None:
            pulumi.set(__self__, "volume_refs", volume_refs)
    @property
    @pulumi.getter
    def image(self) -> pulumi.Input[str]:
        """
        The Container image to use.
        """
        return pulumi.get(self, "image")
    @image.setter
    def image(self, value: pulumi.Input[str]):
        pulumi.set(self, "image", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the code package.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def resources(self) -> pulumi.Input['ResourceRequirementsArgs']:
        """
        This type describes the resource requirements for a container or a service.
        """
        return pulumi.get(self, "resources")
    @resources.setter
    def resources(self, value: pulumi.Input['ResourceRequirementsArgs']):
        pulumi.set(self, "resources", value)
    @property
    @pulumi.getter
    def commands(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Command array to execute within the container in exec form.
        """
        return pulumi.get(self, "commands")
    @commands.setter
    def commands(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "commands", value)
    @property
    @pulumi.getter
    def diagnostics(self) -> Optional[pulumi.Input['DiagnosticsRefArgs']]:
        """
        Reference to sinks in DiagnosticsDescription.
        """
        return pulumi.get(self, "diagnostics")
    @diagnostics.setter
    def diagnostics(self, value: Optional[pulumi.Input['DiagnosticsRefArgs']]):
        pulumi.set(self, "diagnostics", value)
    @property
    @pulumi.getter
    def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]]]:
        """
        The endpoints exposed by this container.
        """
        return pulumi.get(self, "endpoints")
    @endpoints.setter
    def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]]]):
        pulumi.set(self, "endpoints", value)
    @property
    @pulumi.getter
    def entrypoint(self) -> Optional[pulumi.Input[str]]:
        """
        Override for the default entry point in the container.
        """
        return pulumi.get(self, "entrypoint")
    @entrypoint.setter
    def entrypoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "entrypoint", value)
    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]]]:
        """
        The environment variables to set in this container
        """
        return pulumi.get(self, "environment_variables")
    @environment_variables.setter
    def environment_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]]]):
        pulumi.set(self, "environment_variables", value)
    @property
    @pulumi.getter(name="imageRegistryCredential")
    def image_registry_credential(self) -> Optional[pulumi.Input['ImageRegistryCredentialArgs']]:
        """
        Image registry credential.
        """
        return pulumi.get(self, "image_registry_credential")
    @image_registry_credential.setter
    def image_registry_credential(self, value: Optional[pulumi.Input['ImageRegistryCredentialArgs']]):
        pulumi.set(self, "image_registry_credential", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]]]:
        """
        The labels to set in this container.
        """
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter
    def settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SettingArgs']]]]:
        """
        The settings to set in this container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\\secrets". The path for Linux container is "/var/secrets".
        """
        return pulumi.get(self, "settings")
    @settings.setter
    def settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SettingArgs']]]]):
        pulumi.set(self, "settings", value)
    @property
    @pulumi.getter(name="volumeRefs")
    def volume_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]]]:
        """
        The volumes to be attached to the container.
        """
        return pulumi.get(self, "volume_refs")
    @volume_refs.setter
    def volume_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]]]):
        pulumi.set(self, "volume_refs", value)
@pulumi.input_type
class ContainerLabelArgs:
    # NOTE: generated by the Pulumi SDK Generator. @pulumi.input_type introspects
    # this class, so its structure must not be hand-edited.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        Describes a container label.
        :param pulumi.Input[str] name: The name of the container label.
        :param pulumi.Input[str] value: The value of the container label.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the container label.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value of the container label.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ContainerVolumeArgs:
    # NOTE: generated by the Pulumi SDK Generator. @pulumi.input_type introspects
    # this class, so its structure must not be hand-edited.
    def __init__(__self__, *,
                 destination_path: pulumi.Input[str],
                 name: pulumi.Input[str],
                 read_only: Optional[pulumi.Input[bool]] = None):
        """
        Describes how a volume is attached to a container.
        :param pulumi.Input[str] destination_path: The path within the container at which the volume should be mounted. Only valid path characters are allowed.
        :param pulumi.Input[str] name: Name of the volume.
        :param pulumi.Input[bool] read_only: The flag indicating whether the volume is read only. Default is 'false'.
        """
        pulumi.set(__self__, "destination_path", destination_path)
        pulumi.set(__self__, "name", name)
        # Omitted read_only falls back to the service-side default ('false').
        if read_only is not None:
            pulumi.set(__self__, "read_only", read_only)
    @property
    @pulumi.getter(name="destinationPath")
    def destination_path(self) -> pulumi.Input[str]:
        """
        The path within the container at which the volume should be mounted. Only valid path characters are allowed.
        """
        return pulumi.get(self, "destination_path")
    @destination_path.setter
    def destination_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "destination_path", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the volume.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="readOnly")
    def read_only(self) -> Optional[pulumi.Input[bool]]:
        """
        The flag indicating whether the volume is read only. Default is 'false'.
        """
        return pulumi.get(self, "read_only")
    @read_only.setter
    def read_only(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "read_only", value)
@pulumi.input_type
class DiagnosticsDescriptionArgs:
    # NOTE: generated by the Pulumi SDK Generator. @pulumi.input_type introspects
    # this class, so its structure must not be hand-edited.
    def __init__(__self__, *,
                 default_sink_refs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 sinks: Optional[pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]]] = None):
        """
        Describes the diagnostics options available
        :param pulumi.Input[Sequence[pulumi.Input[str]]] default_sink_refs: The sinks to be used if diagnostics is enabled. Sink choices can be overridden at the service and code package level.
        :param pulumi.Input[bool] enabled: Status of whether or not sinks are enabled.
        :param pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]] sinks: List of supported sinks that can be referenced.
        """
        # All fields are optional; only provided values are stored.
        if default_sink_refs is not None:
            pulumi.set(__self__, "default_sink_refs", default_sink_refs)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if sinks is not None:
            pulumi.set(__self__, "sinks", sinks)
    @property
    @pulumi.getter(name="defaultSinkRefs")
    def default_sink_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The sinks to be used if diagnostics is enabled. Sink choices can be overridden at the service and code package level.
        """
        return pulumi.get(self, "default_sink_refs")
    @default_sink_refs.setter
    def default_sink_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "default_sink_refs", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Status of whether or not sinks are enabled.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def sinks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]]]:
        """
        List of supported sinks that can be referenced.
        """
        return pulumi.get(self, "sinks")
    @sinks.setter
    def sinks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]]]):
        pulumi.set(self, "sinks", value)
@pulumi.input_type
class DiagnosticsRefArgs:
    # NOTE: generated by the Pulumi SDK Generator. @pulumi.input_type introspects
    # this class, so its structure must not be hand-edited.
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 sink_refs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Reference to sinks in DiagnosticsDescription.
        :param pulumi.Input[bool] enabled: Status of whether or not sinks are enabled.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] sink_refs: List of sinks to be used if enabled. References the list of sinks in DiagnosticsDescription.
        """
        # All fields are optional; only provided values are stored.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if sink_refs is not None:
            pulumi.set(__self__, "sink_refs", sink_refs)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Status of whether or not sinks are enabled.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="sinkRefs")
    def sink_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of sinks to be used if enabled. References the list of sinks in DiagnosticsDescription.
        """
        return pulumi.get(self, "sink_refs")
    @sink_refs.setter
    def sink_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "sink_refs", value)
@pulumi.input_type
class EndpointPropertiesArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 port: Optional[pulumi.Input[int]] = None):
        """
        Describes a container endpoint.

        :param pulumi.Input[str] name: The name of the endpoint.
        :param pulumi.Input[int] port: Port used by the container.
        """
        pulumi.set(__self__, "name", name)
        # The port is optional; store it only when given.
        if port is not None:
            pulumi.set(__self__, "port", port)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the endpoint.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "name", new_value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Port used by the container.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", new_value)
@pulumi.input_type
class EnvironmentVariableArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Describes an environment variable for the container.

        :param pulumi.Input[str] name: The name of the environment variable.
        :param pulumi.Input[str] value: The value of the environment variable.
        """
        # Record only the arguments the caller actually supplied.
        for key, arg in (("name", name), ("value", value)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the environment variable.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the environment variable.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", new_value)
@pulumi.input_type
class ImageRegistryCredentialArgs:
    def __init__(__self__, *,
                 server: pulumi.Input[str],
                 username: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None):
        """
        Image registry credential.

        :param pulumi.Input[str] server: Docker image registry server, without protocol such as `http` and `https`.
        :param pulumi.Input[str] username: The username for the private registry.
        :param pulumi.Input[str] password: The password for the private registry.
        """
        pulumi.set(__self__, "server", server)
        pulumi.set(__self__, "username", username)
        # The password is optional; store it only when given.
        if password is not None:
            pulumi.set(__self__, "password", password)

    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """
        Docker image registry server, without protocol such as `http` and `https`.
        """
        return pulumi.get(self, "server")

    @server.setter
    def server(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "server", new_value)

    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        The username for the private registry.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "username", new_value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The password for the private registry.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", new_value)
@pulumi.input_type
class IngressConfigArgs:
    def __init__(__self__, *,
                 layer4: Optional[pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]]] = None,
                 qos_level: Optional[pulumi.Input[Union[str, 'IngressQoSLevel']]] = None):
        """
        Describes public connectivity configuration for the network.

        :param pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]] layer4: Configuration for layer4 public connectivity for this network.
        :param pulumi.Input[Union[str, 'IngressQoSLevel']] qos_level: The QoS tier for ingress.
        """
        # Record only the arguments the caller actually supplied.
        for key, arg in (("layer4", layer4), ("qos_level", qos_level)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def layer4(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]]]:
        """
        Configuration for layer4 public connectivity for this network.
        """
        return pulumi.get(self, "layer4")

    @layer4.setter
    def layer4(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]]]):
        pulumi.set(self, "layer4", new_value)

    @property
    @pulumi.getter(name="qosLevel")
    def qos_level(self) -> Optional[pulumi.Input[Union[str, 'IngressQoSLevel']]]:
        """
        The QoS tier for ingress.
        """
        return pulumi.get(self, "qos_level")

    @qos_level.setter
    def qos_level(self, new_value: Optional[pulumi.Input[Union[str, 'IngressQoSLevel']]]):
        pulumi.set(self, "qos_level", new_value)
@pulumi.input_type
class Layer4IngressConfigArgs:
    def __init__(__self__, *,
                 application_name: Optional[pulumi.Input[str]] = None,
                 endpoint_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 public_port: Optional[pulumi.Input[int]] = None,
                 service_name: Optional[pulumi.Input[str]] = None):
        """
        Describes the layer4 configuration for public connectivity for this network.

        :param pulumi.Input[str] application_name: The application name which contains the service to be exposed.
        :param pulumi.Input[str] endpoint_name: The service endpoint that needs to be exposed.
        :param pulumi.Input[str] name: Layer4 ingress config name.
        :param pulumi.Input[int] public_port: Specifies the public port at which the service endpoint below needs to be exposed.
        :param pulumi.Input[str] service_name: The service whose endpoint needs to be exposed at the public port.
        """
        # All arguments are optional; record only those actually supplied.
        supplied = (
            ("application_name", application_name),
            ("endpoint_name", endpoint_name),
            ("name", name),
            ("public_port", public_port),
            ("service_name", service_name),
        )
        for key, arg in supplied:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="applicationName")
    def application_name(self) -> Optional[pulumi.Input[str]]:
        """
        The application name which contains the service to be exposed.
        """
        return pulumi.get(self, "application_name")

    @application_name.setter
    def application_name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "application_name", new_value)

    @property
    @pulumi.getter(name="endpointName")
    def endpoint_name(self) -> Optional[pulumi.Input[str]]:
        """
        The service endpoint that needs to be exposed.
        """
        return pulumi.get(self, "endpoint_name")

    @endpoint_name.setter
    def endpoint_name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "endpoint_name", new_value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Layer4 ingress config name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)

    @property
    @pulumi.getter(name="publicPort")
    def public_port(self) -> Optional[pulumi.Input[int]]:
        """
        Specifies the public port at which the service endpoint below needs to be exposed.
        """
        return pulumi.get(self, "public_port")

    @public_port.setter
    def public_port(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "public_port", new_value)

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> Optional[pulumi.Input[str]]:
        """
        The service whose endpoint needs to be exposed at the public port.
        """
        return pulumi.get(self, "service_name")

    @service_name.setter
    def service_name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_name", new_value)
@pulumi.input_type
class NetworkRefArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None):
        """
        Describes a network reference in a service.

        :param pulumi.Input[str] name: Name of the network.
        """
        # The name is optional; store it only when given.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the network.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
@pulumi.input_type
class ResourceLimitsArgs:
    def __init__(__self__, *,
                 cpu: Optional[pulumi.Input[float]] = None,
                 memory_in_gb: Optional[pulumi.Input[float]] = None):
        """
        This type describes the resource limits for a given container. It describes the most amount of resources a container is allowed to use before being restarted.

        :param pulumi.Input[float] cpu: CPU limits in cores. At present, only full cores are supported.
        :param pulumi.Input[float] memory_in_gb: The memory limit in GB.
        """
        # Record only the arguments the caller actually supplied.
        for key, arg in (("cpu", cpu), ("memory_in_gb", memory_in_gb)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def cpu(self) -> Optional[pulumi.Input[float]]:
        """
        CPU limits in cores. At present, only full cores are supported.
        """
        return pulumi.get(self, "cpu")

    @cpu.setter
    def cpu(self, new_value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "cpu", new_value)

    @property
    @pulumi.getter(name="memoryInGB")
    def memory_in_gb(self) -> Optional[pulumi.Input[float]]:
        """
        The memory limit in GB.
        """
        return pulumi.get(self, "memory_in_gb")

    @memory_in_gb.setter
    def memory_in_gb(self, new_value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "memory_in_gb", new_value)
@pulumi.input_type
class ResourceRequestsArgs:
    def __init__(__self__, *,
                 cpu: pulumi.Input[float],
                 memory_in_gb: pulumi.Input[float]):
        """
        This type describes the requested resources for a given container. It describes the least amount of resources required for the container. A container can consume more than requested resources up to the specified limits before being restarted. Currently, the requested resources are treated as limits.

        :param pulumi.Input[float] cpu: Requested number of CPU cores. At present, only full cores are supported.
        :param pulumi.Input[float] memory_in_gb: The memory request in GB for this container.
        """
        # Both fields are required, so they are always stored.
        pulumi.set(__self__, "cpu", cpu)
        pulumi.set(__self__, "memory_in_gb", memory_in_gb)

    @property
    @pulumi.getter
    def cpu(self) -> pulumi.Input[float]:
        """
        Requested number of CPU cores. At present, only full cores are supported.
        """
        return pulumi.get(self, "cpu")

    @cpu.setter
    def cpu(self, new_value: pulumi.Input[float]):
        pulumi.set(self, "cpu", new_value)

    @property
    @pulumi.getter(name="memoryInGB")
    def memory_in_gb(self) -> pulumi.Input[float]:
        """
        The memory request in GB for this container.
        """
        return pulumi.get(self, "memory_in_gb")

    @memory_in_gb.setter
    def memory_in_gb(self, new_value: pulumi.Input[float]):
        pulumi.set(self, "memory_in_gb", new_value)
@pulumi.input_type
class ResourceRequirementsArgs:
    def __init__(__self__, *,
                 requests: pulumi.Input['ResourceRequestsArgs'],
                 limits: Optional[pulumi.Input['ResourceLimitsArgs']] = None):
        """
        This type describes the resource requirements for a container or a service.

        :param pulumi.Input['ResourceRequestsArgs'] requests: Describes the requested resources for a given container.
        :param pulumi.Input['ResourceLimitsArgs'] limits: Describes the maximum limits on the resources for a given container.
        """
        pulumi.set(__self__, "requests", requests)
        # Limits are optional; store them only when given.
        if limits is not None:
            pulumi.set(__self__, "limits", limits)

    @property
    @pulumi.getter
    def requests(self) -> pulumi.Input['ResourceRequestsArgs']:
        """
        Describes the requested resources for a given container.
        """
        return pulumi.get(self, "requests")

    @requests.setter
    def requests(self, new_value: pulumi.Input['ResourceRequestsArgs']):
        pulumi.set(self, "requests", new_value)

    @property
    @pulumi.getter
    def limits(self) -> Optional[pulumi.Input['ResourceLimitsArgs']]:
        """
        Describes the maximum limits on the resources for a given container.
        """
        return pulumi.get(self, "limits")

    @limits.setter
    def limits(self, new_value: Optional[pulumi.Input['ResourceLimitsArgs']]):
        pulumi.set(self, "limits", new_value)
@pulumi.input_type
class ServiceResourceDescriptionArgs:
    def __init__(__self__, *,
                 code_packages: pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]],
                 os_type: pulumi.Input[Union[str, 'OperatingSystemTypes']],
                 description: Optional[pulumi.Input[str]] = None,
                 diagnostics: Optional[pulumi.Input['DiagnosticsRefArgs']] = None,
                 health_state: Optional[pulumi.Input[Union[str, 'HealthState']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_refs: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]]] = None,
                 replica_count: Optional[pulumi.Input[int]] = None):
        """
        This type describes a service resource.

        :param pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]] code_packages: Describes the set of code packages that forms the service. A code package describes the container and the properties for running it. All the code packages are started together on the same host and share the same context (network, process etc.).
        :param pulumi.Input[Union[str, 'OperatingSystemTypes']] os_type: The Operating system type required by the code in service.
        :param pulumi.Input[str] description: User readable description of the service.
        :param pulumi.Input['DiagnosticsRefArgs'] diagnostics: Reference to sinks in DiagnosticsDescription.
        :param pulumi.Input[Union[str, 'HealthState']] health_state: The health state of a resource such as Application, Service, or Network.
        :param pulumi.Input[str] name: The name of the resource
        :param pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]] network_refs: The names of the private networks that this service needs to be part of.
        :param pulumi.Input[int] replica_count: The number of replicas of the service to create. Defaults to 1 if not specified.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "code_packages", code_packages)
        pulumi.set(__self__, "os_type", os_type)
        # Optional fields are recorded only when actually supplied.
        optional_args = (
            ("description", description),
            ("diagnostics", diagnostics),
            ("health_state", health_state),
            ("name", name),
            ("network_refs", network_refs),
            ("replica_count", replica_count),
        )
        for key, arg in optional_args:
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="codePackages")
    def code_packages(self) -> pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]]:
        """
        Describes the set of code packages that forms the service. A code package describes the container and the properties for running it. All the code packages are started together on the same host and share the same context (network, process etc.).
        """
        return pulumi.get(self, "code_packages")

    @code_packages.setter
    def code_packages(self, new_value: pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]]):
        pulumi.set(self, "code_packages", new_value)

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> pulumi.Input[Union[str, 'OperatingSystemTypes']]:
        """
        The Operating system type required by the code in service.
        """
        return pulumi.get(self, "os_type")

    @os_type.setter
    def os_type(self, new_value: pulumi.Input[Union[str, 'OperatingSystemTypes']]):
        pulumi.set(self, "os_type", new_value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        User readable description of the service.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)

    @property
    @pulumi.getter
    def diagnostics(self) -> Optional[pulumi.Input['DiagnosticsRefArgs']]:
        """
        Reference to sinks in DiagnosticsDescription.
        """
        return pulumi.get(self, "diagnostics")

    @diagnostics.setter
    def diagnostics(self, new_value: Optional[pulumi.Input['DiagnosticsRefArgs']]):
        pulumi.set(self, "diagnostics", new_value)

    @property
    @pulumi.getter(name="healthState")
    def health_state(self) -> Optional[pulumi.Input[Union[str, 'HealthState']]]:
        """
        The health state of a resource such as Application, Service, or Network.
        """
        return pulumi.get(self, "health_state")

    @health_state.setter
    def health_state(self, new_value: Optional[pulumi.Input[Union[str, 'HealthState']]]):
        pulumi.set(self, "health_state", new_value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)

    @property
    @pulumi.getter(name="networkRefs")
    def network_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]]]:
        """
        The names of the private networks that this service needs to be part of.
        """
        return pulumi.get(self, "network_refs")

    @network_refs.setter
    def network_refs(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]]]):
        pulumi.set(self, "network_refs", new_value)

    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of replicas of the service to create. Defaults to 1 if not specified.
        """
        return pulumi.get(self, "replica_count")

    @replica_count.setter
    def replica_count(self, new_value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", new_value)
@pulumi.input_type
class SettingArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Describes a setting for the container.

        :param pulumi.Input[str] name: The name of the setting.
        :param pulumi.Input[str] value: The value of the setting.
        """
        # Record only the arguments the caller actually supplied.
        for key, arg in (("name", name), ("value", value)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the setting.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the setting.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", new_value)
@pulumi.input_type
class VolumeProviderParametersAzureFileArgs:
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 share_name: pulumi.Input[str],
                 account_key: Optional[pulumi.Input[str]] = None):
        """
        This type describes a volume provided by an Azure Files file share.

        :param pulumi.Input[str] account_name: Name of the Azure storage account for the File Share.
        :param pulumi.Input[str] share_name: Name of the Azure Files file share that provides storage for the volume.
        :param pulumi.Input[str] account_key: Access key of the Azure storage account for the File Share.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "share_name", share_name)
        # The access key is optional; store it only when given.
        if account_key is not None:
            pulumi.set(__self__, "account_key", account_key)

    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Name of the Azure storage account for the File Share.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "account_name", new_value)

    @property
    @pulumi.getter(name="shareName")
    def share_name(self) -> pulumi.Input[str]:
        """
        Name of the Azure Files file share that provides storage for the volume.
        """
        return pulumi.get(self, "share_name")

    @share_name.setter
    def share_name(self, new_value: pulumi.Input[str]):
        pulumi.set(self, "share_name", new_value)

    @property
    @pulumi.getter(name="accountKey")
    def account_key(self) -> Optional[pulumi.Input[str]]:
        """
        Access key of the Azure storage account for the File Share.
        """
        return pulumi.get(self, "account_key")

    @account_key.setter
    def account_key(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_key", new_value)
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set"
] |
[((3575, 3608), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""accountName"""'}), "(name='accountName')\n", (3588, 3608), False, 'import pulumi\n'), ((3950, 3988), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""autoKeyConfigUrl"""'}), "(name='autoKeyConfigUrl')\n", (3963, 3988), False, 'import pulumi\n'), ((4730, 4768), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""fluentdConfigUrl"""'}), "(name='fluentdConfigUrl')\n", (4743, 4768), False, 'import pulumi\n'), ((5123, 5156), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""maConfigUrl"""'}), "(name='maConfigUrl')\n", (5136, 5156), False, 'import pulumi\n'), ((12552, 12594), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""environmentVariables"""'}), "(name='environmentVariables')\n", (12565, 12594), False, 'import pulumi\n'), ((13080, 13125), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""imageRegistryCredential"""'}), "(name='imageRegistryCredential')\n", (13093, 13125), False, 'import pulumi\n'), ((14547, 14579), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""volumeRefs"""'}), "(name='volumeRefs')\n", (14560, 14579), False, 'import pulumi\n'), ((16894, 16931), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""destinationPath"""'}), "(name='destinationPath')\n", (16907, 16931), False, 'import pulumi\n'), ((17609, 17639), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""readOnly"""'}), "(name='readOnly')\n", (17622, 17639), False, 'import pulumi\n'), ((19172, 19209), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""defaultSinkRefs"""'}), "(name='defaultSinkRefs')\n", (19185, 19209), False, 'import pulumi\n'), ((21563, 21593), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""sinkRefs"""'}), "(name='sinkRefs')\n", (21576, 21593), False, 'import pulumi\n'), ((27276, 27306), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""qosLevel"""'}), "(name='qosLevel')\n", (27289, 27306), False, 'import pulumi\n'), ((29197, 29234), 'pulumi.getter', 'pulumi.getter', ([], {'name': 
'"""applicationName"""'}), "(name='applicationName')\n", (29210, 29234), False, 'import pulumi\n'), ((29615, 29649), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""endpointName"""'}), "(name='endpointName')\n", (29628, 29649), False, 'import pulumi\n'), ((30298, 30330), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""publicPort"""'}), "(name='publicPort')\n", (30311, 30330), False, 'import pulumi\n'), ((30706, 30739), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""serviceName"""'}), "(name='serviceName')\n", (30719, 30739), False, 'import pulumi\n'), ((32823, 32855), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""memoryInGB"""'}), "(name='memoryInGB')\n", (32836, 32855), False, 'import pulumi\n'), ((34322, 34354), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""memoryInGB"""'}), "(name='memoryInGB')\n", (34335, 34354), False, 'import pulumi\n'), ((38814, 38848), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""codePackages"""'}), "(name='codePackages')\n", (38827, 38848), False, 'import pulumi\n'), ((39490, 39518), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""osType"""'}), "(name='osType')\n", (39503, 39518), False, 'import pulumi\n'), ((40626, 40659), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""healthState"""'}), "(name='healthState')\n", (40639, 40659), False, 'import pulumi\n'), ((41370, 41403), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkRefs"""'}), "(name='networkRefs')\n", (41383, 41403), False, 'import pulumi\n'), ((41848, 41882), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""replicaCount"""'}), "(name='replicaCount')\n", (41861, 41882), False, 'import pulumi\n'), ((44307, 44340), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""accountName"""'}), "(name='accountName')\n", (44320, 44340), False, 'import pulumi\n'), ((44672, 44703), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""shareName"""'}), "(name='shareName')\n", (44685, 44703), False, 'import pulumi\n'), ((45044, 45076), 
'pulumi.getter', 'pulumi.getter', ([], {'name': '"""accountKey"""'}), "(name='accountKey')\n", (45057, 45076), False, 'import pulumi\n'), ((2444, 2507), 'pulumi.set', 'pulumi.set', (['__self__', '"""kind"""', '"""AzureInternalMonitoringPipeline"""'], {}), "(__self__, 'kind', 'AzureInternalMonitoringPipeline')\n", (2454, 2507), False, 'import pulumi\n'), ((3426, 3450), 'pulumi.get', 'pulumi.get', (['self', '"""kind"""'], {}), "(self, 'kind')\n", (3436, 3450), False, 'import pulumi\n'), ((3523, 3554), 'pulumi.set', 'pulumi.set', (['self', '"""kind"""', 'value'], {}), "(self, 'kind', value)\n", (3533, 3554), False, 'import pulumi\n'), ((3759, 3791), 'pulumi.get', 'pulumi.get', (['self', '"""account_name"""'], {}), "(self, 'account_name')\n", (3769, 3791), False, 'import pulumi\n'), ((3890, 3929), 'pulumi.set', 'pulumi.set', (['self', '"""account_name"""', 'value'], {}), "(self, 'account_name', value)\n", (3900, 3929), False, 'import pulumi\n'), ((4178, 4217), 'pulumi.get', 'pulumi.get', (['self', '"""auto_key_config_url"""'], {}), "(self, 'auto_key_config_url')\n", (4188, 4217), False, 'import pulumi\n'), ((4330, 4376), 'pulumi.set', 'pulumi.set', (['self', '"""auto_key_config_url"""', 'value'], {}), "(self, 'auto_key_config_url', value)\n", (4340, 4376), False, 'import pulumi\n'), ((4543, 4574), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (4553, 4574), False, 'import pulumi\n'), ((4671, 4709), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (4681, 4709), False, 'import pulumi\n'), ((4922, 4960), 'pulumi.get', 'pulumi.get', (['self', '"""fluentd_config_url"""'], {}), "(self, 'fluentd_config_url')\n", (4932, 4960), False, 'import pulumi\n'), ((5057, 5102), 'pulumi.set', 'pulumi.set', (['self', '"""fluentd_config_url"""', 'value'], {}), "(self, 'fluentd_config_url', value)\n", (5067, 5102), False, 'import pulumi\n'), ((5311, 5344), 'pulumi.get', 'pulumi.get', 
(['self', '"""ma_config_url"""'], {}), "(self, 'ma_config_url')\n", (5321, 5344), False, 'import pulumi\n'), ((5445, 5485), 'pulumi.set', 'pulumi.set', (['self', '"""ma_config_url"""', 'value'], {}), "(self, 'ma_config_url', value)\n", (5455, 5485), False, 'import pulumi\n'), ((5696, 5720), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (5706, 5720), False, 'import pulumi\n'), ((5803, 5834), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (5813, 5834), False, 'import pulumi\n'), ((6026, 6055), 'pulumi.get', 'pulumi.get', (['self', '"""namespace"""'], {}), "(self, 'namespace')\n", (6036, 6055), False, 'import pulumi\n'), ((6148, 6184), 'pulumi.set', 'pulumi.set', (['self', '"""namespace"""', 'value'], {}), "(self, 'namespace', value)\n", (6158, 6184), False, 'import pulumi\n'), ((8965, 9001), 'pulumi.set', 'pulumi.set', (['__self__', '"""image"""', 'image'], {}), "(__self__, 'image', image)\n", (8975, 9001), False, 'import pulumi\n'), ((9010, 9044), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (9020, 9044), False, 'import pulumi\n'), ((9053, 9097), 'pulumi.set', 'pulumi.set', (['__self__', '"""resources"""', 'resources'], {}), "(__self__, 'resources', resources)\n", (9063, 9097), False, 'import pulumi\n'), ((10152, 10177), 'pulumi.get', 'pulumi.get', (['self', '"""image"""'], {}), "(self, 'image')\n", (10162, 10177), False, 'import pulumi\n'), ((10252, 10284), 'pulumi.set', 'pulumi.set', (['self', '"""image"""', 'value'], {}), "(self, 'image', value)\n", (10262, 10284), False, 'import pulumi\n'), ((10437, 10461), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (10447, 10461), False, 'import pulumi\n'), ((10534, 10565), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (10544, 10565), False, 'import pulumi\n'), ((10792, 10821), 'pulumi.get', 'pulumi.get', (['self', 
'"""resources"""'], {}), "(self, 'resources')\n", (10802, 10821), False, 'import pulumi\n'), ((10927, 10963), 'pulumi.set', 'pulumi.set', (['self', '"""resources"""', 'value'], {}), "(self, 'resources', value)\n", (10937, 10963), False, 'import pulumi\n'), ((11184, 11212), 'pulumi.get', 'pulumi.get', (['self', '"""commands"""'], {}), "(self, 'commands')\n", (11194, 11212), False, 'import pulumi\n'), ((11327, 11362), 'pulumi.set', 'pulumi.set', (['self', '"""commands"""', 'value'], {}), "(self, 'commands', value)\n", (11337, 11362), False, 'import pulumi\n'), ((11565, 11596), 'pulumi.get', 'pulumi.get', (['self', '"""diagnostics"""'], {}), "(self, 'diagnostics')\n", (11575, 11596), False, 'import pulumi\n'), ((11710, 11748), 'pulumi.set', 'pulumi.set', (['self', '"""diagnostics"""', 'value'], {}), "(self, 'diagnostics', value)\n", (11720, 11748), False, 'import pulumi\n'), ((11972, 12001), 'pulumi.get', 'pulumi.get', (['self', '"""endpoints"""'], {}), "(self, 'endpoints')\n", (11982, 12001), False, 'import pulumi\n'), ((12139, 12175), 'pulumi.set', 'pulumi.set', (['self', '"""endpoints"""', 'value'], {}), "(self, 'endpoints', value)\n", (12149, 12175), False, 'import pulumi\n'), ((12369, 12399), 'pulumi.get', 'pulumi.get', (['self', '"""entrypoint"""'], {}), "(self, 'entrypoint')\n", (12379, 12399), False, 'import pulumi\n'), ((12494, 12531), 'pulumi.set', 'pulumi.set', (['self', '"""entrypoint"""', 'value'], {}), "(self, 'entrypoint', value)\n", (12504, 12531), False, 'import pulumi\n'), ((12807, 12848), 'pulumi.get', 'pulumi.get', (['self', '"""environment_variables"""'], {}), "(self, 'environment_variables')\n", (12817, 12848), False, 'import pulumi\n'), ((13011, 13059), 'pulumi.set', 'pulumi.set', (['self', '"""environment_variables"""', 'value'], {}), "(self, 'environment_variables', value)\n", (13021, 13059), False, 'import pulumi\n'), ((13298, 13343), 'pulumi.get', 'pulumi.get', (['self', '"""image_registry_credential"""'], {}), "(self, 
'image_registry_credential')\n", (13308, 13343), False, 'import pulumi\n'), ((13494, 13546), 'pulumi.set', 'pulumi.set', (['self', '"""image_registry_credential"""', 'value'], {}), "(self, 'image_registry_credential', value)\n", (13504, 13546), False, 'import pulumi\n'), ((13759, 13785), 'pulumi.get', 'pulumi.get', (['self', '"""labels"""'], {}), "(self, 'labels')\n", (13769, 13785), False, 'import pulumi\n'), ((13913, 13946), 'pulumi.set', 'pulumi.set', (['self', '"""labels"""', 'value'], {}), "(self, 'labels', value)\n", (13923, 13946), False, 'import pulumi\n'), ((14338, 14366), 'pulumi.get', 'pulumi.get', (['self', '"""settings"""'], {}), "(self, 'settings')\n", (14348, 14366), False, 'import pulumi\n'), ((14491, 14526), 'pulumi.set', 'pulumi.set', (['self', '"""settings"""', 'value'], {}), "(self, 'settings', value)\n", (14501, 14526), False, 'import pulumi\n'), ((14772, 14803), 'pulumi.get', 'pulumi.get', (['self', '"""volume_refs"""'], {}), "(self, 'volume_refs')\n", (14782, 14803), False, 'import pulumi\n'), ((14942, 14980), 'pulumi.set', 'pulumi.set', (['self', '"""volume_refs"""', 'value'], {}), "(self, 'volume_refs', value)\n", (14952, 14980), False, 'import pulumi\n'), ((15359, 15393), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (15369, 15393), False, 'import pulumi\n'), ((15402, 15438), 'pulumi.set', 'pulumi.set', (['__self__', '"""value"""', 'value'], {}), "(__self__, 'value', value)\n", (15412, 15438), False, 'import pulumi\n'), ((15594, 15618), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (15604, 15618), False, 'import pulumi\n'), ((15691, 15722), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (15701, 15722), False, 'import pulumi\n'), ((15880, 15905), 'pulumi.get', 'pulumi.get', (['self', '"""value"""'], {}), "(self, 'value')\n", (15890, 15905), False, 'import pulumi\n'), ((15980, 16012), 'pulumi.set', 
'pulumi.set', (['self', '"""value"""', 'value'], {}), "(self, 'value', value)\n", (15990, 16012), False, 'import pulumi\n'), ((16681, 16739), 'pulumi.set', 'pulumi.set', (['__self__', '"""destination_path"""', 'destination_path'], {}), "(__self__, 'destination_path', destination_path)\n", (16691, 16739), False, 'import pulumi\n'), ((16748, 16782), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (16758, 16782), False, 'import pulumi\n'), ((17141, 17177), 'pulumi.get', 'pulumi.get', (['self', '"""destination_path"""'], {}), "(self, 'destination_path')\n", (17151, 17177), False, 'import pulumi\n'), ((17274, 17317), 'pulumi.set', 'pulumi.set', (['self', '"""destination_path"""', 'value'], {}), "(self, 'destination_path', value)\n", (17284, 17317), False, 'import pulumi\n'), ((17460, 17484), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (17470, 17484), False, 'import pulumi\n'), ((17557, 17588), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (17567, 17588), False, 'import pulumi\n'), ((17817, 17846), 'pulumi.get', 'pulumi.get', (['self', '"""read_only"""'], {}), "(self, 'read_only')\n", (17827, 17846), False, 'import pulumi\n'), ((17940, 17976), 'pulumi.set', 'pulumi.set', (['self', '"""read_only"""', 'value'], {}), "(self, 'read_only', value)\n", (17950, 17976), False, 'import pulumi\n'), ((19463, 19500), 'pulumi.get', 'pulumi.get', (['self', '"""default_sink_refs"""'], {}), "(self, 'default_sink_refs')\n", (19473, 19500), False, 'import pulumi\n'), ((19633, 19677), 'pulumi.set', 'pulumi.set', (['self', '"""default_sink_refs"""', 'value'], {}), "(self, 'default_sink_refs', value)\n", (19643, 19677), False, 'import pulumi\n'), ((19858, 19885), 'pulumi.get', 'pulumi.get', (['self', '"""enabled"""'], {}), "(self, 'enabled')\n", (19868, 19885), False, 'import pulumi\n'), ((19975, 20009), 'pulumi.set', 'pulumi.set', (['self', '"""enabled"""', 
'value'], {}), "(self, 'enabled', value)\n", (19985, 20009), False, 'import pulumi\n'), ((20264, 20289), 'pulumi.get', 'pulumi.get', (['self', '"""sinks"""'], {}), "(self, 'sinks')\n", (20274, 20289), False, 'import pulumi\n'), ((20447, 20479), 'pulumi.set', 'pulumi.set', (['self', '"""sinks"""', 'value'], {}), "(self, 'sinks', value)\n", (20457, 20479), False, 'import pulumi\n'), ((21391, 21418), 'pulumi.get', 'pulumi.get', (['self', '"""enabled"""'], {}), "(self, 'enabled')\n", (21401, 21418), False, 'import pulumi\n'), ((21508, 21542), 'pulumi.set', 'pulumi.set', (['self', '"""enabled"""', 'value'], {}), "(self, 'enabled', value)\n", (21518, 21542), False, 'import pulumi\n'), ((21814, 21843), 'pulumi.get', 'pulumi.get', (['self', '"""sink_refs"""'], {}), "(self, 'sink_refs')\n", (21824, 21843), False, 'import pulumi\n'), ((21960, 21996), 'pulumi.set', 'pulumi.set', (['self', '"""sink_refs"""', 'value'], {}), "(self, 'sink_refs', value)\n", (21970, 21996), False, 'import pulumi\n'), ((22384, 22418), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (22394, 22418), False, 'import pulumi\n'), ((22643, 22667), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (22653, 22667), False, 'import pulumi\n'), ((22740, 22771), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (22750, 22771), False, 'import pulumi\n'), ((22932, 22956), 'pulumi.get', 'pulumi.get', (['self', '"""port"""'], {}), "(self, 'port')\n", (22942, 22956), False, 'import pulumi\n'), ((23039, 23070), 'pulumi.set', 'pulumi.set', (['self', '"""port"""', 'value'], {}), "(self, 'port', value)\n", (23049, 23070), False, 'import pulumi\n'), ((23839, 23863), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (23849, 23863), False, 'import pulumi\n'), ((23946, 23977), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", 
(23956, 23977), False, 'import pulumi\n'), ((24150, 24175), 'pulumi.get', 'pulumi.get', (['self', '"""value"""'], {}), "(self, 'value')\n", (24160, 24175), False, 'import pulumi\n'), ((24260, 24292), 'pulumi.set', 'pulumi.set', (['self', '"""value"""', 'value'], {}), "(self, 'value', value)\n", (24270, 24292), False, 'import pulumi\n'), ((24880, 24918), 'pulumi.set', 'pulumi.set', (['__self__', '"""server"""', 'server'], {}), "(__self__, 'server', server)\n", (24890, 24918), False, 'import pulumi\n'), ((24927, 24969), 'pulumi.set', 'pulumi.set', (['__self__', '"""username"""', 'username'], {}), "(__self__, 'username', username)\n", (24937, 24969), False, 'import pulumi\n'), ((25257, 25283), 'pulumi.get', 'pulumi.get', (['self', '"""server"""'], {}), "(self, 'server')\n", (25267, 25283), False, 'import pulumi\n'), ((25360, 25393), 'pulumi.set', 'pulumi.set', (['self', '"""server"""', 'value'], {}), "(self, 'server', value)\n", (25370, 25393), False, 'import pulumi\n'), ((25559, 25587), 'pulumi.get', 'pulumi.get', (['self', '"""username"""'], {}), "(self, 'username')\n", (25569, 25587), False, 'import pulumi\n'), ((25668, 25703), 'pulumi.set', 'pulumi.set', (['self', '"""username"""', 'value'], {}), "(self, 'username', value)\n", (25678, 25703), False, 'import pulumi\n'), ((25879, 25907), 'pulumi.get', 'pulumi.get', (['self', '"""password"""'], {}), "(self, 'password')\n", (25889, 25907), False, 'import pulumi\n'), ((25998, 26033), 'pulumi.set', 'pulumi.set', (['self', '"""password"""', 'value'], {}), "(self, 'password', value)\n", (26008, 26033), False, 'import pulumi\n'), ((27063, 27089), 'pulumi.get', 'pulumi.get', (['self', '"""layer4"""'], {}), "(self, 'layer4')\n", (27073, 27089), False, 'import pulumi\n'), ((27222, 27255), 'pulumi.set', 'pulumi.set', (['self', '"""layer4"""', 'value'], {}), "(self, 'layer4', value)\n", (27232, 27255), False, 'import pulumi\n'), ((27462, 27491), 'pulumi.get', 'pulumi.get', (['self', '"""qos_level"""'], {}), "(self, 
'qos_level')\n", (27472, 27491), False, 'import pulumi\n'), ((27610, 27646), 'pulumi.set', 'pulumi.set', (['self', '"""qos_level"""', 'value'], {}), "(self, 'qos_level', value)\n", (27620, 27646), False, 'import pulumi\n'), ((29408, 29444), 'pulumi.get', 'pulumi.get', (['self', '"""application_name"""'], {}), "(self, 'application_name')\n", (29418, 29444), False, 'import pulumi\n'), ((29551, 29594), 'pulumi.set', 'pulumi.set', (['self', '"""application_name"""', 'value'], {}), "(self, 'application_name', value)\n", (29561, 29594), False, 'import pulumi\n'), ((29804, 29837), 'pulumi.get', 'pulumi.get', (['self', '"""endpoint_name"""'], {}), "(self, 'endpoint_name')\n", (29814, 29837), False, 'import pulumi\n'), ((29938, 29978), 'pulumi.set', 'pulumi.set', (['self', '"""endpoint_name"""', 'value'], {}), "(self, 'endpoint_name', value)\n", (29948, 29978), False, 'import pulumi\n'), ((30139, 30163), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (30149, 30163), False, 'import pulumi\n'), ((30246, 30277), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (30256, 30277), False, 'import pulumi\n'), ((30519, 30550), 'pulumi.get', 'pulumi.get', (['self', '"""public_port"""'], {}), "(self, 'public_port')\n", (30529, 30550), False, 'import pulumi\n'), ((30647, 30685), 'pulumi.set', 'pulumi.set', (['self', '"""public_port"""', 'value'], {}), "(self, 'public_port', value)\n", (30657, 30685), False, 'import pulumi\n'), ((30913, 30945), 'pulumi.get', 'pulumi.get', (['self', '"""service_name"""'], {}), "(self, 'service_name')\n", (30923, 30945), False, 'import pulumi\n'), ((31044, 31083), 'pulumi.set', 'pulumi.set', (['self', '"""service_name"""', 'value'], {}), "(self, 'service_name', value)\n", (31054, 31083), False, 'import pulumi\n'), ((31582, 31606), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (31592, 31606), False, 'import pulumi\n'), ((31689, 31720), 'pulumi.set', 
'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (31699, 31720), False, 'import pulumi\n'), ((32666, 32689), 'pulumi.get', 'pulumi.get', (['self', '"""cpu"""'], {}), "(self, 'cpu')\n", (32676, 32689), False, 'import pulumi\n'), ((32772, 32802), 'pulumi.set', 'pulumi.set', (['self', '"""cpu"""', 'value'], {}), "(self, 'cpu', value)\n", (32782, 32802), False, 'import pulumi\n'), ((32988, 33020), 'pulumi.get', 'pulumi.get', (['self', '"""memory_in_gb"""'], {}), "(self, 'memory_in_gb')\n", (32998, 33020), False, 'import pulumi\n'), ((33121, 33160), 'pulumi.set', 'pulumi.set', (['self', '"""memory_in_gb"""', 'value'], {}), "(self, 'memory_in_gb', value)\n", (33131, 33160), False, 'import pulumi\n'), ((33886, 33918), 'pulumi.set', 'pulumi.set', (['__self__', '"""cpu"""', 'cpu'], {}), "(__self__, 'cpu', cpu)\n", (33896, 33918), False, 'import pulumi\n'), ((33927, 33977), 'pulumi.set', 'pulumi.set', (['__self__', '"""memory_in_gb"""', 'memory_in_gb'], {}), "(__self__, 'memory_in_gb', memory_in_gb)\n", (33937, 33977), False, 'import pulumi\n'), ((34175, 34198), 'pulumi.get', 'pulumi.get', (['self', '"""cpu"""'], {}), "(self, 'cpu')\n", (34185, 34198), False, 'import pulumi\n'), ((34271, 34301), 'pulumi.set', 'pulumi.set', (['self', '"""cpu"""', 'value'], {}), "(self, 'cpu', value)\n", (34281, 34301), False, 'import pulumi\n'), ((34498, 34530), 'pulumi.get', 'pulumi.get', (['self', '"""memory_in_gb"""'], {}), "(self, 'memory_in_gb')\n", (34508, 34530), False, 'import pulumi\n'), ((34621, 34660), 'pulumi.set', 'pulumi.set', (['self', '"""memory_in_gb"""', 'value'], {}), "(self, 'memory_in_gb', value)\n", (34631, 34660), False, 'import pulumi\n'), ((35250, 35292), 'pulumi.set', 'pulumi.set', (['__self__', '"""requests"""', 'requests'], {}), "(__self__, 'requests', requests)\n", (35260, 35292), False, 'import pulumi\n'), ((35577, 35605), 'pulumi.get', 'pulumi.get', (['self', '"""requests"""'], {}), "(self, 'requests')\n", (35587, 35605), False, 
'import pulumi\n'), ((35705, 35740), 'pulumi.set', 'pulumi.set', (['self', '"""requests"""', 'value'], {}), "(self, 'requests', value)\n", (35715, 35740), False, 'import pulumi\n'), ((35961, 35987), 'pulumi.get', 'pulumi.get', (['self', '"""limits"""'], {}), "(self, 'limits')\n", (35971, 35987), False, 'import pulumi\n'), ((36091, 36124), 'pulumi.set', 'pulumi.set', (['self', '"""limits"""', 'value'], {}), "(self, 'limits', value)\n", (36101, 36124), False, 'import pulumi\n'), ((38119, 38171), 'pulumi.set', 'pulumi.set', (['__self__', '"""code_packages"""', 'code_packages'], {}), "(__self__, 'code_packages', code_packages)\n", (38129, 38171), False, 'import pulumi\n'), ((38180, 38220), 'pulumi.set', 'pulumi.set', (['__self__', '"""os_type"""', 'os_type'], {}), "(__self__, 'os_type', os_type)\n", (38190, 38220), False, 'import pulumi\n'), ((39248, 39281), 'pulumi.get', 'pulumi.get', (['self', '"""code_packages"""'], {}), "(self, 'code_packages')\n", (39258, 39281), False, 'import pulumi\n'), ((39429, 39469), 'pulumi.set', 'pulumi.set', (['self', '"""code_packages"""', 'value'], {}), "(self, 'code_packages', value)\n", (39439, 39469), False, 'import pulumi\n'), ((39700, 39727), 'pulumi.get', 'pulumi.get', (['self', '"""os_type"""'], {}), "(self, 'os_type')\n", (39710, 39727), False, 'import pulumi\n'), ((39837, 39871), 'pulumi.set', 'pulumi.set', (['self', '"""os_type"""', 'value'], {}), "(self, 'os_type', value)\n", (39847, 39871), False, 'import pulumi\n'), ((40053, 40084), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (40063, 40084), False, 'import pulumi\n'), ((40181, 40219), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (40191, 40219), False, 'import pulumi\n'), ((40422, 40453), 'pulumi.get', 'pulumi.get', (['self', '"""diagnostics"""'], {}), "(self, 'diagnostics')\n", (40432, 40453), False, 'import pulumi\n'), ((40567, 40605), 'pulumi.set', 'pulumi.set', 
(['self', '"""diagnostics"""', 'value'], {}), "(self, 'diagnostics', value)\n", (40577, 40605), False, 'import pulumi\n'), ((40861, 40893), 'pulumi.get', 'pulumi.get', (['self', '"""health_state"""'], {}), "(self, 'health_state')\n", (40871, 40893), False, 'import pulumi\n'), ((41014, 41053), 'pulumi.set', 'pulumi.set', (['self', '"""health_state"""', 'value'], {}), "(self, 'health_state', value)\n", (41024, 41053), False, 'import pulumi\n'), ((41211, 41235), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (41221, 41235), False, 'import pulumi\n'), ((41318, 41349), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (41328, 41349), False, 'import pulumi\n'), ((41620, 41652), 'pulumi.get', 'pulumi.get', (['self', '"""network_refs"""'], {}), "(self, 'network_refs')\n", (41630, 41652), False, 'import pulumi\n'), ((41788, 41827), 'pulumi.set', 'pulumi.set', (['self', '"""network_refs"""', 'value'], {}), "(self, 'network_refs', value)\n", (41798, 41827), False, 'import pulumi\n'), ((42071, 42104), 'pulumi.get', 'pulumi.get', (['self', '"""replica_count"""'], {}), "(self, 'replica_count')\n", (42081, 42104), False, 'import pulumi\n'), ((42205, 42245), 'pulumi.set', 'pulumi.set', (['self', '"""replica_count"""', 'value'], {}), "(self, 'replica_count', value)\n", (42215, 42245), False, 'import pulumi\n'), ((42949, 42973), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (42959, 42973), False, 'import pulumi\n'), ((43056, 43087), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (43066, 43087), False, 'import pulumi\n'), ((43247, 43272), 'pulumi.get', 'pulumi.get', (['self', '"""value"""'], {}), "(self, 'value')\n", (43257, 43272), False, 'import pulumi\n'), ((43357, 43389), 'pulumi.set', 'pulumi.set', (['self', '"""value"""', 'value'], {}), "(self, 'value', value)\n", (43367, 43389), False, 'import pulumi\n'), ((44084, 44134), 
'pulumi.set', 'pulumi.set', (['__self__', '"""account_name"""', 'account_name'], {}), "(__self__, 'account_name', account_name)\n", (44094, 44134), False, 'import pulumi\n'), ((44143, 44189), 'pulumi.set', 'pulumi.set', (['__self__', '"""share_name"""', 'share_name'], {}), "(__self__, 'share_name', share_name)\n", (44153, 44189), False, 'import pulumi\n'), ((44491, 44523), 'pulumi.get', 'pulumi.get', (['self', '"""account_name"""'], {}), "(self, 'account_name')\n", (44501, 44523), False, 'import pulumi\n'), ((44612, 44651), 'pulumi.set', 'pulumi.set', (['self', '"""account_name"""', 'value'], {}), "(self, 'account_name', value)\n", (44622, 44651), False, 'import pulumi\n'), ((44871, 44901), 'pulumi.get', 'pulumi.get', (['self', '"""share_name"""'], {}), "(self, 'share_name')\n", (44881, 44901), False, 'import pulumi\n'), ((44986, 45023), 'pulumi.set', 'pulumi.set', (['self', '"""share_name"""', 'value'], {}), "(self, 'share_name', value)\n", (44996, 45023), False, 'import pulumi\n'), ((45242, 45273), 'pulumi.get', 'pulumi.get', (['self', '"""account_key"""'], {}), "(self, 'account_key')\n", (45252, 45273), False, 'import pulumi\n'), ((45370, 45408), 'pulumi.set', 'pulumi.set', (['self', '"""account_key"""', 'value'], {}), "(self, 'account_key', value)\n", (45380, 45408), False, 'import pulumi\n'), ((2557, 2607), 'pulumi.set', 'pulumi.set', (['__self__', '"""account_name"""', 'account_name'], {}), "(__self__, 'account_name', account_name)\n", (2567, 2607), False, 'import pulumi\n'), ((2664, 2728), 'pulumi.set', 'pulumi.set', (['__self__', '"""auto_key_config_url"""', 'auto_key_config_url'], {}), "(__self__, 'auto_key_config_url', auto_key_config_url)\n", (2674, 2728), False, 'import pulumi\n'), ((2777, 2825), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (2787, 2825), False, 'import pulumi\n'), ((2881, 2943), 'pulumi.set', 'pulumi.set', (['__self__', '"""fluentd_config_url"""', 
'fluentd_config_url'], {}), "(__self__, 'fluentd_config_url', fluentd_config_url)\n", (2891, 2943), False, 'import pulumi\n'), ((2994, 3046), 'pulumi.set', 'pulumi.set', (['__self__', '"""ma_config_url"""', 'ma_config_url'], {}), "(__self__, 'ma_config_url', ma_config_url)\n", (3004, 3046), False, 'import pulumi\n'), ((3088, 3122), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (3098, 3122), False, 'import pulumi\n'), ((3169, 3213), 'pulumi.set', 'pulumi.set', (['__self__', '"""namespace"""', 'namespace'], {}), "(__self__, 'namespace', namespace)\n", (3179, 3213), False, 'import pulumi\n'), ((9143, 9185), 'pulumi.set', 'pulumi.set', (['__self__', '"""commands"""', 'commands'], {}), "(__self__, 'commands', commands)\n", (9153, 9185), False, 'import pulumi\n'), ((9234, 9282), 'pulumi.set', 'pulumi.set', (['__self__', '"""diagnostics"""', 'diagnostics'], {}), "(__self__, 'diagnostics', diagnostics)\n", (9244, 9282), False, 'import pulumi\n'), ((9329, 9373), 'pulumi.set', 'pulumi.set', (['__self__', '"""endpoints"""', 'endpoints'], {}), "(__self__, 'endpoints', endpoints)\n", (9339, 9373), False, 'import pulumi\n'), ((9421, 9467), 'pulumi.set', 'pulumi.set', (['__self__', '"""entrypoint"""', 'entrypoint'], {}), "(__self__, 'entrypoint', entrypoint)\n", (9431, 9467), False, 'import pulumi\n'), ((9526, 9594), 'pulumi.set', 'pulumi.set', (['__self__', '"""environment_variables"""', 'environment_variables'], {}), "(__self__, 'environment_variables', environment_variables)\n", (9536, 9594), False, 'import pulumi\n'), ((9657, 9733), 'pulumi.set', 'pulumi.set', (['__self__', '"""image_registry_credential"""', 'image_registry_credential'], {}), "(__self__, 'image_registry_credential', image_registry_credential)\n", (9667, 9733), False, 'import pulumi\n'), ((9777, 9815), 'pulumi.set', 'pulumi.set', (['__self__', '"""labels"""', 'labels'], {}), "(__self__, 'labels', labels)\n", (9787, 9815), False, 'import pulumi\n'), ((9861, 
9903), 'pulumi.set', 'pulumi.set', (['__self__', '"""settings"""', 'settings'], {}), "(__self__, 'settings', settings)\n", (9871, 9903), False, 'import pulumi\n'), ((9952, 10000), 'pulumi.set', 'pulumi.set', (['__self__', '"""volume_refs"""', 'volume_refs'], {}), "(__self__, 'volume_refs', volume_refs)\n", (9962, 10000), False, 'import pulumi\n'), ((16829, 16873), 'pulumi.set', 'pulumi.set', (['__self__', '"""read_only"""', 'read_only'], {}), "(__self__, 'read_only', read_only)\n", (16839, 16873), False, 'import pulumi\n'), ((18927, 18987), 'pulumi.set', 'pulumi.set', (['__self__', '"""default_sink_refs"""', 'default_sink_refs'], {}), "(__self__, 'default_sink_refs', default_sink_refs)\n", (18937, 18987), False, 'import pulumi\n'), ((19032, 19072), 'pulumi.set', 'pulumi.set', (['__self__', '"""enabled"""', 'enabled'], {}), "(__self__, 'enabled', enabled)\n", (19042, 19072), False, 'import pulumi\n'), ((19115, 19151), 'pulumi.set', 'pulumi.set', (['__self__', '"""sinks"""', 'sinks'], {}), "(__self__, 'sinks', sinks)\n", (19125, 19151), False, 'import pulumi\n'), ((21079, 21119), 'pulumi.set', 'pulumi.set', (['__self__', '"""enabled"""', 'enabled'], {}), "(__self__, 'enabled', enabled)\n", (21089, 21119), False, 'import pulumi\n'), ((21166, 21210), 'pulumi.set', 'pulumi.set', (['__self__', '"""sink_refs"""', 'sink_refs'], {}), "(__self__, 'sink_refs', sink_refs)\n", (21176, 21210), False, 'import pulumi\n'), ((22460, 22494), 'pulumi.set', 'pulumi.set', (['__self__', '"""port"""', 'port'], {}), "(__self__, 'port', port)\n", (22470, 22494), False, 'import pulumi\n'), ((23555, 23589), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (23565, 23589), False, 'import pulumi\n'), ((23632, 23668), 'pulumi.set', 'pulumi.set', (['__self__', '"""value"""', 'value'], {}), "(__self__, 'value', value)\n", (23642, 23668), False, 'import pulumi\n'), ((25015, 25057), 'pulumi.set', 'pulumi.set', (['__self__', '"""password"""', 
'password'], {}), "(__self__, 'password', password)\n", (25025, 25057), False, 'import pulumi\n'), ((26690, 26728), 'pulumi.set', 'pulumi.set', (['__self__', '"""layer4"""', 'layer4'], {}), "(__self__, 'layer4', layer4)\n", (26700, 26728), False, 'import pulumi\n'), ((26775, 26819), 'pulumi.set', 'pulumi.set', (['__self__', '"""qos_level"""', 'qos_level'], {}), "(__self__, 'qos_level', qos_level)\n", (26785, 26819), False, 'import pulumi\n'), ((28742, 28800), 'pulumi.set', 'pulumi.set', (['__self__', '"""application_name"""', 'application_name'], {}), "(__self__, 'application_name', application_name)\n", (28752, 28800), False, 'import pulumi\n'), ((28851, 28903), 'pulumi.set', 'pulumi.set', (['__self__', '"""endpoint_name"""', 'endpoint_name'], {}), "(__self__, 'endpoint_name', endpoint_name)\n", (28861, 28903), False, 'import pulumi\n'), ((28945, 28979), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (28955, 28979), False, 'import pulumi\n'), ((29028, 29076), 'pulumi.set', 'pulumi.set', (['__self__', '"""public_port"""', 'public_port'], {}), "(__self__, 'public_port', public_port)\n", (29038, 29076), False, 'import pulumi\n'), ((29126, 29176), 'pulumi.set', 'pulumi.set', (['__self__', '"""service_name"""', 'service_name'], {}), "(__self__, 'service_name', service_name)\n", (29136, 29176), False, 'import pulumi\n'), ((31394, 31428), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (31404, 31428), False, 'import pulumi\n'), ((32336, 32368), 'pulumi.set', 'pulumi.set', (['__self__', '"""cpu"""', 'cpu'], {}), "(__self__, 'cpu', cpu)\n", (32346, 32368), False, 'import pulumi\n'), ((32418, 32468), 'pulumi.set', 'pulumi.set', (['__self__', '"""memory_in_gb"""', 'memory_in_gb'], {}), "(__self__, 'memory_in_gb', memory_in_gb)\n", (32428, 32468), False, 'import pulumi\n'), ((35336, 35374), 'pulumi.set', 'pulumi.set', (['__self__', '"""limits"""', 'limits'], {}), 
"(__self__, 'limits', limits)\n", (35346, 35374), False, 'import pulumi\n'), ((38269, 38317), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (38279, 38317), False, 'import pulumi\n'), ((38366, 38414), 'pulumi.set', 'pulumi.set', (['__self__', '"""diagnostics"""', 'diagnostics'], {}), "(__self__, 'diagnostics', diagnostics)\n", (38376, 38414), False, 'import pulumi\n'), ((38464, 38514), 'pulumi.set', 'pulumi.set', (['__self__', '"""health_state"""', 'health_state'], {}), "(__self__, 'health_state', health_state)\n", (38474, 38514), False, 'import pulumi\n'), ((38556, 38590), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (38566, 38590), False, 'import pulumi\n'), ((38640, 38690), 'pulumi.set', 'pulumi.set', (['__self__', '"""network_refs"""', 'network_refs'], {}), "(__self__, 'network_refs', network_refs)\n", (38650, 38690), False, 'import pulumi\n'), ((38741, 38793), 'pulumi.set', 'pulumi.set', (['__self__', '"""replica_count"""', 'replica_count'], {}), "(__self__, 'replica_count', replica_count)\n", (38751, 38793), False, 'import pulumi\n'), ((42678, 42712), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (42688, 42712), False, 'import pulumi\n'), ((42755, 42791), 'pulumi.set', 'pulumi.set', (['__self__', '"""value"""', 'value'], {}), "(__self__, 'value', value)\n", (42765, 42791), False, 'import pulumi\n'), ((44238, 44286), 'pulumi.set', 'pulumi.set', (['__self__', '"""account_key"""', 'account_key'], {}), "(__self__, 'account_key', account_key)\n", (44248, 44286), False, 'import pulumi\n')]
|
""" Cisco_IOS_XR_tunnel_nve_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR tunnel\-nve package operational data.
This module contains definitions
for the following management objects\:
nve\: NVE operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Nve(object):
"""
NVE operational data
.. attribute:: interfaces
Table for NVE interface attributes
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Interfaces>`
.. attribute:: vnis
Table for VNIs
**type**\: :py:class:`Vnis <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Vnis>`
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.interfaces = Nve.Interfaces()
self.interfaces.parent = self
self.vnis = Nve.Vnis()
self.vnis.parent = self
class Vnis(object):
"""
Table for VNIs
.. attribute:: vni
The attributes for a particular VNI
**type**\: list of :py:class:`Vni <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Vnis.Vni>`
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vni = YList()
self.vni.parent = self
self.vni.name = 'vni'
class Vni(object):
"""
The attributes for a particular VNI
.. attribute:: vni <key>
VNI ID
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: bvi_ifh
BVI Interface Handle
**type**\: int
**range:** 0..4294967295
.. attribute:: bvi_mac
BVI MAC address
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: bvi_state
BVI Interface Oper State
**type**\: int
**range:** 0..255
.. attribute:: flags
Flags
**type**\: int
**range:** 0..4294967295
.. attribute:: interface_name
NVE Interface name
**type**\: str
.. attribute:: ipv4_tbl_id
IPv4 Table ID
**type**\: int
**range:** 0..4294967295
.. attribute:: ipv6_tbl_id
IPv6 Table ID
**type**\: int
**range:** 0..4294967295
.. attribute:: mcast_flags
McastFlags
**type**\: int
**range:** 0..4294967295
.. attribute:: mcast_ipv4_address
MCAST IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: state
State
**type**\: int
**range:** \-128..127
.. attribute:: topo_id
L2RIB Topology ID
**type**\: int
**range:** 0..4294967295
.. attribute:: topo_name
L2RIB Topology Name
**type**\: str
**length:** 0..50
.. attribute:: topo_valid
TOPO ID valid flag
**type**\: bool
.. attribute:: udp_port
UDP Port
**type**\: int
**range:** 0..4294967295
.. attribute:: vni_max
VNI Max in Range
**type**\: int
**range:** 0..4294967295
.. attribute:: vni_min
VNI Min in Range
**type**\: int
**range:** 0..4294967295
.. attribute:: vni_xr
VNI Number
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_id
L3 VRF ID
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_name
L3 VRF Name
**type**\: str
.. attribute:: vrf_vni
VRF VNI
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vni = None
self.bvi_ifh = None
self.bvi_mac = None
self.bvi_state = None
self.flags = None
self.interface_name = None
self.ipv4_tbl_id = None
self.ipv6_tbl_id = None
self.mcast_flags = None
self.mcast_ipv4_address = None
self.state = None
self.topo_id = None
self.topo_name = None
self.topo_valid = None
self.udp_port = None
self.vni_max = None
self.vni_min = None
self.vni_xr = None
self.vrf_id = None
self.vrf_name = None
self.vrf_vni = None
@property
def _common_path(self):
if self.vni is None:
raise YPYModelError('Key property vni is None')
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:vnis/Cisco-IOS-XR-tunnel-nve-oper:vni[Cisco-IOS-XR-tunnel-nve-oper:vni = ' + str(self.vni) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.vni is not None:
return True
if self.bvi_ifh is not None:
return True
if self.bvi_mac is not None:
return True
if self.bvi_state is not None:
return True
if self.flags is not None:
return True
if self.interface_name is not None:
return True
if self.ipv4_tbl_id is not None:
return True
if self.ipv6_tbl_id is not None:
return True
if self.mcast_flags is not None:
return True
if self.mcast_ipv4_address is not None:
return True
if self.state is not None:
return True
if self.topo_id is not None:
return True
if self.topo_name is not None:
return True
if self.topo_valid is not None:
return True
if self.udp_port is not None:
return True
if self.vni_max is not None:
return True
if self.vni_min is not None:
return True
if self.vni_xr is not None:
return True
if self.vrf_id is not None:
return True
if self.vrf_name is not None:
return True
if self.vrf_vni is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Vnis.Vni']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:vnis'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.vni is not None:
for child_ref in self.vni:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Vnis']['meta_info']
class Interfaces(object):
"""
Table for NVE interface attributes
.. attribute:: interface
The attributes for a particular interface
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Interfaces.Interface>`
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
The attributes for a particular interface
.. attribute:: interface_name <key>
Interface Name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: admin_state
Admin State
**type**\: int
**range:** \-128..127
.. attribute:: any_cast_source_interface_name
Anycast Source Interface name
**type**\: str
.. attribute:: any_cast_source_ipv4_address
Anycast Source IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: any_cast_source_state
Anycast Source Interface State
**type**\: int
**range:** \-128..127
.. attribute:: encap
Encap
**type**\: int
**range:** \-128..127
.. attribute:: flags
Flags
**type**\: int
**range:** 0..4294967295
.. attribute:: if_handle
NVE IfHandle
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: interface_name_xr
Interface name
**type**\: str
.. attribute:: source_interface_name
Source Interface name
**type**\: str
.. attribute:: source_ipv4_address
Source IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: source_state
Source Intf State
**type**\: int
**range:** \-128..127
.. attribute:: state
State
**type**\: int
**range:** \-128..127
.. attribute:: sync_mcast_flags
Sync McastFlags
**type**\: int
**range:** 0..4294967295
.. attribute:: sync_mcast_ipv4_address
MCAST sync group IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: udp_port
UDP Port
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.admin_state = None
self.any_cast_source_interface_name = None
self.any_cast_source_ipv4_address = None
self.any_cast_source_state = None
self.encap = None
self.flags = None
self.if_handle = None
self.interface_name_xr = None
self.source_interface_name = None
self.source_ipv4_address = None
self.source_state = None
self.state = None
self.sync_mcast_flags = None
self.sync_mcast_ipv4_address = None
self.udp_port = None
@property
def _common_path(self):
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:interfaces/Cisco-IOS-XR-tunnel-nve-oper:interface[Cisco-IOS-XR-tunnel-nve-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.admin_state is not None:
return True
if self.any_cast_source_interface_name is not None:
return True
if self.any_cast_source_ipv4_address is not None:
return True
if self.any_cast_source_state is not None:
return True
if self.encap is not None:
return True
if self.flags is not None:
return True
if self.if_handle is not None:
return True
if self.interface_name_xr is not None:
return True
if self.source_interface_name is not None:
return True
if self.source_ipv4_address is not None:
return True
if self.source_state is not None:
return True
if self.state is not None:
return True
if self.sync_mcast_flags is not None:
return True
if self.sync_mcast_ipv4_address is not None:
return True
if self.udp_port is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Interfaces']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-nve-oper:nve'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interfaces is not None and self.interfaces._has_data():
return True
if self.vnis is not None and self.vnis._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve']['meta_info']
|
[
"ydk.errors.YPYModelError",
"ydk.types.YList"
] |
[((1605, 1612), 'ydk.types.YList', 'YList', ([], {}), '()\n', (1610, 1612), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((10161, 10168), 'ydk.types.YList', 'YList', ([], {}), '()\n', (10166, 10168), False, 'from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\n'), ((6463, 6504), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""Key property vni is None"""'], {}), "('Key property vni is None')\n", (6476, 6504), False, 'from ydk.errors import YPYError, YPYModelError\n'), ((14789, 14841), 'ydk.errors.YPYModelError', 'YPYModelError', (['"""Key property interface_name is None"""'], {}), "('Key property interface_name is None')\n", (14802, 14841), False, 'from ydk.errors import YPYError, YPYModelError\n')]
|
import os.path
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from numpy import dot
from numpy.linalg import norm
class NotIntegerError(Exception):
pass
# 문서를 불러와 단어로 토큰화 후, 단어들을 word_list에 저장후 word_list 반환
def doc_tokenize(doc_name):
with open(doc_name, 'rt') as fp:
string = fp.read()
word_list = word_tokenize(string)
# 유사도 계산시 정확성을 높이기 위해 큰 의미가 없는 단어인 불용어를 word_list에서 제거
word_list = [word for word in word_list if word not in stop_words]
# 소문자와 대문자로 인해 의미 구별이 되는 것을 방지하기 위해, 모든 단어를 소문자화
word_list = [word.lower() if word.islower() == False else word for word in word_list]
return word_list
# list안 word의 term frequency 값 계산 후 dict 형태로 반환
def tf(list):
tf_dict = {word : list.count(word) if word in list else 0 for word in word_zip}
return tf_dict
# list안 word의 tf 값과 idf 값을 곱하여 tf-idf 값 계산 후 알파벳 순으로 정렬하여 list 원소가 (word, tf-idf) 형식을 가진 list 형태로 반환
def tf_idf(list):
tf_dict = tf(list)
tf_idf_dict = {word : tf_dict[word] * idf_dict[word] for word in tf_dict.keys()}
return sorted(tf_idf_dict.items())
# doc_1과 doc_2 문서의 cosine 유사도를 계산 후 유사도 값을 반환
def cos_similarity(doc_1_name, doc_2_name):
# doc_1과 doc_2 문서의 tf-idf값 계산
doc_1 = tf_idf(doc_tokenize(doc_1_name))
doc_2 = tf_idf(doc_tokenize(doc_2_name))
# doc_1의 word의 tf-idf 값을 vactor_1에 할당
vector_1 = [value[1] for value in doc_1]
# doc_2의 word의 tf-idf 값을 vactor_2에 할당
vector_2 = [value[1] for value in doc_2]
# vector_1과 vector_2 사이의 각도를 구한후 100을 곱하여 % 수치로 반환, 소숫점 2자리까지 반올림
return round((dot(vector_1, vector_2) / (norm(vector_1) * norm(vector_2)))*100, 2)
while True:
try:
# 문서 수 입력
doc_count = float(input('Please enter the count of documents : '))
if doc_count % 1 != 0:
raise NotIntegerError()
doc_count = int(doc_count)
doc_name_list = []
i = 0
while i < doc_count:
doc_name = input(f'Please enter the name of documents [{i + 1}{"/"}{doc_count}] : ') + ".txt"
# 존재하지 않은 문서 이름을 입력시 다시 입력, 존재하는 문서 입력시 doc_name_list에 할당
if os.path.isfile(doc_name):
doc_name_list.append(doc_name)
i += 1
else:
print('Please enter the name of an existing document.')
break
except ValueError:
# 문서 수를 입력할 때 숫자를 입력하지 않으면 excpet 발생
print('Please enter the number.')
except NotIntegerError:
# 문서 수를 입력할 때 정수를 입력하지 않으면 excpet 발생
print('Please enter the integer.')
stop_words = set(stopwords.words('english'))
# idf 값을 계산하기 위해 모든 문서를 doc_zip에 할당
doc_zip = [doc_tokenize(name) for name in doc_name_list]
# tf-idf 값을 계산하기 위해 모든 문서의 단어를 중복되지 않게 word_zip에 할당
word_zip = list(set([word for doc in doc_zip for word in doc]))
# 각 단어마다 inverse document frequency 값 계산 후 dict에 할당
idf_dict = {}
for word in word_zip:
word_count = 0
for doc in doc_zip:
if word in doc:
word_count += 1
idf_dict[word] = np.log((1 + doc_count) / (word_count))
# 경로 상의 모든 문서의 서로 간의 유사도를 계산 후 similarity_dict에 저장
similarity_dict = {(doc_name_list[i], doc_name_list[j]) : cos_similarity(doc_name_list[i], doc_name_list[j]) for i in range(len(doc_name_list)-1) for j in range(i+1, doc_count)}
# 유사도가 가장 큰 문서 2개를 계산 후 출력
key_min = max(similarity_dict.keys(), key = lambda x: similarity_dict[x])
value_min = max(similarity_dict.values())
print(f"The similarity between {key_min[0]} and {key_min[1]} is highest at {value_min}%")
|
[
"numpy.log",
"numpy.linalg.norm",
"nltk.corpus.stopwords.words",
"numpy.dot",
"nltk.tokenize.word_tokenize"
] |
[((369, 390), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['string'], {}), '(string)\n', (382, 390), False, 'from nltk.tokenize import word_tokenize\n'), ((2598, 2624), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2613, 2624), False, 'from nltk.corpus import stopwords\n'), ((3042, 3078), 'numpy.log', 'np.log', (['((1 + doc_count) / word_count)'], {}), '((1 + doc_count) / word_count)\n', (3048, 3078), True, 'import numpy as np\n'), ((1603, 1626), 'numpy.dot', 'dot', (['vector_1', 'vector_2'], {}), '(vector_1, vector_2)\n', (1606, 1626), False, 'from numpy import dot\n'), ((1630, 1644), 'numpy.linalg.norm', 'norm', (['vector_1'], {}), '(vector_1)\n', (1634, 1644), False, 'from numpy.linalg import norm\n'), ((1647, 1661), 'numpy.linalg.norm', 'norm', (['vector_2'], {}), '(vector_2)\n', (1651, 1661), False, 'from numpy.linalg import norm\n')]
|
# coding: utf-8
"""
CloudEndure API documentation
© 2017 CloudEndure All rights reserved # General Request authentication in CloudEndure's API is done using session cookies. A session cookie is returned upon successful execution of the \"login\" method. This value must then be provided within the request headers of all subsequent API requests. ## Errors Some errors are not specifically written in every method since they may always return. Those are: 1) 401 (Unauthorized) - for unauthenticated requests. 2) 405 (Method Not Allowed) - for using a method that is not supported (POST instead of GET). 3) 403 (Forbidden) - request is authenticated, but the user is not allowed to access. 4) 422 (Unprocessable Entity) - for invalid input. ## Formats All strings with date-time format are according to RFC3339. All strings with \"duration\" format are according to ISO8601. For example, a full day duration can be specified with \"PNNNND\". # noqa: E501
OpenAPI spec version: 5
Contact: https://bit.ly/2T54hSc
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CloudEndureUser:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"username": "str",
"status": "str",
"account": "str",
"roles": "list[str]",
"settings": "object",
"api_token": "str",
"has_password": "bool",
"terms_accepted": "bool",
"id": "str",
"self_link": "str",
}
attribute_map = {
"username": "username",
"status": "status",
"account": "account",
"roles": "roles",
"settings": "settings",
"api_token": "apiToken",
"has_password": "<PASSWORD>",
"terms_accepted": "termsAccepted",
"id": "id",
"self_link": "selfLink",
}
def __init__(
self,
username=None,
status=None,
account=None,
roles=None,
settings=None,
api_token=None,
has_password=None,
terms_accepted=None,
id=None,
self_link=None,
): # noqa: E501
"""CloudEndureUser - a model defined in Swagger""" # noqa: E501
self._username = None
self._status = None
self._account = None
self._roles = None
self._settings = None
self._api_token = None
self._has_password = None
self._terms_accepted = None
self._id = None
self._self_link = None
self.discriminator = None
if username is not None:
self.username = username
if status is not None:
self.status = status
if account is not None:
self.account = account
if roles is not None:
self.roles = roles
if settings is not None:
self.settings = settings
if api_token is not None:
self.api_token = api_token
if has_password is not None:
self.has_password = has_password
if terms_accepted is not None:
self.terms_accepted = terms_accepted
if id is not None:
self.id = id
if self_link is not None:
self.self_link = self_link
@property
def username(self):
"""Gets the username of this CloudEndureUser. # noqa: E501
:return: The username of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CloudEndureUser.
:param username: The username of this CloudEndureUser. # noqa: E501
:type: str
"""
self._username = username
@property
def status(self):
"""Gets the status of this CloudEndureUser. # noqa: E501
:return: The status of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this CloudEndureUser.
:param status: The status of this CloudEndureUser. # noqa: E501
:type: str
"""
allowed_values = ["PENDING", "CONFIRMED", "DELETED"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}".format( # noqa: E501
status, allowed_values
)
)
self._status = status
@property
def account(self):
"""Gets the account of this CloudEndureUser. # noqa: E501
:return: The account of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""Sets the account of this CloudEndureUser.
:param account: The account of this CloudEndureUser. # noqa: E501
:type: str
"""
self._account = account
@property
def roles(self):
"""Gets the roles of this CloudEndureUser. # noqa: E501
:return: The roles of this CloudEndureUser. # noqa: E501
:rtype: list[str]
"""
return self._roles
@roles.setter
def roles(self, roles):
"""Sets the roles of this CloudEndureUser.
:param roles: The roles of this CloudEndureUser. # noqa: E501
:type: list[str]
"""
allowed_values = [
"USER",
"ACCOUNT_ADMIN",
"ACCOUNT_OWNER",
"GLOBAL_READONLY",
] # noqa: E501
if not set(roles).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `roles` [{0}], must be a subset of [{1}]".format( # noqa: E501
", ".join(map(str, set(roles) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)),
)
)
self._roles = roles
@property
def settings(self):
"""Gets the settings of this CloudEndureUser. # noqa: E501
:return: The settings of this CloudEndureUser. # noqa: E501
:rtype: object
"""
return self._settings
@settings.setter
def settings(self, settings):
"""Sets the settings of this CloudEndureUser.
:param settings: The settings of this CloudEndureUser. # noqa: E501
:type: object
"""
self._settings = settings
@property
def api_token(self):
"""Gets the api_token of this CloudEndureUser. # noqa: E501
:return: The api_token of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._api_token
@api_token.setter
def api_token(self, api_token):
"""Sets the api_token of this CloudEndureUser.
:param api_token: The api_token of this CloudEndureUser. # noqa: E501
:type: str
"""
self._api_token = api_token
@property
def has_password(self):
"""Gets the has_password of this CloudEndureUser. # noqa: E501
:return: The has_password of this CloudEndureUser. # noqa: E501
:rtype: bool
"""
return self._has_password
@has_password.setter
def has_password(self, has_password):
"""Sets the has_password of this CloudEndureUser.
:param has_password: The has_password of this CloudEndureUser. # noqa: E501
:type: bool
"""
self._has_password = has_password
@property
def terms_accepted(self):
"""Gets the terms_accepted of this CloudEndureUser. # noqa: E501
todo one-way; cannot be set at time of POST # noqa: E501
:return: The terms_accepted of this CloudEndureUser. # noqa: E501
:rtype: bool
"""
return self._terms_accepted
@terms_accepted.setter
def terms_accepted(self, terms_accepted):
"""Sets the terms_accepted of this CloudEndureUser.
todo one-way; cannot be set at time of POST # noqa: E501
:param terms_accepted: The terms_accepted of this CloudEndureUser. # noqa: E501
:type: bool
"""
self._terms_accepted = terms_accepted
@property
def id(self):
"""Gets the id of this CloudEndureUser. # noqa: E501
:return: The id of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CloudEndureUser.
:param id: The id of this CloudEndureUser. # noqa: E501
:type: str
"""
self._id = id
@property
def self_link(self):
"""Gets the self_link of this CloudEndureUser. # noqa: E501
:return: The self_link of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._self_link
@self_link.setter
def self_link(self, self_link):
"""Sets the self_link of this CloudEndureUser.
:param self_link: The self_link of this CloudEndureUser. # noqa: E501
:type: str
"""
self._self_link = self_link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(CloudEndureUser, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudEndureUser):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"six.iteritems"
] |
[((9688, 9721), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (9701, 9721), False, 'import six\n')]
|
"""
NCL_conwomap_2.py
=================
This script illustrates the following concepts:
- Drawing a simple filled contour plot
- Selecting a different color map
- Changing the size/shape of a contour plot
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/conwomap_2.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/conwomap_2_lg.png
"""
import cartopy.crs as ccrs
import geocat.datafiles as gdf
import matplotlib.pyplot as plt
###############################################################################
# Import packages:
import numpy as np
import xarray as xr
from geocat.viz import cmaps as gvcmaps
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/cone.nc"))
u = ds.u.isel(time=4)
###############################################################################
# Plot:
# Generate figure (set its size (width, height) in inches)
plt.figure(figsize=(10, 6))
# Generate axes, using Cartopy
projection = ccrs.PlateCarree()
ax = plt.axes(projection=projection)
# Import an NCL colormap
newcmp = gvcmaps.gui_default
# Contourf-plot data (for filled contours)
p = u.plot.contourf(ax=ax,
vmin=-1,
vmax=10,
levels=12,
cmap=newcmp,
add_colorbar=False,
transform=projection,
extend='neither',
add_labels=False)
# Contour-plot data (for borderlines)
u.plot.contour(ax=ax,
vmin=-1,
vmax=10,
levels=12,
linewidths=0.5,
colors='black',
add_colorbar=False,
transform=projection,
extend='neither',
add_labels=False)
# Add horizontal colorbar
cbar = plt.colorbar(p, orientation='horizontal', shrink=0.5)
cbar.ax.tick_params(labelsize=16)
cbar.set_ticks(np.linspace(0, 9, 10))
# Use geocat.viz.util convenience function to set axes limits & tick values without calling several matplotlib functions
gvutil.set_axes_limits_and_ticks(ax,
xlim=(0, 49),
ylim=(0, 29),
xticks=np.linspace(0, 40, 5),
yticks=np.linspace(0, 25, 6))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax,
x_minor_per_major=5,
y_minor_per_major=5,
labelsize=16)
# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.
gvutil.set_titles_and_labels(ax,
lefttitle="Cone amplitude",
lefttitlefontsize=18,
righttitle="ndim",
righttitlefontsize=18,
xlabel="X",
ylabel="Y",
labelfontsize=18)
# Show the plot
plt.show()
|
[
"matplotlib.pyplot.show",
"geocat.viz.util.set_titles_and_labels",
"matplotlib.pyplot.axes",
"geocat.datafiles.get",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"geocat.viz.util.add_major_minor_ticks",
"numpy.linspace",
"cartopy.crs.PlateCarree"
] |
[((1170, 1197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1180, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1261), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1259, 1261), True, 'import cartopy.crs as ccrs\n'), ((1267, 1298), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'projection'}), '(projection=projection)\n', (1275, 1298), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2126), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {'orientation': '"""horizontal"""', 'shrink': '(0.5)'}), "(p, orientation='horizontal', shrink=0.5)\n", (2085, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2748), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax'], {'x_minor_per_major': '(5)', 'y_minor_per_major': '(5)', 'labelsize': '(16)'}), '(ax, x_minor_per_major=5, y_minor_per_major=5,\n labelsize=16)\n', (2684, 2748), True, 'from geocat.viz import util as gvutil\n'), ((2926, 3101), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax'], {'lefttitle': '"""Cone amplitude"""', 'lefttitlefontsize': '(18)', 'righttitle': '"""ndim"""', 'righttitlefontsize': '(18)', 'xlabel': '"""X"""', 'ylabel': '"""Y"""', 'labelfontsize': '(18)'}), "(ax, lefttitle='Cone amplitude',\n lefttitlefontsize=18, righttitle='ndim', righttitlefontsize=18, xlabel=\n 'X', ylabel='Y', labelfontsize=18)\n", (2954, 3101), True, 'from geocat.viz import util as gvutil\n'), ((3313, 3323), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3321, 3323), True, 'import matplotlib.pyplot as plt\n'), ((966, 997), 'geocat.datafiles.get', 'gdf.get', (['"""netcdf_files/cone.nc"""'], {}), "('netcdf_files/cone.nc')\n", (973, 997), True, 'import geocat.datafiles as gdf\n'), ((2176, 2197), 'numpy.linspace', 'np.linspace', (['(0)', '(9)', '(10)'], {}), '(0, 9, 10)\n', (2187, 2197), True, 'import numpy as np\n'), ((2492, 2513), 'numpy.linspace', 
'np.linspace', (['(0)', '(40)', '(5)'], {}), '(0, 40, 5)\n', (2503, 2513), True, 'import numpy as np\n'), ((2555, 2576), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(6)'], {}), '(0, 25, 6)\n', (2566, 2576), True, 'import numpy as np\n')]
|
# -*- coding: gbk -*-
path1 = u'K:\\选择删除\\' #所需修改文件夹所在路径
import os
import zhconv
for parent, dirnames, filenames in os.walk(path1):
for filename in filenames:
try:
os.rename(os.path.join(parent, filename), os.path.join(parent, zhconv.convert(filename, 'zh-cn')))
#print(zhconv.convert(filename, 'zh-cn'))
except:
print("文件重命名错误" + str(filename))
import os, sys
dirs = os.listdir(path1)
for dir in dirs:
try:
os.rename(path1+str(dir),path1+zhconv.convert(dir, 'zh-cn'))
#print(zhconv.convert(dir, 'zh-cn'))
except:
print("目录重命名错误" + path1+str(dir))
|
[
"os.walk",
"os.path.join",
"os.listdir",
"zhconv.convert"
] |
[((116, 130), 'os.walk', 'os.walk', (['path1'], {}), '(path1)\n', (123, 130), False, 'import os, sys\n'), ((430, 447), 'os.listdir', 'os.listdir', (['path1'], {}), '(path1)\n', (440, 447), False, 'import os, sys\n'), ((198, 228), 'os.path.join', 'os.path.join', (['parent', 'filename'], {}), '(parent, filename)\n', (210, 228), False, 'import os, sys\n'), ((516, 544), 'zhconv.convert', 'zhconv.convert', (['dir', '"""zh-cn"""'], {}), "(dir, 'zh-cn')\n", (530, 544), False, 'import zhconv\n'), ((251, 284), 'zhconv.convert', 'zhconv.convert', (['filename', '"""zh-cn"""'], {}), "(filename, 'zh-cn')\n", (265, 284), False, 'import zhconv\n')]
|
from itertools import count
import time
from ..core import np, auto_grad_logp, AUTOGRAD
from ..parallel import parallel
from ..progressbar import update_progress
from ..state import State, func_var_names
from ..model import init_model
class Sampler(object):
def __init__(self, logp, start,
grad_logp=None,
scale=None,
condition=None,
grad_logp_flag=True,
random_seed=None):
self.model = init_model(logp, grad_logp, grad_logp_flag)
self._logp_func = logp
self._grad_func = grad_logp
self.var_names = func_var_names(logp)
self.state = State.fromkeys(self.var_names)
self.state.update(start)
self.scale = default_scale(scale, self.state)
self.sampler = None
self._sampled = 0
self._accepted = 0
self.conditional = condition
self._grad_logp_flag = grad_logp_flag
self.seed = random_seed
if random_seed:
np.random.seed(random_seed)
if condition is not None:
self._joint_logp = self._logp_func
def _conditional_step(self):
""" Build a conditional logp and sample from it. """
if self.conditional is None:
return self.step()
frozen_vars = self.conditional
frozen_state = self.state
free_vars = [var for var in self.state if var not in frozen_vars]
def conditional_logp(*args):
conditional_state = State([each for each in zip(free_vars, args)])
# Insert conditional values here, then pass to full logp
for i in frozen_vars:
conditional_state.update({i: frozen_state[i]})
return self._joint_logp(**conditional_state)
self.state = State([(var, frozen_state[var]) for var in free_vars])
self._logp_func = conditional_logp
if self._grad_logp_flag and AUTOGRAD:
self.model.grad_func = auto_grad_logp(conditional_logp, names=self.state.keys())
self.model.logp_func = self._logp_func
state = self.step()
# Add the frozen variables back into the state
new_state = State([(name, None) for name in self.var_names])
for var in state:
new_state.update({var: state[var]})
for var in frozen_vars:
new_state.update({var: frozen_state[var]})
self.state = new_state
return self.state
def step(self):
""" This is what you define to create the sampler. Requires that a
:ref:`state <state>` object is returned."""
pass
def sample(self, num, burn=0, thin=1, n_chains=1, progress_bar=True):
"""
Sample from :math:`P(X)`
:param num: *int.* Number of samples to draw from :math:`P(X)`.
:param burn: (optional) *int.*
Number of samples to discard from the beginning of the chain.
:param thin: (optional) *float.*
Thin the samples by this factor.
:param n_chains: (optional) *int.*
Number of chains to return. Each chain is given its own
process and the OS decides how to distribute the processes.
:param progress_bar: (optional) *boolean.*
Show the progress bar, default = True.
:return: Record array with fields taken from arguments of
logp function.
"""
if self.seed is not None:
np.random.seed(self.seed)
if AUTOGRAD and hasattr(self.model, 'grad_func') \
and self.model.grad_func is None:
self.model.grad_func = auto_grad_logp(self._logp_func)
# Constructing a recarray to store samples
dtypes = [(var, 'f8', np.shape(self.state[var])) for var in self.state]
samples = np.zeros(num, dtype=dtypes).view(np.recarray)
if n_chains != 1:
return parallel(self, n_chains, samples,
burn=burn, thin=thin,
progress_bar=progress_bar)
if self.sampler is None:
self.sampler = (self.step() for _ in count(start=0, step=1))
start_time = time.time() # For progress bar
for i in range(num):
samples[i] = tuple(next(self.sampler).values())
if progress_bar and time.time() - start_time > 1:
update_progress(i+1, num)
start_time = time.time()
if progress_bar:
update_progress(i+1, num, end=True)
# Clearing the cache after a run to save on memory.
self.model.clear_cache()
return samples[burn::thin]
def default_scale(scale, state):
""" If scale is None, return a State object with arrays of ones matching
the shape of values in state.
"""
if scale is None:
new_scale = State.fromkeys(state.keys())
for var in state:
new_scale.update({var: np.ones(np.shape(state[var]))})
return new_scale
else:
return scale
|
[
"itertools.count",
"time.time"
] |
[((4233, 4244), 'time.time', 'time.time', ([], {}), '()\n', (4242, 4244), False, 'import time\n'), ((4487, 4498), 'time.time', 'time.time', ([], {}), '()\n', (4496, 4498), False, 'import time\n'), ((4187, 4209), 'itertools.count', 'count', ([], {'start': '(0)', 'step': '(1)'}), '(start=0, step=1)\n', (4192, 4209), False, 'from itertools import count\n'), ((4386, 4397), 'time.time', 'time.time', ([], {}), '()\n', (4395, 4397), False, 'import time\n')]
|
import datetime
from flask_restx import Namespace, Resource, fields, marshal
from flask import request
from . import model
from .schedulerweb_util import get_db
api = Namespace('Zones', title="Zone management")
a_zone = api.model('Zone', {
'zone_id': fields.Integer(description="ID of zone"),
'name': fields.String(),
'boiler_relay': fields.String(
description="Identifier of boiler relay for this zone."),
'sensor_id': fields.Integer(
description="Identifier of sensor for this zone.."),
})
@api.route("/")
class ListZones(Resource):
@api.marshal_list_with(a_zone)
def get(self):
db = get_db()
zones = model.Zone.all_from_db(db)
return zones
an_override = api.model("Temperature target override", {
'zone': fields.Integer(description="Which zone it applies to"),
'end': fields.DateTime(description="Date/time the override ends"),
'temp': fields.Float(description="Target temperature during override"),
})
@api.route("/<int:zone_id>/override")
class Override(Resource):
"""Temperature override for a zone."""
@api.response(code=200, model=an_override, description="OK")
@api.response(code=204, description="No overrides active")
def get(self, zone_id):
"""Get temperature override for zone.
Returns no override if an override was in place but has expired."""
# XXX we shouldn't be deciding on the server whether an override is
# active since it doesn't tell us whether the device is implementing it
# or not. This should move to target/reported state model.
now = datetime.datetime.now()
db = get_db()
overrides = model.TargetOverride.from_db(db, [zone_id])
if not overrides:
return None, 204
assert len(overrides) == 1, "Only support one override per zone"
override = overrides[0]
if override.end > now:
return marshal(override, an_override), 200
return None, 204
@api.doc(params={
"temp": {'description': "The override temperature to set.",
'type': float, 'required': True, 'in': 'formData'},
"days": {"type": int, "in": "formData"},
"hours": {"type": int, "in": "formData"},
"mins": {"type": int, "in": "formData"},
})
def post(self, zone_id):
"""Configure a temperature override.
Sepcify at least one of hours, mins, secs for duration."""
try:
secs = 0
if 'days' in request.form:
secs += int(request.form['days']) * 60 * 60 * 24
if 'hours' in request.form:
secs += int(request.form['hours']) * 60 * 60
if 'mins' in request.form:
secs += int(request.form['mins']) * 60
if not secs:
return 'Must specify days, hours, or mins', 400
duration = datetime.timedelta(0, secs)
temp = float(request.form['temp'])
except ValueError:
return '', 400
now = datetime.datetime.now()
end = now + duration
db = get_db()
override = model.TargetOverride(end, temp, zone_id)
override.save(db)
db.commit()
return ('', 200)
def delete(self, zone_id):
"""Clear temperature override."""
db = get_db()
model.TargetOverride.clear_from_db(db, zone_id)
db.commit()
return '', 200
a_gradient_measurement = api.model('Temperature gradient', {
'when': fields.DateTime(
description="Date/time the measurement was taken."),
'delta': fields.Float(
description="Difference between inside and "
"outside temperature at the start of the measurement."),
'gradient': fields.Float(
description="The temperature gradient in degrees C per "
"hour."),
})
a_gradient_average = api.model('Temperature gradient average', {
'delta': fields.Float(
description="Difference between inside and outside temperature"),
'gradient': fields.Float(
description="Average temperature gradient with heating on at "
"temperature difference of delta."),
'npoints': fields.Integer(
description="Number of data points contributing to the average "
"value given."),
})
@api.route('/<int:zone_id>/gradient_measurements')
class Gradient(Resource):
@api.expect(a_gradient_measurement)
def post(self, zone_id):
tgm = model.TemperatureGradientMeasurement(
zone_id, api.payload['when'], api.payload['delta'],
api.payload['gradient'])
db = get_db()
tgm.save(db)
db.commit()
@api.route('/<int:zone_id>/gradients')
class GradientTable(Resource):
@api.marshal_list_with(a_gradient_average)
def get(self, zone_id):
db = get_db()
r = model.TemperatureGradientMeasurement.get_gradient_table(
db, zone_id)
return r
a_device_state = api.model('Device reported state', {
'time_to_target': fields.Integer(
description="Seconds until target reached."),
'state': fields.String(description="State of device."),
'target': fields.Float(
description="Target the device is working towards."),
'current_temp': fields.Float(
description="Current temperature reported by the device."),
'target_overridden': fields.Boolean(
descripton="Whether the target temperature has been overridden"),
"current_outside_temp": fields.Float(
description="Current outside temperature reported by the device."),
"dutycycle": fields.Float(description="Dutycycle for boiler"),
})
@api.route('/<int:zone_id>/reported_state')
@api.param('zone_id', 'Zone ID for the time to target.')
class ReportedState(Resource):
@api.expect(a_device_state)
def post(self, zone_id):
db = get_db()
device_state = model.DeviceState(
datetime.datetime.now(),
zone_id, api.payload['state'], api.payload['target'],
api.payload['current_temp'], api.payload['time_to_target'],
api.payload['current_outside_temp'], api.payload['dutycycle'])
device_state.save(db)
db.commit()
@api.marshal_with(a_device_state)
def get(self, zone_id):
db = get_db()
device_state = model.DeviceState.last_from_db(db, zone_id)
db.commit()
return device_state
@api.route('/<int:zone_id>/schedule')
@api.param('zone_id', 'Zone ID for the schedule.')
class ZoneSchedule(Resource):
def get(self, zone_id):
db = get_db()
schedule = model.FullSchedule.from_db(db, zone_id)
db.commit()
entries = []
for (dow, time, _, temp) in schedule:
entries.append({
'day': dow,
'time': time.strftime('%H:%M'),
'temp': temp,
})
return entries
|
[
"flask_restx.fields.Float",
"flask_restx.fields.Boolean",
"flask_restx.fields.Integer",
"flask_restx.fields.DateTime",
"flask_restx.Namespace",
"flask_restx.fields.String",
"datetime.timedelta",
"datetime.datetime.now",
"flask_restx.marshal"
] |
[((170, 213), 'flask_restx.Namespace', 'Namespace', (['"""Zones"""'], {'title': '"""Zone management"""'}), "('Zones', title='Zone management')\n", (179, 213), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((260, 300), 'flask_restx.fields.Integer', 'fields.Integer', ([], {'description': '"""ID of zone"""'}), "(description='ID of zone')\n", (274, 300), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((314, 329), 'flask_restx.fields.String', 'fields.String', ([], {}), '()\n', (327, 329), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((351, 421), 'flask_restx.fields.String', 'fields.String', ([], {'description': '"""Identifier of boiler relay for this zone."""'}), "(description='Identifier of boiler relay for this zone.')\n", (364, 421), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((449, 515), 'flask_restx.fields.Integer', 'fields.Integer', ([], {'description': '"""Identifier of sensor for this zone.."""'}), "(description='Identifier of sensor for this zone..')\n", (463, 515), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((789, 843), 'flask_restx.fields.Integer', 'fields.Integer', ([], {'description': '"""Which zone it applies to"""'}), "(description='Which zone it applies to')\n", (803, 843), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((856, 914), 'flask_restx.fields.DateTime', 'fields.DateTime', ([], {'description': '"""Date/time the override ends"""'}), "(description='Date/time the override ends')\n", (871, 914), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((928, 990), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Target temperature during override"""'}), "(description='Target temperature during override')\n", (940, 990), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((1625, 1648), 'datetime.datetime.now', 
'datetime.datetime.now', ([], {}), '()\n', (1646, 1648), False, 'import datetime\n'), ((3053, 3076), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3074, 3076), False, 'import datetime\n'), ((3531, 3598), 'flask_restx.fields.DateTime', 'fields.DateTime', ([], {'description': '"""Date/time the measurement was taken."""'}), "(description='Date/time the measurement was taken.')\n", (3546, 3598), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((3622, 3742), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Difference between inside and outside temperature at the start of the measurement."""'}), "(description=\n 'Difference between inside and outside temperature at the start of the measurement.'\n )\n", (3634, 3742), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((3770, 3845), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""The temperature gradient in degrees C per hour."""'}), "(description='The temperature gradient in degrees C per hour.')\n", (3782, 3845), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((3952, 4029), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Difference between inside and outside temperature"""'}), "(description='Difference between inside and outside temperature')\n", (3964, 4029), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((4056, 4174), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Average temperature gradient with heating on at temperature difference of delta."""'}), "(description=\n 'Average temperature gradient with heating on at temperature difference of delta.'\n )\n", (4068, 4174), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((4201, 4298), 'flask_restx.fields.Integer', 'fields.Integer', ([], {'description': '"""Number of data points contributing to the average value given."""'}), 
"(description=\n 'Number of data points contributing to the average value given.')\n", (4215, 4298), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((5056, 5115), 'flask_restx.fields.Integer', 'fields.Integer', ([], {'description': '"""Seconds until target reached."""'}), "(description='Seconds until target reached.')\n", (5070, 5115), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((5139, 5184), 'flask_restx.fields.String', 'fields.String', ([], {'description': '"""State of device."""'}), "(description='State of device.')\n", (5152, 5184), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((5200, 5265), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Target the device is working towards."""'}), "(description='Target the device is working towards.')\n", (5212, 5265), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((5296, 5367), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Current temperature reported by the device."""'}), "(description='Current temperature reported by the device.')\n", (5308, 5367), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((5403, 5482), 'flask_restx.fields.Boolean', 'fields.Boolean', ([], {'descripton': '"""Whether the target temperature has been overridden"""'}), "(descripton='Whether the target temperature has been overridden')\n", (5417, 5482), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((5521, 5600), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Current outside temperature reported by the device."""'}), "(description='Current outside temperature reported by the device.')\n", (5533, 5600), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((5628, 5676), 'flask_restx.fields.Float', 'fields.Float', ([], {'description': '"""Dutycycle for boiler"""'}), "(description='Dutycycle for 
boiler')\n", (5640, 5676), False, 'from flask_restx import Namespace, Resource, fields, marshal\n'), ((2910, 2937), 'datetime.timedelta', 'datetime.timedelta', (['(0)', 'secs'], {}), '(0, secs)\n', (2928, 2937), False, 'import datetime\n'), ((5960, 5983), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5981, 5983), False, 'import datetime\n'), ((1946, 1976), 'flask_restx.marshal', 'marshal', (['override', 'an_override'], {}), '(override, an_override)\n', (1953, 1976), False, 'from flask_restx import Namespace, Resource, fields, marshal\n')]
|
import os
import logging
import glob
import pathlib
import argparse
import multiprocessing as mp
import cv2
#import matplotlib.pyplot as plt
logging.basicConfig(
format="%(asctime)s: %(levelname)s: %(message)s", level=logging.INFO
)
def parse_arguments():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
"--image-directory",
default="data/carlacarsv2/images/256",
type=str,
help="Directory containing images to downsample.",
)
argparser.add_argument(
"--out-directory",
default="data/carlacarsv2/images/64",
type=str,
help="Directory to downsample images.",
)
argparser.add_argument(
"--size",
default=64,
type=int,
help="Target size of image to downsample.",
)
return argparser.parse_args()
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts as lists, maintaining order.
Taken from more-itertools with minor modification."""
if n < 1:
raise ValueError('n must be at least 1')
try:
iterable[:0]
except TypeError:
seq = tuple(iterable)
else:
seq = iterable
q, r = divmod(len(seq), n)
ret = []
stop = 0
for i in range(1, n + 1):
start = stop
stop += q + 1 if i <= r else q
ret.append(list(seq[start:stop]))
return ret
def path_to_filename(path, with_suffix=True):
"""Get filename from path.
Parameters
==========
path : str
Path to retrieve file name from e.g. '/path/to/image.png'.
with_suffix : bool
Whether to include the suffix of file path in file name.
Returns
=======
str
The file name of the path e.g. 'image.png'
or 'image' if `with_suffix` is false.
"""
p = pathlib.Path(path)
if with_suffix:
return str(p.name)
else:
return str(p.with_suffix("").name)
def downsample_image(infile_path, outfile_path, downsample_size):
image = cv2.imread(infile_path, flags=cv2.IMREAD_COLOR)
resized_image = cv2.resize(
image,
(downsample_size, downsample_size),
interpolation=cv2.INTER_AREA
)
cv2.imwrite(outfile_path, resized_image)
#def show_image(infile_path):
# image = cv2.imread(infile_path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plt.imshow(image)
# plt.show()
"""
cv2 reads BGR as its default colour order for images, matplotlib uses RGB.
Switching the ordering of the color channels doesn't cost any compute
as the underlying pixel values don't change. Make sure to use `cv2.cvtColor()` to
change the color ordering passing image to some other library API.
https://stackoverflow.com/questions/50963283/python-opencv-imshow-doesnt-need-convert-from-bgr-to-rgb
https://stackoverflow.com/questions/39316447/opencv-giving-wrong-color-to-colored-images-on-loading
"""
def worker_task(infile_paths, out_directory, downsample_size):
for infile_path in infile_paths:
fn = path_to_filename(infile_path, with_suffix=True)
outfile_path = os.path.join(out_directory, fn)
downsample_image(infile_path, outfile_path, downsample_size)
def main():
config = parse_arguments()
os.makedirs(config.out_directory, exist_ok=True)
paths = glob.glob(os.path.join(config.image_directory, "*.png"))
cpu_count = mp.cpu_count()
n_processes = min(cpu_count, len(paths) // 5)
logging.info(f"Found {len(paths)} images in image directory {config.image_directory}")
logging.info(f"There are {cpu_count} CPUs, using {n_processes} of them.")
processes = []
for infile_paths in divide(n_processes, paths):
p = mp.Process(
target=worker_task,
args=(infile_paths, config.out_directory, config.size)
)
p.start()
processes.append(p)
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"cv2.imwrite",
"cv2.imread",
"pathlib.Path",
"logging.info",
"multiprocessing.Process",
"os.path.join",
"cv2.resize",
"multiprocessing.cpu_count"
] |
[((143, 237), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s: %(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s: %(levelname)s: %(message)s', level\n =logging.INFO)\n", (162, 237), False, 'import logging\n'), ((279, 323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (302, 323), False, 'import argparse\n'), ((1844, 1862), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (1856, 1862), False, 'import pathlib\n'), ((2042, 2089), 'cv2.imread', 'cv2.imread', (['infile_path'], {'flags': 'cv2.IMREAD_COLOR'}), '(infile_path, flags=cv2.IMREAD_COLOR)\n', (2052, 2089), False, 'import cv2\n'), ((2110, 2198), 'cv2.resize', 'cv2.resize', (['image', '(downsample_size, downsample_size)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (downsample_size, downsample_size), interpolation=cv2.\n INTER_AREA)\n', (2120, 2198), False, 'import cv2\n'), ((2228, 2268), 'cv2.imwrite', 'cv2.imwrite', (['outfile_path', 'resized_image'], {}), '(outfile_path, resized_image)\n', (2239, 2268), False, 'import cv2\n'), ((3270, 3318), 'os.makedirs', 'os.makedirs', (['config.out_directory'], {'exist_ok': '(True)'}), '(config.out_directory, exist_ok=True)\n', (3281, 3318), False, 'import os\n'), ((3404, 3418), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3416, 3418), True, 'import multiprocessing as mp\n'), ((3564, 3637), 'logging.info', 'logging.info', (['f"""There are {cpu_count} CPUs, using {n_processes} of them."""'], {}), "(f'There are {cpu_count} CPUs, using {n_processes} of them.')\n", (3576, 3637), False, 'import logging\n'), ((3121, 3152), 'os.path.join', 'os.path.join', (['out_directory', 'fn'], {}), '(out_directory, fn)\n', (3133, 3152), False, 'import os\n'), ((3341, 3386), 'os.path.join', 'os.path.join', (['config.image_directory', '"""*.png"""'], {}), "(config.image_directory, '*.png')\n", (3353, 3386), False, 'import os\n'), 
((3721, 3811), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker_task', 'args': '(infile_paths, config.out_directory, config.size)'}), '(target=worker_task, args=(infile_paths, config.out_directory,\n config.size))\n', (3731, 3811), True, 'import multiprocessing as mp\n')]
|
import os, os.path as op
import logging
import numpy as np
import cv2
import progressbar
import ast
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pprint
import PIL
from lib.backend import backendDb
from lib.backend import backendMedia
from lib.utils import util
def add_parsers(subparsers):
evaluateDetectionParser(subparsers)
evaluateSegmentationIoUParser(subparsers)
evaluateBinarySegmentationParser(subparsers)
def _evaluateDetectionForClassPascal(c, c_gt, name, args):
def _voc_ap(rec, prec):
""" Compute VOC AP given precision and recall. """
# First append sentinel values at the end.
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# Compute the precision envelope.
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# To calculate area under PR curve, look for points
# where X axis (recall) changes value.
i = np.where(mrec[1:] != mrec[:-1])[0]
# Sum (\Delta recall) * prec.
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
c.execute('SELECT * FROM objects WHERE name=? ORDER BY score DESC',
(name, ))
entries_det = c.fetchall()
logging.info('Total %d detected objects for class "%s"', len(entries_det),
name)
# Go down dets and mark TPs and FPs.
tp = np.zeros(len(entries_det), dtype=float)
fp = np.zeros(len(entries_det), dtype=float)
# Detected of no interest.
ignored = np.zeros(len(entries_det), dtype=bool)
# 'already_detected' used to penalize multiple detections of same GT box.
already_detected = set()
# Go through each detection.
for idet, entry_det in enumerate(entries_det):
bbox_det = np.array(backendDb.objectField(entry_det, 'bbox'),
dtype=float)
imagefile = backendDb.objectField(entry_det, 'imagefile')
name = backendDb.objectField(entry_det, 'name')
# Get all GT boxes from the same imagefile [of the same class].
c_gt.execute('SELECT * FROM objects WHERE imagefile=? AND name=?',
(imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
bboxes_gt = np.array(
[backendDb.objectField(entry, 'bbox') for entry in entries_gt],
dtype=float)
# Separately manage no GT boxes.
if bboxes_gt.size == 0:
fp[idet] = 1.
continue
# Intersection between bbox_det and all bboxes_gt.
ixmin = np.maximum(bboxes_gt[:, 0], bbox_det[0])
iymin = np.maximum(bboxes_gt[:, 1], bbox_det[1])
ixmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2],
bbox_det[0] + bbox_det[2])
iymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3],
bbox_det[1] + bbox_det[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
intersection = iw * ih
# Union between bbox_det and all bboxes_gt.
union = (bbox_det[2] * bbox_det[3] +
bboxes_gt[:, 2] * bboxes_gt[:, 3] - intersection)
# IoU and get the best IoU.
IoUs = intersection / union
max_IoU = np.max(IoUs)
objectid_gt = objectids_gt[np.argmax(IoUs)]
logging.debug('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,
idet, objectid_gt)
# Find which objects count towards TP and FN (should be detected).
c_gt.execute(
'SELECT * FROM objects WHERE imagefile=? AND name=? AND %s' %
args.where_object_gt, (imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt_of_interest = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
# If 1) large enough IoU and
# 2) this GT box was not detected before.
if max_IoU > args.IoU_thresh and not objectid_gt in already_detected:
if objectid_gt in objectids_gt_of_interest:
tp[idet] = 1.
else:
ignored[idet] = True
already_detected.add(objectid_gt)
else:
fp[idet] = 1.
# Find the number of GT of interest.
c_gt.execute(
'SELECT COUNT(1) FROM objects WHERE %s AND name=?' %
args.where_object_gt, (name, ))
n_gt = c_gt.fetchone()[0]
logging.info('Total objects of interest: %d', n_gt)
# Remove dets, neither TP or FP.
tp = tp[np.bitwise_not(ignored)]
fp = fp[np.bitwise_not(ignored)]
logging.info('ignored: %d, tp: %d, fp: %d, gt: %d',
np.count_nonzero(ignored), np.count_nonzero(tp),
np.count_nonzero(fp), n_gt)
assert np.count_nonzero(tp) + np.count_nonzero(fp) + np.count_nonzero(
ignored) == len(entries_det)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(n_gt)
# Avoid divide by zero in case the first detection matches a difficult
# ground truth.
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
aps = _voc_ap(rec, prec)
print('Average precision for class "%s": %.4f' % (name, aps))
return aps
def _writeCurveValues(out_dir, X, Y, metrics_name, name, header):
if name is not None:
name = util.validateFileName(name)
stem = '%s-%s' % (metrics_name, name)
else:
stem = metrics_name
plt.savefig(op.join(out_dir, '%s.png' % stem))
plt.savefig(op.join(out_dir, '%s.eps' % stem))
with open(op.join(out_dir, '%s.txt' % stem), 'w') as f:
f.write('%s\n' % header)
for x, y in zip(X, Y):
f.write('%f %f\n' % (x, y))
def _beautifyPlot(ax):
ax.grid(which='major', linewidth='0.5')
ax.grid(which='minor', linewidth='0.2')
loc = ticker.MultipleLocator(0.2)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
loc = ticker.MultipleLocator(0.1)
ax.xaxis.set_minor_locator(loc)
ax.yaxis.set_minor_locator(loc)
ax.set_aspect('equal', adjustable='box')
def _evaluateDetectionForClassSklearn(c, c_gt, class_name, args, sklearn):
    ''' Helper function for evaluateDetection.

    Computes average precision (and optionally precision-recall / ROC curves)
    for one class using sklearn.metrics. Detections in `c` are matched
    greedily, in descending-score order, against ground truth boxes in `c_gt`
    with the IoU threshold args.IoU_thresh.

    Args:
      c:          cursor to the database with detections.
      c_gt:       cursor to the ground truth database.
      class_name: object name to evaluate, or None to pool all classes.
      args:       parsed arguments of the evaluateDetection command.
      sklearn:    the imported sklearn module (imported lazily by the caller).
    Returns:
      Average precision (float).
    '''
    # Detected objects sorted by descending score (confidence).
    if class_name is None:
        c.execute('SELECT * FROM objects ORDER BY score DESC')
    else:
        c.execute('SELECT * FROM objects WHERE name=? ORDER BY score DESC',
                  (class_name, ))
    entries_det = c.fetchall()
    logging.info('Num of positive "%s": %d', class_name, len(entries_det))

    # Create arrays 'y_score' with predicted scores, binary 'y_true' for GT,
    # and a binary 'y_ignored' for detected objects that are neither TP nor FP.
    y_score = np.zeros(len(entries_det), dtype=float)
    y_true = np.zeros(len(entries_det), dtype=bool)
    y_ignored = np.zeros(len(entries_det), dtype=bool)

    # 'already_detected' used to penalize multiple detections of same GT box
    already_detected = set()

    # Go through each detection.
    for idet, entry_det in enumerate(entries_det):

        bbox_det = np.array(backendDb.objectField(entry_det, 'bbox'),
                            dtype=float)
        imagefile = backendDb.objectField(entry_det, 'imagefile')
        name = backendDb.objectField(entry_det, 'name')
        score = backendDb.objectField(entry_det, 'score')
        y_score[idet] = score

        # Get all GT boxes from the same imagefile and of the same class.
        c_gt.execute('SELECT * FROM objects WHERE imagefile=? AND name=?',
                     (imagefile, name))
        entries_gt = c_gt.fetchall()
        objectids_gt = [
            backendDb.objectField(entry, 'objectid') for entry in entries_gt
        ]
        bboxes_gt = np.array(
            [backendDb.objectField(entry, 'bbox') for entry in entries_gt],
            dtype=float)

        # Separately manage the case of no GT boxes in this image.
        if bboxes_gt.size == 0:
            # NOTE(review): y_true[idet] is already False (a false positive),
            # but overwriting y_score with 0 discards the real confidence --
            # confirm this is the intended behavior.
            y_score[idet] = False
            continue

        # Intersection between bbox_det and all bboxes_gt.
        ixmin = np.maximum(bboxes_gt[:, 0], bbox_det[0])
        iymin = np.maximum(bboxes_gt[:, 1], bbox_det[1])
        ixmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2],
                           bbox_det[0] + bbox_det[2])
        iymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3],
                           bbox_det[1] + bbox_det[3])
        iw = np.maximum(ixmax - ixmin, 0.)
        ih = np.maximum(iymax - iymin, 0.)
        intersection = iw * ih

        # Union between bbox_det and all bboxes_gt.
        union = (bbox_det[2] * bbox_det[3] +
                 bboxes_gt[:, 2] * bboxes_gt[:, 3] - intersection)

        # Compute the best IoU between the bbox_det and all bboxes_gt.
        IoUs = intersection / union
        max_IoU = np.max(IoUs)
        objectid_gt = objectids_gt[np.argmax(IoUs)]
        logging.debug('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,
                      idet, objectid_gt)

        # Get all GT objects that are of interest.
        c_gt.execute(
            'SELECT * FROM objects WHERE imagefile=? AND name=? AND %s' %
            args.where_object_gt, (imagefile, name))
        entries_gt = c_gt.fetchall()
        objectids_gt_of_interest = [
            backendDb.objectField(entry, 'objectid') for entry in entries_gt
        ]

        # Compute TP and FP. An object is a TP if:
        # 1) it has a large enough IoU with a GT object and
        # 2) this GT object was not detected before.
        if max_IoU > args.IoU_thresh and not objectid_gt in already_detected:
            if objectid_gt not in objectids_gt_of_interest:
                # Matched GT is filtered out by where_object_gt: the detection
                # is neither TP nor FP.
                y_ignored[idet] = True
            already_detected.add(objectid_gt)
            y_true[idet] = True
        else:
            y_true[idet] = False

    # It doesn't matter if y_ignore'd GT fall into TP or FP. Kick them out.
    y_score = y_score[np.bitwise_not(y_ignored)]
    y_true = y_true[np.bitwise_not(y_ignored)]

    # Find the number of GT of interest.
    if class_name is None:
        c_gt.execute('SELECT COUNT(1) FROM objects WHERE %s' %
                     args.where_object_gt)
    else:
        c_gt.execute(
            'SELECT COUNT(1) FROM objects WHERE %s AND name=?' %
            args.where_object_gt, (class_name, ))
    num_gt = c_gt.fetchone()[0]
    logging.info('Number of ground truth "%s": %d', class_name, num_gt)

    # Add FN to y_score and y_true.
    num_fn = num_gt - np.count_nonzero(y_true)
    logging.info('Number of false negative "%s": %d', class_name, num_fn)
    y_score = np.pad(y_score, [0, num_fn], constant_values=0.)
    y_true = np.pad(y_true, [0, num_fn], constant_values=True)

    # We need the point for threshold=0 to have y=0. Not sure why it's not yet.
    # TODO: figure out how to do it properly.
    y_score = np.pad(y_score, [0, 1000000], constant_values=0.0001)
    y_true = np.pad(y_true, [0, 1000000], constant_values=False)

    if 'precision_recall_curve' in args.extra_metrics:
        # Pass arguments positionally: the keyword was renamed from
        # 'probas_pred' to 'y_score' in recent sklearn versions.
        precision, recall, _ = sklearn.metrics.precision_recall_curve(
            y_true, y_score)
        if args.out_dir:
            plt.clf()
            plt.plot(recall, precision)
            plt.xlim([0, 1])
            plt.ylim([0, 1])
            plt.xlabel('Recall')
            plt.ylabel('Precision')
            _beautifyPlot(plt.gca())
            _writeCurveValues(args.out_dir, recall, precision,
                              'precision-recall', class_name,
                              'recall precision')

    if 'roc_curve' in args.extra_metrics:
        # BUG FIX: roc_curve has no 'probas_pred' parameter (the old call
        # raised a TypeError); its second argument is the score array.
        fpr, tpr, _ = sklearn.metrics.roc_curve(y_true, y_score)
        # Log the AUC instead of silently discarding it.
        roc_auc = sklearn.metrics.auc(x=fpr, y=tpr)
        logging.info('ROC AUC for class "%s": %f', class_name, roc_auc)
        if args.out_dir:
            plt.clf()
            plt.plot(fpr, tpr)
            plt.xlim([0, 1])
            plt.ylim([0, 1])
            plt.xlabel('FPR')
            plt.ylabel('TPR')
            _beautifyPlot(plt.gca())
            _writeCurveValues(args.out_dir, fpr, tpr, 'roc', class_name,
                              'fpr tpr')

    # Compute all metrics for this class.
    aps = sklearn.metrics.average_precision_score(y_true=y_true,
                                                  y_score=y_score)
    if class_name is None:
        print('Average precision: %.4f' % aps)
    else:
        print('Average precision for class "%s": %.4f' % (class_name, aps))
    return aps
def evaluateDetectionParser(subparsers):
    ''' Define the command-line interface of the "evaluateDetection" command. '''
    sub = subparsers.add_parser(
        'evaluateDetection',
        description='Evaluate detections given a ground truth database.')
    sub.set_defaults(func=evaluateDetection)
    sub.add_argument('--gt_db_file', required=True)
    sub.add_argument('--IoU_thresh', type=float, default=0.5)
    sub.add_argument('--where_object_gt', default='TRUE')
    sub.add_argument(
        '--out_dir',
        help='If specified, plots and text files are written here.')
    sub.add_argument(
        '--extra_metrics',
        nargs='+',
        default=[],
        choices=['precision_recall_curve', 'roc_curve'],
        help='Select metrics to be computed in addition to average precision. '
        'This is implemented only for evaluation_backend="sklearn". '
        'They are computed for every class. The names match those at '
        'https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics'
    )
    sub.add_argument(
        '--evaluation_backend',
        choices=['sklearn', 'pascal-voc', 'sklearn-all-classes'],
        default='sklearn',
        help='Detection evaluation is different across papers and methods. '
        'PASCAL VOC produces average-precision score a bit different '
        'than the sklearn package. A good overview on metrics: '
        'https://github.com/rafaelpadilla/Object-Detection-Metrics. '
        '"sklearn-all-classes" reports only one accuracy.')
def evaluateDetection(c, args):
    ''' Evaluate a detection database against a ground truth database.

    Computes average precision per class (or pooled across classes for the
    "sklearn-all-classes" backend) with either the sklearn or the PASCAL VOC
    protocol, as chosen by args.evaluation_backend.
    '''
    if 'sklearn' in args.evaluation_backend:
        import sklearn.metrics

    # Load the ground truth database.
    if not op.exists(args.gt_db_file):
        raise FileNotFoundError('File does not exist: %s' % args.gt_db_file)
    conn_gt = backendDb.connect(args.gt_db_file, 'load_to_memory')
    c_gt = conn_gt.cursor()

    # Some info for logging.
    c.execute('SELECT COUNT(1) FROM objects')
    logging.info('The evaluated database has %d objects.', c.fetchone()[0])
    c_gt.execute('SELECT COUNT(1) FROM objects WHERE %s' %
                 args.where_object_gt)
    logging.info('The ground truth database has %d objects of interest.',
                 c_gt.fetchone()[0])

    c_gt.execute('SELECT DISTINCT(name) FROM objects')
    names = c_gt.fetchall()
    if args.evaluation_backend == 'sklearn':
        for name, in names:
            _evaluateDetectionForClassSklearn(c, c_gt, name, args, sklearn)
    elif args.evaluation_backend == 'pascal-voc':
        for name, in names:
            # BUG FIX: the parsed attribute is 'extra_metrics' (default []),
            # not 'metrics'; the old check raised AttributeError.
            if args.extra_metrics:
                logging.warning('extra_metrics not supported for pascal-voc.')
            _evaluateDetectionForClassPascal(c, c_gt, name, args)
    elif args.evaluation_backend == 'sklearn-all-classes':
        # This method does not separate results by classes.
        _evaluateDetectionForClassSklearn(c, c_gt, None, args, sklearn)
    else:
        assert False
    conn_gt.close()
def fast_hist(a, b, n):
    ''' Build an n x n confusion matrix from flat label arrays.

    Args:
      a: ground truth labels (entries outside [0, n) are ignored).
      b: predicted labels, same shape as a.
      n: number of classes.
    Returns:
      (n, n) array where entry [i, j] counts pixels with GT i predicted as j.
    '''
    valid = (a >= 0) & (a < n)
    flat_indices = n * a[valid].astype(int) + b[valid]
    return np.bincount(flat_indices, minlength=n**2).reshape(n, n)
def per_class_iu(hist):
    ''' Per-class intersection-over-union from a confusion matrix. '''
    intersection = np.diag(hist)
    union = hist.sum(1) + hist.sum(0) - intersection
    return intersection / union
def calc_fw_iu(hist):
    ''' Frequency-weighted IoU: per-class IoU weighted by GT class frequency. '''
    gt_per_class = hist.sum(1)
    pred_per_class = hist.sum(0)
    true_positives = np.diag(hist)
    weighted_iou = (gt_per_class * true_positives) / (
        pred_per_class + gt_per_class - true_positives)
    return np.nansum(weighted_iou) / gt_per_class.sum()
def calc_pixel_accuracy(hist):
    ''' Overall pixel accuracy: correctly classified pixels over all GT pixels. '''
    num_correct = np.diag(hist).sum()
    num_total = hist.sum(1).sum()
    return num_correct / num_total
def calc_mean_accuracy(hist):
    ''' Mean per-class recall (diagonal over GT count), NaN classes ignored. '''
    recall_per_class = np.diag(hist) / hist.sum(1)
    return np.nanmean(recall_per_class)
def save_colorful_images(prediction, filename, palette, postfix='_color.png'):
    ''' Map class ids in `prediction` through `palette` and save as an image.

    The output path is `filename` with its last 4 characters (assumed to be
    the extension) replaced by `postfix`.
    '''
    colored = palette[prediction.squeeze()]
    out_path = filename[:-4] + postfix
    PIL.Image.fromarray(colored).save(out_path)
def label_mapping(input_, mapping):
    ''' Remap label values: every occurrence of mapping[i][0] in `input_`
    becomes mapping[i][1]. Returns a new int64 array; input is not modified.
    '''
    output = np.copy(input_)
    for pair in mapping:
        output[input_ == pair[0]] = pair[1]
    return np.array(output, dtype=np.int64)
def plot_confusion_matrix(cm, classes, normalize=False, cmap=None):
    """
    Render a confusion matrix with matplotlib.
    If `normalize` is True, each row is divided by its sum so that rows show
    per-class proportions rather than raw counts.
    """
    cmap = plt.cm.Blues if cmap is None else cmap
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        logging.info("Normalized confusion matrix.")
    else:
        logging.info(
            'Confusion matrix will be computed without normalization.')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=90)
    plt.yticks(ticks, classes)

    plt.tight_layout()
    plt.ylabel('Ground truth')
    plt.xlabel('Predicted label')
def _label2classMapping(gt_mapping_dict, pred_mapping_dict):
''' Parse user-defined label mapping dictionaries. '''
# "gt_mapping_dict" maps mask pixel-values to classes.
labelmap_gt = ast.literal_eval(gt_mapping_dict)
labelmap_pr = ast.literal_eval(
pred_mapping_dict) if pred_mapping_dict else labelmap_gt
# Create a list of classes.
class_names = list(labelmap_gt.values())
labelmap_gt_new = {}
# Here, we remap pixel-values to indices of class_names.
for key in labelmap_gt:
labelmap_gt_new[key] = class_names.index(labelmap_gt[key])
labelmap_gt = labelmap_gt_new
labelmap_pr_new = {}
for key in labelmap_pr:
if not labelmap_pr[key] in class_names:
raise ValueError(
'Class %s is in "pred_mapping_dict" but not in "gt_mapping_dict"'
)
labelmap_pr_new[key] = class_names.index(labelmap_pr[key])
labelmap_pr = labelmap_pr_new
return labelmap_gt, labelmap_pr, class_names
def evaluateSegmentationIoUParser(subparsers):
    ''' Define the command-line interface of "evaluateSegmentationIoU". '''
    sub = subparsers.add_parser(
        'evaluateSegmentationIoU',
        description='Evaluate mask segmentation w.r.t. a ground truth db.')
    sub.set_defaults(func=evaluateSegmentationIoU)
    sub.add_argument('--gt_db_file', required=True)
    sub.add_argument('--where_image', default='TRUE')
    sub.add_argument(
        '--out_dir',
        help='If specified, output files with be written to "out_dir".')
    sub.add_argument(
        '--out_prefix',
        default='',
        help='A prefix to add to output filenames, '
        'Use it to keep predictions from different epochs in one dir.')
    sub.add_argument(
        '--gt_mapping_dict',
        required=True,
        help=
        'A map from ground truth maskfile to classes written as a json string. '
        'E.g. "{0: \'background\', 255: \'car\'}"')
    sub.add_argument(
        '--pred_mapping_dict',
        help='A map from predicted masks to classes written as a json string, '
        'if different from "gt_mapping_dict"')
    sub.add_argument(
        '--class_to_record_iou',
        help='If specified, IoU for a class is recorded into the "score" '
        'field of the "images" table. '
        'If not specified, mean IoU is recorded. '
        'Should correspond to values of "gt_mapping_dict". E.g. "background".')
    sub.add_argument(
        '--out_summary_file',
        help='Text file, where the summary is going to be appended as just one '
        'line of format: out_prefix \\t IoU_class1 \\t IoU_class2 \\t etc.')
def evaluateSegmentationIoU(c, args):
    ''' Evaluate semantic segmentation masks w.r.t. a ground truth database.

    For every image present in both databases, loads the predicted and the
    ground truth masks, maps their pixel-values to class indices via
    args.gt_mapping_dict / args.pred_mapping_dict, and accumulates a
    confusion matrix. Per-image IoU (mean, or of args.class_to_record_iou)
    is recorded into the "score" field of the "images" table. Prints
    per-class and aggregate metrics, and optionally writes a summary file,
    confusion matrix plot and csv files to args.out_dir.
    '''
    import pandas as pd
    import matplotlib.pyplot as plt

    # Get corresponding maskfiles from predictions and ground truth.
    logging.info('Opening ground truth dataset: %s', args.gt_db_file)
    c.execute('ATTACH ? AS "attached"', (args.gt_db_file, ))
    c.execute('SELECT pr.imagefile,pr.maskfile,gt.maskfile '
              'FROM images pr INNER JOIN attached.images gt '
              'WHERE pr.imagefile=gt.imagefile AND pr.maskfile IS NOT NULL '
              'AND gt.maskfile IS NOT NULL '
              'AND %s '
              'ORDER BY pr.imagefile ASC' % args.where_image)
    entries = c.fetchall()
    logging.info(
        'Total %d images in both the open and the ground truth databases.',
        len(entries))
    logging.debug(pprint.pformat(entries))

    imreader = backendMedia.MediaReader(rootdir=args.rootdir)

    labelmap_gt, labelmap_pr, class_names = _label2classMapping(
        args.gt_mapping_dict, args.pred_mapping_dict)

    if args.class_to_record_iou is not None and not args.class_to_record_iou in class_names:
        raise ValueError(
            'class_to_record_iou=%s is not among values of gt_mapping_dict=%s'
            % (args.class_to_record_iou, args.gt_mapping_dict))

    hist_all = np.zeros((len(class_names), len(class_names)))
    for imagefile, maskfile_pr, maskfile_gt in progressbar.progressbar(
            entries):

        # Load masks and bring them to comparable form.
        mask_gt = util.applyLabelMappingToMask(imreader.maskread(maskfile_gt),
                                               labelmap_gt)
        mask_pr = util.applyLabelMappingToMask(imreader.maskread(maskfile_pr),
                                               labelmap_pr)
        mask_pr = cv2.resize(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]),
                             interpolation=cv2.INTER_NEAREST)

        # Evaluate one image pair. NaN pixels in GT are ignored.
        careabout = ~np.isnan(mask_gt)
        mask_gt = mask_gt[careabout][:].astype(int)
        mask_pr = mask_pr[careabout][:].astype(int)
        hist = fast_hist(mask_gt, mask_pr, len(class_names))
        hist_all += hist

        # Compute and record results by image.
        iou_list = per_class_iu(hist)
        if args.class_to_record_iou is None:
            iou = iou_list.mean()
        else:
            iou = iou_list[class_names.index(args.class_to_record_iou)]
        c.execute('UPDATE images SET score=? WHERE imagefile=?',
                  (iou, imagefile))

    # Get label distribution.
    pr_per_class = hist_all.sum(0)
    gt_per_class = hist_all.sum(1)

    iou_list = per_class_iu(hist_all)
    fwIoU = calc_fw_iu(hist_all)
    pixAcc = calc_pixel_accuracy(hist_all)
    mAcc = calc_mean_accuracy(hist_all)

    result_df = pd.DataFrame({
        'class': class_names,
        'IoU': iou_list,
        "pr_distribution": pr_per_class,
        "gt_distribution": gt_per_class,
    })
    result_df["IoU"] *= 100  # Changing to percent ratio.

    result_df.set_index("class", inplace=True)
    print("---- info per class -----")
    print(result_df)

    result_ser = pd.Series({
        "pixAcc": pixAcc,
        "mAcc": mAcc,
        "fwIoU": fwIoU,
        "mIoU": iou_list.mean()
    })
    result_ser = result_ser[["pixAcc", "mAcc", "fwIoU", "mIoU"]]
    result_ser *= 100  # change to percent ratio

    if args.out_dir is not None:
        if not op.exists(args.out_dir):
            os.makedirs(args.out_dir)
        # NOTE(review): args.out_summary_file is optional in the parser;
        # op.join fails if it is None while out_dir is given -- confirm.
        out_summary_path = op.join(args.out_dir, args.out_summary_file)
        logging.info('Will add summary to: %s', out_summary_path)
        with open(out_summary_path, 'a') as f:
            f.write(args.out_prefix + '\t' +
                    '\t'.join(['%.2f' % x for x in result_df['IoU']]) + '\n')

        # Save confusion matrix
        fig = plt.figure()
        # BUG FIX: previously normalized the last image's 'hist' rather than
        # the accumulated 'hist_all'.
        normalized_hist = (hist_all.astype("float") /
                           hist_all.sum(axis=1)[:, np.newaxis])
        plot_confusion_matrix(normalized_hist, classes=class_names)
        outfigfn = op.join(args.out_dir, "%sconf_mat.pdf" % args.out_prefix)
        fig.savefig(outfigfn,
                    transparent=True,
                    bbox_inches='tight',
                    pad_inches=0,
                    dpi=300)
        print("Confusion matrix was saved to %s" % outfigfn)

        outdffn = op.join(args.out_dir,
                          "%seval_result_df.csv" % args.out_prefix)
        result_df.to_csv(outdffn)
        print('Info per class was saved at %s !' % outdffn)
        outserfn = op.join(args.out_dir,
                           "%seval_result_ser.csv" % args.out_prefix)
        result_ser.to_csv(outserfn)
        print('Total result is saved at %s !' % outserfn)
def getPrecRecall(tp, fp, fn):
    ''' Accumulate per-threshold counts into a Precision-Recall curve.

    Args:
      tp, fp, fn: arrays of length 256 with true positive, false positive,
                  and false negative counts at each threshold value.
    Returns:
      ROC:  array of (recall, precision) rows with undefined points (0/0
            divisions) removed and a final point appended at recall=0.
      area: area under the curve (average precision).
    '''
    ROC = np.zeros((256, 2), dtype=float)
    for val in range(256):
        # Precision/recall are undefined (marked -1) when their denominator
        # is zero; those rows are filtered out below.
        if tp[val] == 0 and fp[val] == 0:
            precision = -1.
        else:
            precision = tp[val] / float(tp[val] + fp[val])
        if tp[val] == 0 and fn[val] == 0:
            recall = -1.
        else:
            recall = tp[val] / float(tp[val] + fn[val])
        ROC[val, 0] = recall
        ROC[val, 1] = precision
    ROC = ROC[np.bitwise_and(ROC[:, 0] != -1, ROC[:, 1] != -1), :]
    # Close the curve at recall=0 with the last precision value.
    # NOTE(review): if every row was undefined, ROC[-1, 1] raises IndexError,
    # same as in the original implementation.
    ROC = np.vstack((ROC, np.array([0, ROC[-1, 1]])))
    # BUG FIX (compat): np.trapz was removed in NumPy 2.0 in favor of
    # np.trapezoid; pick whichever exists.
    _trapezoid = getattr(np, 'trapezoid', getattr(np, 'trapz', None))
    area = -_trapezoid(x=ROC[:, 0], y=ROC[:, 1])
    return ROC, area
def evaluateBinarySegmentationParser(subparsers):
    ''' Define the command-line interface of "evaluateBinarySegmentation". '''
    sub = subparsers.add_parser(
        'evaluateBinarySegmentation',
        description=
        'Evaluate mask segmentation ROC curve w.r.t. a ground truth db. '
        'Ground truth values must be 0 for background, 255 for foreground, '
        'and the rest for "dontcare".'
        'Predicted mask must be grayscale in [0,255], '
        'with brightness meaning probability of foreground.')
    sub.set_defaults(func=evaluateBinarySegmentation)
    sub.add_argument('--gt_db_file', required=True)
    sub.add_argument('--where_image', default='TRUE')
    sub.add_argument(
        '--out_dir',
        help='If specified, result files with be written to "out_dir".')
    sub.add_argument(
        '--out_prefix',
        default='',
        help='A prefix to add to output filenames, '
        'Use it to keep predictions from different epochs in one dir.')
    sub.add_argument('--display_images_roc',
                     action='store_true',
                     help='Specify to display on screen')
def evaluateBinarySegmentation(c, args):
    ''' Evaluate grayscale probability masks against binary ground truth.

    Ground truth masks use 0 for background, 255 for foreground, and any
    other value for "dontcare". Predicted masks are grayscale in [0, 255],
    interpreted as foreground probability. Accumulates TP/FP/FN counts at
    all 256 thresholds, logs per-image average precision, prints the overall
    area under the Precision-Recall curve, and optionally saves a plot.
    '''
    # Get corresponding maskfiles from predictions and ground truth.
    c.execute('ATTACH ? AS "attached"', (args.gt_db_file, ))
    c.execute('SELECT pr.imagefile,pr.maskfile,gt.maskfile '
              'FROM images pr INNER JOIN attached.images gt '
              'WHERE pr.imagefile=gt.imagefile '
              'AND pr.maskfile IS NOT NULL '
              'AND gt.maskfile IS NOT NULL '
              'AND %s '
              'ORDER BY pr.imagefile ASC' % args.where_image)
    entries = c.fetchall()
    logging.info(
        'Total %d images in both the open and the ground truth databases.' %
        len(entries))
    logging.debug(pprint.pformat(entries))

    imreader = backendMedia.MediaReader(rootdir=args.rootdir)

    TPs = np.zeros((256, ), dtype=int)
    FPs = np.zeros((256, ), dtype=int)
    FNs = np.zeros((256, ), dtype=int)

    if args.display_images_roc:
        fig = plt.figure()
        plt.xlabel('recall')
        plt.ylabel('precision')
        plt.xlim(0, 1)
        plt.ylim(0, 1)

    for imagefile, maskfile_pr, maskfile_gt in progressbar.progressbar(
            entries):

        # Load masks and bring them to comparable form.
        mask_gt = imreader.maskread(maskfile_gt)
        mask_pr = imreader.maskread(maskfile_pr)
        # BUG FIX: the interpolation flag was previously passed positionally
        # into the 'dst' parameter of cv2.resize; it must be a keyword.
        mask_pr = cv2.resize(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]),
                             interpolation=cv2.INTER_NEAREST)

        # Some printouts.
        gt_pos = np.count_nonzero(mask_gt == 255)
        gt_neg = np.count_nonzero(mask_gt == 0)
        gt_other = mask_gt.size - gt_pos - gt_neg
        logging.debug('GT: positive: %d, negative: %d, others: %d.', gt_pos,
                      gt_neg, gt_other)

        # If there is torch.
        try:
            import torch
            # Use only relevant pixels (not the 'dontcare' class.)
            relevant = np.bitwise_or(mask_gt == 0, mask_gt == 255)
            mask_gt = mask_gt[relevant].flatten()
            mask_pr = mask_pr[relevant].flatten()

            mask_gt = torch.Tensor(mask_gt)
            mask_pr = torch.Tensor(mask_pr)
            try:
                mask_gt = mask_gt.cuda()
                mask_pr = mask_pr.cuda()
            except RuntimeError:
                # No GPU available; stay on the CPU.
                pass

            TP = np.zeros((256, ), dtype=int)
            FP = np.zeros((256, ), dtype=int)
            FN = np.zeros((256, ), dtype=int)
            for val in range(256):
                tp = torch.nonzero(torch.mul(mask_pr > val,
                                             mask_gt == 255)).size()[0]
                fp = torch.nonzero(torch.mul(mask_pr > val,
                                             mask_gt != 255)).size()[0]
                fn = torch.nonzero(torch.mul(mask_pr <= val,
                                             mask_gt == 255)).size()[0]
                # (The true-negative count is not needed for the PR curve.)
                TP[val] = tp
                FP[val] = fp
                FN[val] = fn
                TPs[val] += tp
                FPs[val] += fp
                FNs[val] += fn
            ROC, area = getPrecRecall(TP, FP, FN)
            logging.info('%s\t%.2f' % (op.basename(imagefile), area * 100.))
        except ImportError:
            # TODO: write the same without torch, on CPU
            raise NotImplementedError(
                'Non-torch implementation is still to be implemented.')

        if args.display_images_roc:
            plt.plot(ROC[:, 0], ROC[:, 1], 'go-', linewidth=2, markersize=4)
            plt.pause(0.05)
            fig.show()

    # Accumulate into Precision-Recall curve.
    ROC, area = getPrecRecall(TPs, FPs, FNs)
    print(
        "Average across image area under the Precision-Recall curve, perc: %.2f"
        % (area * 100.))

    if args.out_dir is not None:
        if not op.exists(args.out_dir):
            os.makedirs(args.out_dir)
        fig = plt.figure()
        plt.xlabel('recall')
        plt.ylabel('precision')
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        plt.plot(ROC[:, 0], ROC[:, 1], 'bo-', linewidth=2, markersize=6)
        out_plot_path = op.join(args.out_dir,
                                '%srecall-prec.png' % args.out_prefix)
        fig.savefig(out_plot_path,
                    transparent=True,
                    bbox_inches='tight',
                    pad_inches=0,
                    dpi=300)
|
[
"pprint.pformat",
"numpy.sum",
"numpy.maximum",
"numpy.argmax",
"matplotlib.pyplot.clf",
"numpy.isnan",
"matplotlib.pyplot.figure",
"lib.backend.backendDb.connect",
"matplotlib.pyplot.gca",
"numpy.diag",
"numpy.bitwise_or",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.nanmean",
"numpy.pad",
"pandas.DataFrame",
"lib.backend.backendMedia.MediaReader",
"numpy.copy",
"logging.warning",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.cumsum",
"numpy.max",
"torch.Tensor",
"numpy.finfo",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.pause",
"cv2.resize",
"numpy.trapz",
"numpy.minimum",
"lib.backend.backendDb.objectField",
"matplotlib.pyplot.ylim",
"os.path.basename",
"numpy.bitwise_not",
"lib.utils.util.validateFileName",
"torch.mul",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.xlim",
"logging.debug",
"numpy.count_nonzero",
"matplotlib.pyplot.plot",
"os.makedirs",
"progressbar.progressbar",
"numpy.zeros",
"logging.info",
"numpy.where",
"numpy.array",
"numpy.bitwise_and",
"ast.literal_eval",
"matplotlib.pyplot.xlabel"
] |
[((4587, 4638), 'logging.info', 'logging.info', (['"""Total objects of interest: %d"""', 'n_gt'], {}), "('Total objects of interest: %d', n_gt)\n", (4599, 4638), False, 'import logging\n'), ((5041, 5054), 'numpy.cumsum', 'np.cumsum', (['fp'], {}), '(fp)\n', (5050, 5054), True, 'import numpy as np\n'), ((5064, 5077), 'numpy.cumsum', 'np.cumsum', (['tp'], {}), '(tp)\n', (5073, 5077), True, 'import numpy as np\n'), ((5981, 6008), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.2)'], {}), '(0.2)\n', (6003, 6008), True, 'import matplotlib.ticker as ticker\n'), ((6091, 6118), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (6113, 6118), True, 'import matplotlib.ticker as ticker\n'), ((10568, 10635), 'logging.info', 'logging.info', (['"""Number of ground truth "%s": %d"""', 'class_name', 'num_gt'], {}), '(\'Number of ground truth "%s": %d\', class_name, num_gt)\n', (10580, 10635), False, 'import logging\n'), ((10724, 10793), 'logging.info', 'logging.info', (['"""Number of false negative "%s": %d"""', 'class_name', 'num_fn'], {}), '(\'Number of false negative "%s": %d\', class_name, num_fn)\n', (10736, 10793), False, 'import logging\n'), ((10808, 10857), 'numpy.pad', 'np.pad', (['y_score', '[0, num_fn]'], {'constant_values': '(0.0)'}), '(y_score, [0, num_fn], constant_values=0.0)\n', (10814, 10857), True, 'import numpy as np\n'), ((10870, 10919), 'numpy.pad', 'np.pad', (['y_true', '[0, num_fn]'], {'constant_values': '(True)'}), '(y_true, [0, num_fn], constant_values=True)\n', (10876, 10919), True, 'import numpy as np\n'), ((11061, 11114), 'numpy.pad', 'np.pad', (['y_score', '[0, 1000000]'], {'constant_values': '(0.0001)'}), '(y_score, [0, 1000000], constant_values=0.0001)\n', (11067, 11114), True, 'import numpy as np\n'), ((11128, 11179), 'numpy.pad', 'np.pad', (['y_true', '[0, 1000000]'], {'constant_values': '(False)'}), '(y_true, [0, 1000000], constant_values=False)\n', (11134, 11179), True, 'import numpy as 
np\n'), ((14489, 14541), 'lib.backend.backendDb.connect', 'backendDb.connect', (['args.gt_db_file', '"""load_to_memory"""'], {}), "(args.gt_db_file, 'load_to_memory')\n", (14506, 14541), False, 'from lib.backend import backendDb\n'), ((16393, 16418), 'numpy.nanmean', 'np.nanmean', (['acc_per_class'], {}), '(acc_per_class)\n', (16403, 16418), True, 'import numpy as np\n'), ((16648, 16663), 'numpy.copy', 'np.copy', (['input_'], {}), '(input_)\n', (16655, 16663), True, 'import numpy as np\n'), ((16771, 16803), 'numpy.array', 'np.array', (['output'], {'dtype': 'np.int64'}), '(output, dtype=np.int64)\n', (16779, 16803), True, 'import numpy as np\n'), ((17303, 17353), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (17313, 17353), True, 'import matplotlib.pyplot as plt\n'), ((17358, 17372), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (17370, 17372), True, 'import matplotlib.pyplot as plt\n'), ((17418, 17462), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(90)'}), '(tick_marks, classes, rotation=90)\n', (17428, 17462), True, 'import matplotlib.pyplot as plt\n'), ((17467, 17498), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (17477, 17498), True, 'import matplotlib.pyplot as plt\n'), ((17504, 17522), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17520, 17522), True, 'import matplotlib.pyplot as plt\n'), ((17527, 17553), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ground truth"""'], {}), "('Ground truth')\n", (17537, 17553), True, 'import matplotlib.pyplot as plt\n'), ((17558, 17587), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (17568, 17587), True, 'import matplotlib.pyplot as plt\n'), ((17788, 17821), 'ast.literal_eval', 'ast.literal_eval', (['gt_mapping_dict'], {}), 
'(gt_mapping_dict)\n', (17804, 17821), False, 'import ast\n'), ((20365, 20430), 'logging.info', 'logging.info', (['"""Opening ground truth dataset: %s"""', 'args.gt_db_file'], {}), "('Opening ground truth dataset: %s', args.gt_db_file)\n", (20377, 20430), False, 'import logging\n'), ((21025, 21071), 'lib.backend.backendMedia.MediaReader', 'backendMedia.MediaReader', ([], {'rootdir': 'args.rootdir'}), '(rootdir=args.rootdir)\n', (21049, 21071), False, 'from lib.backend import backendMedia\n'), ((21566, 21598), 'progressbar.progressbar', 'progressbar.progressbar', (['entries'], {}), '(entries)\n', (21589, 21598), False, 'import progressbar\n'), ((22976, 23099), 'pandas.DataFrame', 'pd.DataFrame', (["{'class': class_names, 'IoU': iou_list, 'pr_distribution': pr_per_class,\n 'gt_distribution': gt_per_class}"], {}), "({'class': class_names, 'IoU': iou_list, 'pr_distribution':\n pr_per_class, 'gt_distribution': gt_per_class})\n", (22988, 23099), True, 'import pandas as pd\n'), ((25029, 25060), 'numpy.zeros', 'np.zeros', (['(256, 2)'], {'dtype': 'float'}), '((256, 2), dtype=float)\n', (25037, 25060), True, 'import numpy as np\n'), ((27458, 27504), 'lib.backend.backendMedia.MediaReader', 'backendMedia.MediaReader', ([], {'rootdir': 'args.rootdir'}), '(rootdir=args.rootdir)\n', (27482, 27504), False, 'from lib.backend import backendMedia\n'), ((27516, 27543), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (27524, 27543), True, 'import numpy as np\n'), ((27555, 27582), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (27563, 27582), True, 'import numpy as np\n'), ((27594, 27621), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (27602, 27621), True, 'import numpy as np\n'), ((27838, 27870), 'progressbar.progressbar', 'progressbar.progressbar', (['entries'], {}), '(entries)\n', (27861, 27870), False, 'import progressbar\n'), ((672, 707), 'numpy.concatenate', 
'np.concatenate', (['([0.0], rec, [1.0])'], {}), '(([0.0], rec, [1.0]))\n', (686, 707), True, 'import numpy as np\n'), ((721, 757), 'numpy.concatenate', 'np.concatenate', (['([0.0], prec, [0.0])'], {}), '(([0.0], prec, [0.0]))\n', (735, 757), True, 'import numpy as np\n'), ((1111, 1156), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (1117, 1156), True, 'import numpy as np\n'), ((1954, 1999), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""imagefile"""'], {}), "(entry_det, 'imagefile')\n", (1975, 1999), False, 'from lib.backend import backendDb\n'), ((2015, 2055), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""name"""'], {}), "(entry_det, 'name')\n", (2036, 2055), False, 'from lib.backend import backendDb\n'), ((2721, 2761), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 0]', 'bbox_det[0]'], {}), '(bboxes_gt[:, 0], bbox_det[0])\n', (2731, 2761), True, 'import numpy as np\n'), ((2778, 2818), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 1]', 'bbox_det[1]'], {}), '(bboxes_gt[:, 1], bbox_det[1])\n', (2788, 2818), True, 'import numpy as np\n'), ((2835, 2907), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 0] + bboxes_gt[:, 2])', '(bbox_det[0] + bbox_det[2])'], {}), '(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox_det[0] + bbox_det[2])\n', (2845, 2907), True, 'import numpy as np\n'), ((2951, 3023), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 1] + bboxes_gt[:, 3])', '(bbox_det[1] + bbox_det[3])'], {}), '(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox_det[1] + bbox_det[3])\n', (2961, 3023), True, 'import numpy as np\n'), ((3064, 3094), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin)', '(0.0)'], {}), '(ixmax - ixmin, 0.0)\n', (3074, 3094), True, 'import numpy as np\n'), ((3107, 3137), 'numpy.maximum', 'np.maximum', (['(iymax - iymin)', '(0.0)'], {}), '(iymax - iymin, 0.0)\n', (3117, 3137), True, 'import numpy as np\n'), ((3424, 3436), 
'numpy.max', 'np.max', (['IoUs'], {}), '(IoUs)\n', (3430, 3436), True, 'import numpy as np\n'), ((3497, 3591), 'logging.debug', 'logging.debug', (['"""max_IoU=%.3f for idet %d with objectid_gt %d."""', 'max_IoU', 'idet', 'objectid_gt'], {}), "('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,\n idet, objectid_gt)\n", (3510, 3591), False, 'import logging\n'), ((4689, 4712), 'numpy.bitwise_not', 'np.bitwise_not', (['ignored'], {}), '(ignored)\n', (4703, 4712), True, 'import numpy as np\n'), ((4726, 4749), 'numpy.bitwise_not', 'np.bitwise_not', (['ignored'], {}), '(ignored)\n', (4740, 4749), True, 'import numpy as np\n'), ((4825, 4850), 'numpy.count_nonzero', 'np.count_nonzero', (['ignored'], {}), '(ignored)\n', (4841, 4850), True, 'import numpy as np\n'), ((4852, 4872), 'numpy.count_nonzero', 'np.count_nonzero', (['tp'], {}), '(tp)\n', (4868, 4872), True, 'import numpy as np\n'), ((4891, 4911), 'numpy.count_nonzero', 'np.count_nonzero', (['fp'], {}), '(fp)\n', (4907, 4911), True, 'import numpy as np\n'), ((5480, 5507), 'lib.utils.util.validateFileName', 'util.validateFileName', (['name'], {}), '(name)\n', (5501, 5507), False, 'from lib.utils import util\n'), ((5608, 5641), 'os.path.join', 'op.join', (['out_dir', "('%s.png' % stem)"], {}), "(out_dir, '%s.png' % stem)\n", (5615, 5641), True, 'import os, os.path as op\n'), ((5659, 5692), 'os.path.join', 'op.join', (['out_dir', "('%s.eps' % stem)"], {}), "(out_dir, '%s.eps' % stem)\n", (5666, 5692), True, 'import os, os.path as op\n'), ((7388, 7433), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""imagefile"""'], {}), "(entry_det, 'imagefile')\n", (7409, 7433), False, 'from lib.backend import backendDb\n'), ((7449, 7489), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""name"""'], {}), "(entry_det, 'name')\n", (7470, 7489), False, 'from lib.backend import backendDb\n'), ((7506, 7547), 'lib.backend.backendDb.objectField', 'backendDb.objectField', 
(['entry_det', '"""score"""'], {}), "(entry_det, 'score')\n", (7527, 7547), False, 'from lib.backend import backendDb\n'), ((8280, 8320), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 0]', 'bbox_det[0]'], {}), '(bboxes_gt[:, 0], bbox_det[0])\n', (8290, 8320), True, 'import numpy as np\n'), ((8337, 8377), 'numpy.maximum', 'np.maximum', (['bboxes_gt[:, 1]', 'bbox_det[1]'], {}), '(bboxes_gt[:, 1], bbox_det[1])\n', (8347, 8377), True, 'import numpy as np\n'), ((8394, 8466), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 0] + bboxes_gt[:, 2])', '(bbox_det[0] + bbox_det[2])'], {}), '(bboxes_gt[:, 0] + bboxes_gt[:, 2], bbox_det[0] + bbox_det[2])\n', (8404, 8466), True, 'import numpy as np\n'), ((8510, 8582), 'numpy.minimum', 'np.minimum', (['(bboxes_gt[:, 1] + bboxes_gt[:, 3])', '(bbox_det[1] + bbox_det[3])'], {}), '(bboxes_gt[:, 1] + bboxes_gt[:, 3], bbox_det[1] + bbox_det[3])\n', (8520, 8582), True, 'import numpy as np\n'), ((8623, 8653), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin)', '(0.0)'], {}), '(ixmax - ixmin, 0.0)\n', (8633, 8653), True, 'import numpy as np\n'), ((8666, 8696), 'numpy.maximum', 'np.maximum', (['(iymax - iymin)', '(0.0)'], {}), '(iymax - iymin, 0.0)\n', (8676, 8696), True, 'import numpy as np\n'), ((9018, 9030), 'numpy.max', 'np.max', (['IoUs'], {}), '(IoUs)\n', (9024, 9030), True, 'import numpy as np\n'), ((9091, 9185), 'logging.debug', 'logging.debug', (['"""max_IoU=%.3f for idet %d with objectid_gt %d."""', 'max_IoU', 'idet', 'objectid_gt'], {}), "('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,\n idet, objectid_gt)\n", (9104, 9185), False, 'import logging\n'), ((10136, 10161), 'numpy.bitwise_not', 'np.bitwise_not', (['y_ignored'], {}), '(y_ignored)\n', (10150, 10161), True, 'import numpy as np\n'), ((10183, 10208), 'numpy.bitwise_not', 'np.bitwise_not', (['y_ignored'], {}), '(y_ignored)\n', (10197, 10208), True, 'import numpy as np\n'), ((10695, 10719), 'numpy.count_nonzero', 'np.count_nonzero', (['y_true'], {}), '(y_true)\n', 
(10711, 10719), True, 'import numpy as np\n'), ((14370, 14396), 'os.path.exists', 'op.exists', (['args.gt_db_file'], {}), '(args.gt_db_file)\n', (14379, 14396), True, 'import os, os.path as op\n'), ((15865, 15878), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (15872, 15878), True, 'import numpy as np\n'), ((16353, 16366), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16360, 16366), True, 'import numpy as np\n'), ((17149, 17193), 'logging.info', 'logging.info', (['"""Normalized confusion matrix."""'], {}), "('Normalized confusion matrix.')\n", (17161, 17193), False, 'import logging\n'), ((17212, 17284), 'logging.info', 'logging.info', (['"""Confusion matrix will be computed without normalization."""'], {}), "('Confusion matrix will be computed without normalization.')\n", (17224, 17284), False, 'import logging\n'), ((17840, 17875), 'ast.literal_eval', 'ast.literal_eval', (['pred_mapping_dict'], {}), '(pred_mapping_dict)\n', (17856, 17875), False, 'import ast\n'), ((20984, 21007), 'pprint.pformat', 'pprint.pformat', (['entries'], {}), '(entries)\n', (20998, 21007), False, 'import pprint\n'), ((21966, 22061), 'cv2.resize', 'cv2.resize', (['mask_pr', '(mask_gt.shape[1], mask_gt.shape[0])'], {'interpolation': 'cv2.INTER_NEAREST'}), '(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]), interpolation=cv2\n .INTER_NEAREST)\n', (21976, 22061), False, 'import cv2\n'), ((23696, 23740), 'os.path.join', 'op.join', (['args.out_dir', 'args.out_summary_file'], {}), '(args.out_dir, args.out_summary_file)\n', (23703, 23740), True, 'import os, os.path as op\n'), ((23749, 23806), 'logging.info', 'logging.info', (['"""Will add summary to: %s"""', 'out_summary_path'], {}), "('Will add summary to: %s', out_summary_path)\n", (23761, 23806), False, 'import logging\n'), ((24024, 24036), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24034, 24036), True, 'import matplotlib.pyplot as plt\n'), ((24235, 24292), 'os.path.join', 'op.join', (['args.out_dir', 
"('%sconf_mat.pdf' % args.out_prefix)"], {}), "(args.out_dir, '%sconf_mat.pdf' % args.out_prefix)\n", (24242, 24292), True, 'import os, os.path as op\n'), ((24545, 24608), 'os.path.join', 'op.join', (['args.out_dir', "('%seval_result_df.csv' % args.out_prefix)"], {}), "(args.out_dir, '%seval_result_df.csv' % args.out_prefix)\n", (24552, 24608), True, 'import os, os.path as op\n'), ((24748, 24812), 'os.path.join', 'op.join', (['args.out_dir', "('%seval_result_ser.csv' % args.out_prefix)"], {}), "(args.out_dir, '%seval_result_ser.csv' % args.out_prefix)\n", (24755, 24812), True, 'import os, os.path as op\n'), ((25562, 25596), 'numpy.trapz', 'np.trapz', ([], {'x': 'ROC[:, 0]', 'y': 'ROC[:, 1]'}), '(x=ROC[:, 0], y=ROC[:, 1])\n', (25570, 25596), True, 'import numpy as np\n'), ((27417, 27440), 'pprint.pformat', 'pprint.pformat', (['entries'], {}), '(entries)\n', (27431, 27440), False, 'import pprint\n'), ((27670, 27682), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27680, 27682), True, 'import matplotlib.pyplot as plt\n'), ((27691, 27711), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recall"""'], {}), "('recall')\n", (27701, 27711), True, 'import matplotlib.pyplot as plt\n'), ((27720, 27743), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (27730, 27743), True, 'import matplotlib.pyplot as plt\n'), ((27752, 27766), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (27760, 27766), True, 'import matplotlib.pyplot as plt\n'), ((27775, 27789), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (27783, 27789), True, 'import matplotlib.pyplot as plt\n'), ((28058, 28134), 'cv2.resize', 'cv2.resize', (['mask_pr', '(mask_gt.shape[1], mask_gt.shape[0])', 'cv2.INTER_NEAREST'], {}), '(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]), cv2.INTER_NEAREST)\n', (28068, 28134), False, 'import cv2\n'), ((28208, 28240), 'numpy.count_nonzero', 'np.count_nonzero', (['(mask_gt == 255)'], {}), 
'(mask_gt == 255)\n', (28224, 28240), True, 'import numpy as np\n'), ((28258, 28288), 'numpy.count_nonzero', 'np.count_nonzero', (['(mask_gt == 0)'], {}), '(mask_gt == 0)\n', (28274, 28288), True, 'import numpy as np\n'), ((28347, 28437), 'logging.debug', 'logging.debug', (['"""GT: positive: %d, negative: %d, others: %d."""', 'gt_pos', 'gt_neg', 'gt_other'], {}), "('GT: positive: %d, negative: %d, others: %d.', gt_pos, gt_neg,\n gt_other)\n", (28360, 28437), False, 'import logging\n'), ((30707, 30719), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30717, 30719), True, 'import matplotlib.pyplot as plt\n'), ((30728, 30748), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recall"""'], {}), "('recall')\n", (30738, 30748), True, 'import matplotlib.pyplot as plt\n'), ((30757, 30780), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (30767, 30780), True, 'import matplotlib.pyplot as plt\n'), ((30789, 30803), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (30797, 30803), True, 'import matplotlib.pyplot as plt\n'), ((30812, 30826), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (30820, 30826), True, 'import matplotlib.pyplot as plt\n'), ((30835, 30899), 'matplotlib.pyplot.plot', 'plt.plot', (['ROC[:, 0]', 'ROC[:, 1]', '"""bo-"""'], {'linewidth': '(2)', 'markersize': '(6)'}), "(ROC[:, 0], ROC[:, 1], 'bo-', linewidth=2, markersize=6)\n", (30843, 30899), True, 'import matplotlib.pyplot as plt\n'), ((30924, 30984), 'os.path.join', 'op.join', (['args.out_dir', "('%srecall-prec.png' % args.out_prefix)"], {}), "(args.out_dir, '%srecall-prec.png' % args.out_prefix)\n", (30931, 30984), True, 'import os, os.path as op\n'), ((871, 903), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (881, 903), True, 'import numpy as np\n'), ((1024, 1055), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (1032, 
1055), True, 'import numpy as np\n'), ((1851, 1891), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""bbox"""'], {}), "(entry_det, 'bbox')\n", (1872, 1891), False, 'from lib.backend import backendDb\n'), ((2318, 2358), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (2339, 2358), False, 'from lib.backend import backendDb\n'), ((3472, 3487), 'numpy.argmax', 'np.argmax', (['IoUs'], {}), '(IoUs)\n', (3481, 3487), True, 'import numpy as np\n'), ((3921, 3961), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (3942, 3961), False, 'from lib.backend import backendDb\n'), ((4976, 5001), 'numpy.count_nonzero', 'np.count_nonzero', (['ignored'], {}), '(ignored)\n', (4992, 5001), True, 'import numpy as np\n'), ((5708, 5741), 'os.path.join', 'op.join', (['out_dir', "('%s.txt' % stem)"], {}), "(out_dir, '%s.txt' % stem)\n", (5715, 5741), True, 'import os, os.path as op\n'), ((7285, 7325), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry_det', '"""bbox"""'], {}), "(entry_det, 'bbox')\n", (7306, 7325), False, 'from lib.backend import backendDb\n'), ((7843, 7883), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (7864, 7883), False, 'from lib.backend import backendDb\n'), ((9066, 9081), 'numpy.argmax', 'np.argmax', (['IoUs'], {}), '(IoUs)\n', (9075, 9081), True, 'import numpy as np\n'), ((9491, 9531), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""objectid"""'], {}), "(entry, 'objectid')\n", (9512, 9531), False, 'from lib.backend import backendDb\n'), ((11392, 11401), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11399, 11401), True, 'import matplotlib.pyplot as plt\n'), ((11414, 11441), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {}), '(recall, 
precision)\n', (11422, 11441), True, 'import matplotlib.pyplot as plt\n'), ((11454, 11470), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (11462, 11470), True, 'import matplotlib.pyplot as plt\n'), ((11483, 11499), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (11491, 11499), True, 'import matplotlib.pyplot as plt\n'), ((11512, 11532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (11522, 11532), True, 'import matplotlib.pyplot as plt\n'), ((11545, 11568), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (11555, 11568), True, 'import matplotlib.pyplot as plt\n'), ((12035, 12044), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12042, 12044), True, 'import matplotlib.pyplot as plt\n'), ((12057, 12075), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (12065, 12075), True, 'import matplotlib.pyplot as plt\n'), ((12088, 12104), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (12096, 12104), True, 'import matplotlib.pyplot as plt\n'), ((12117, 12133), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (12125, 12133), True, 'import matplotlib.pyplot as plt\n'), ((12146, 12163), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (12156, 12163), True, 'import matplotlib.pyplot as plt\n'), ((12176, 12193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (12186, 12193), True, 'import matplotlib.pyplot as plt\n'), ((15910, 15923), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (15917, 15923), True, 'import numpy as np\n'), ((22143, 22160), 'numpy.isnan', 'np.isnan', (['mask_gt'], {}), '(mask_gt)\n', (22151, 22160), True, 'import numpy as np\n'), ((23605, 23628), 'os.path.exists', 'op.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (23614, 23628), True, 'import os, os.path as op\n'), ((23642, 23667), 'os.makedirs', 
'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (23653, 23667), False, 'import os, os.path as op\n'), ((25443, 25491), 'numpy.bitwise_and', 'np.bitwise_and', (['(ROC[:, 0] != -1)', '(ROC[:, 1] != -1)'], {}), '(ROC[:, 0] != -1, ROC[:, 1] != -1)\n', (25457, 25491), True, 'import numpy as np\n'), ((25522, 25547), 'numpy.array', 'np.array', (['[0, ROC[-1, 1]]'], {}), '([0, ROC[-1, 1]])\n', (25530, 25547), True, 'import numpy as np\n'), ((28614, 28657), 'numpy.bitwise_or', 'np.bitwise_or', (['(mask_gt == 0)', '(mask_gt == 255)'], {}), '(mask_gt == 0, mask_gt == 255)\n', (28627, 28657), True, 'import numpy as np\n'), ((28780, 28801), 'torch.Tensor', 'torch.Tensor', (['mask_gt'], {}), '(mask_gt)\n', (28792, 28801), False, 'import torch\n'), ((28824, 28845), 'torch.Tensor', 'torch.Tensor', (['mask_pr'], {}), '(mask_pr)\n', (28836, 28845), False, 'import torch\n'), ((29017, 29044), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (29025, 29044), True, 'import numpy as np\n'), ((29063, 29090), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (29071, 29090), True, 'import numpy as np\n'), ((29109, 29136), 'numpy.zeros', 'np.zeros', (['(256,)'], {'dtype': 'int'}), '((256,), dtype=int)\n', (29117, 29136), True, 'import numpy as np\n'), ((30256, 30320), 'matplotlib.pyplot.plot', 'plt.plot', (['ROC[:, 0]', 'ROC[:, 1]', '"""go-"""'], {'linewidth': '(2)', 'markersize': '(4)'}), "(ROC[:, 0], ROC[:, 1], 'go-', linewidth=2, markersize=4)\n", (30264, 30320), True, 'import matplotlib.pyplot as plt\n'), ((30333, 30348), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.05)'], {}), '(0.05)\n', (30342, 30348), True, 'import matplotlib.pyplot as plt\n'), ((30630, 30653), 'os.path.exists', 'op.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (30639, 30653), True, 'import os, os.path as op\n'), ((30667, 30692), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (30678, 30692), False, 
'import os, os.path as op\n'), ((2436, 2472), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""bbox"""'], {}), "(entry, 'bbox')\n", (2457, 2472), False, 'from lib.backend import backendDb\n'), ((4930, 4950), 'numpy.count_nonzero', 'np.count_nonzero', (['tp'], {}), '(tp)\n', (4946, 4950), True, 'import numpy as np\n'), ((4953, 4973), 'numpy.count_nonzero', 'np.count_nonzero', (['fp'], {}), '(fp)\n', (4969, 4973), True, 'import numpy as np\n'), ((5236, 5256), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (5244, 5256), True, 'import numpy as np\n'), ((7961, 7997), 'lib.backend.backendDb.objectField', 'backendDb.objectField', (['entry', '"""bbox"""'], {}), "(entry, 'bbox')\n", (7982, 7997), False, 'from lib.backend import backendDb\n'), ((11595, 11604), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11602, 11604), True, 'import matplotlib.pyplot as plt\n'), ((12220, 12229), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12227, 12229), True, 'import matplotlib.pyplot as plt\n'), ((16229, 16242), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16236, 16242), True, 'import numpy as np\n'), ((15299, 15361), 'logging.warning', 'logging.warning', (['"""extra_metrics not supported for pascal-voc."""'], {}), "('extra_metrics not supported for pascal-voc.')\n", (15314, 15361), False, 'import logging\n'), ((16059, 16072), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16066, 16072), True, 'import numpy as np\n'), ((16117, 16130), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (16124, 16130), True, 'import numpy as np\n'), ((29972, 29994), 'os.path.basename', 'op.basename', (['imagefile'], {}), '(imagefile)\n', (29983, 29994), True, 'import os, os.path as op\n'), ((29208, 29248), 'torch.mul', 'torch.mul', (['(mask_pr > val)', '(mask_gt == 255)'], {}), '(mask_pr > val, mask_gt == 255)\n', (29217, 29248), False, 'import torch\n'), ((29340, 29380), 'torch.mul', 'torch.mul', (['(mask_pr > val)', 
'(mask_gt != 255)'], {}), '(mask_pr > val, mask_gt != 255)\n', (29349, 29380), False, 'import torch\n'), ((29472, 29513), 'torch.mul', 'torch.mul', (['(mask_pr <= val)', '(mask_gt == 255)'], {}), '(mask_pr <= val, mask_gt == 255)\n', (29481, 29513), False, 'import torch\n'), ((29605, 29646), 'torch.mul', 'torch.mul', (['(mask_pr <= val)', '(mask_gt != 255)'], {}), '(mask_pr <= val, mask_gt != 255)\n', (29614, 29646), False, 'import torch\n')]
|
import tensorflow as tf
"""
Instruction to the code there can be found at:
https://www.tensorflow.org/versions/r0.10/how_tos/using_gpu/index.html
"""
# Creates a graph.
with tf.device('/cpu:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
print(sess.run(c))
"""
Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GRID K520, pci bus id: 0000:00:03.0
/job:localhost/replica:0/task:0/gpu:1 -> device: 1, name: GRID K520, pci bus id: 0000:00:04.0
/job:localhost/replica:0/task:0/gpu:2 -> device: 2, name: GRID K520, pci bus id: 0000:00:05.0
/job:localhost/replica:0/task:0/gpu:3 -> device: 3, name: GRID K520, pci bus id: 0000:00:06.0
I tensorflow/core/common_runtime/direct_session.cc:175] Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GRID K520, pci bus id: 0000:00:03.0
/job:localhost/replica:0/task:0/gpu:1 -> device: 1, name: GRID K520, pci bus id: 0000:00:04.0
/job:localhost/replica:0/task:0/gpu:2 -> device: 2, name: GRID K520, pci bus id: 0000:00:05.0
/job:localhost/replica:0/task:0/gpu:3 -> device: 3, name: GRID K520, pci bus id: 0000:00:06.0
MatMul: /job:localhost/replica:0/task:0/cpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] MatMul: /job:localhost/replica:0/task:0/cpu:0
b: /job:localhost/replica:0/task:0/cpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] b: /job:localhost/replica:0/task:0/cpu:0
a: /job:localhost/replica:0/task:0/cpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] a: /job:localhost/replica:0/task:0/cpu:0
[[ 22. 28.]
[ 49. 64.]]
"""
""" allow memory growth:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config, ...)
"""
""" Partial memory allocation
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
session = tf.Session(config=config, ...)
"""
### Allow non-default manual device selection. Flag needed for manual device selection
# Creates a graph.
with tf.device('/gpu:5'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with allow_soft_placement and log_device_placement set
# to True.
# Ends up using /gpu:0 because it is available.
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True)) as sess:
print(sess.run(c))
|
[
"tensorflow.matmul",
"tensorflow.device",
"tensorflow.constant",
"tensorflow.ConfigProto"
] |
[((175, 194), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (184, 194), True, 'import tensorflow as tf\n'), ((204, 271), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'], {'shape': '[2, 3]', 'name': '"""a"""'}), "([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n", (215, 271), True, 'import tensorflow as tf\n'), ((280, 347), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'], {'shape': '[3, 2]', 'name': '"""b"""'}), "([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n", (291, 347), True, 'import tensorflow as tf\n'), ((356, 371), 'tensorflow.matmul', 'tf.matmul', (['a', 'b'], {}), '(a, b)\n', (365, 371), True, 'import tensorflow as tf\n'), ((2315, 2334), 'tensorflow.device', 'tf.device', (['"""/gpu:5"""'], {}), "('/gpu:5')\n", (2324, 2334), True, 'import tensorflow as tf\n'), ((2342, 2409), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'], {'shape': '[2, 3]', 'name': '"""a"""'}), "([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n", (2353, 2409), True, 'import tensorflow as tf\n'), ((2416, 2483), 'tensorflow.constant', 'tf.constant', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'], {'shape': '[3, 2]', 'name': '"""b"""'}), "([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n", (2427, 2483), True, 'import tensorflow as tf\n'), ((2490, 2505), 'tensorflow.matmul', 'tf.matmul', (['a', 'b'], {}), '(a, b)\n', (2499, 2505), True, 'import tensorflow as tf\n'), ((455, 496), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(True)'}), '(log_device_placement=True)\n', (469, 496), True, 'import tensorflow as tf\n'), ((2663, 2731), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(True)'}), '(allow_soft_placement=True, log_device_placement=True)\n', (2677, 2731), True, 'import tensorflow as tf\n')]
|
""" Low level tests for the InvenTree API """
from rest_framework import status
from django.urls import reverse
from InvenTree.api_tester import InvenTreeAPITestCase
from users.models import RuleSet
from base64 import b64encode
class APITests(InvenTreeAPITestCase):
""" Tests for the InvenTree API """
fixtures = [
'location',
'stock',
'part',
'category',
]
token = None
auto_login = False
def setUp(self):
super().setUp()
def basicAuth(self):
# Use basic authentication
authstring = bytes("{u}:{p}".format(u=self.username, p=self.password), "ascii")
# Use "basic" auth by default
auth = b64encode(authstring).decode("ascii")
self.client.credentials(HTTP_AUTHORIZATION="Basic {auth}".format(auth=auth))
def tokenAuth(self):
self.basicAuth()
token_url = reverse('api-token')
response = self.client.get(token_url, format='json', data={})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('token', response.data)
token = response.data['token']
self.token = token
def token_failure(self):
# Test token endpoint without basic auth
url = reverse('api-token')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertIsNone(self.token)
def token_success(self):
self.tokenAuth()
self.assertIsNotNone(self.token)
def test_info_view(self):
"""
Test that we can read the 'info-view' endpoint.
"""
url = reverse('api-inventree-info')
response = self.client.get(url, format='json')
data = response.json()
self.assertIn('server', data)
self.assertIn('version', data)
self.assertIn('instance', data)
self.assertEquals('InvenTree', data['server'])
def test_role_view(self):
"""
Test that we can access the 'roles' view for the logged in user.
Also tests that it is *not* accessible if the client is not logged in.
"""
url = reverse('api-user-roles')
response = self.client.get(url, format='json')
# Not logged in, so cannot access user role data
self.assertTrue(response.status_code in [401, 403])
# Now log in!
self.basicAuth()
response = self.get(url)
data = response.data
self.assertIn('user', data)
self.assertIn('username', data)
self.assertIn('is_staff', data)
self.assertIn('is_superuser', data)
self.assertIn('roles', data)
roles = data['roles']
role_names = roles.keys()
# By default, 'view' permissions are provided
for rule in RuleSet.RULESET_NAMES:
self.assertIn(rule, role_names)
self.assertIn('view', roles[rule])
self.assertNotIn('add', roles[rule])
self.assertNotIn('change', roles[rule])
self.assertNotIn('delete', roles[rule])
def test_with_superuser(self):
"""
Superuser should have *all* roles assigned
"""
self.user.is_superuser = True
self.user.save()
self.basicAuth()
response = self.get(reverse('api-user-roles'))
roles = response.data['roles']
for rule in RuleSet.RULESET_NAMES:
self.assertIn(rule, roles.keys())
for perm in ['view', 'add', 'change', 'delete']:
self.assertIn(perm, roles[rule])
def test_with_roles(self):
"""
Assign some roles to the user
"""
self.basicAuth()
response = self.get(reverse('api-user-roles'))
self.assignRole('part.delete')
self.assignRole('build.change')
response = self.get(reverse('api-user-roles'))
roles = response.data['roles']
# New role permissions should have been added now
self.assertIn('delete', roles['part'])
self.assertIn('change', roles['build'])
|
[
"django.urls.reverse",
"base64.b64encode"
] |
[((898, 918), 'django.urls.reverse', 'reverse', (['"""api-token"""'], {}), "('api-token')\n", (905, 918), False, 'from django.urls import reverse\n'), ((1263, 1283), 'django.urls.reverse', 'reverse', (['"""api-token"""'], {}), "('api-token')\n", (1270, 1283), False, 'from django.urls import reverse\n'), ((1678, 1707), 'django.urls.reverse', 'reverse', (['"""api-inventree-info"""'], {}), "('api-inventree-info')\n", (1685, 1707), False, 'from django.urls import reverse\n'), ((2192, 2217), 'django.urls.reverse', 'reverse', (['"""api-user-roles"""'], {}), "('api-user-roles')\n", (2199, 2217), False, 'from django.urls import reverse\n'), ((3346, 3371), 'django.urls.reverse', 'reverse', (['"""api-user-roles"""'], {}), "('api-user-roles')\n", (3353, 3371), False, 'from django.urls import reverse\n'), ((3762, 3787), 'django.urls.reverse', 'reverse', (['"""api-user-roles"""'], {}), "('api-user-roles')\n", (3769, 3787), False, 'from django.urls import reverse\n'), ((3897, 3922), 'django.urls.reverse', 'reverse', (['"""api-user-roles"""'], {}), "('api-user-roles')\n", (3904, 3922), False, 'from django.urls import reverse\n'), ((703, 724), 'base64.b64encode', 'b64encode', (['authstring'], {}), '(authstring)\n', (712, 724), False, 'from base64 import b64encode\n')]
|
#
#
#
#
# date: 2019-08-20
# author: <NAME>
# python3.6
# Copyright (C) 2019 <NAME> <EMAIL>
#
#import .deep_prior_inpainter as dp
#import .contextual_attention_gan as ca
#import .nearest_neighbours_inpainter as nn
from inpainters import (
deep_prior_inpainter as dp ,
contextual_attention_gan as ca,
nearest_neighbours_inpainter as nn
)
class HoleInpainter(object) :
"""
This class provides an interface to the 3 inpainting techniques.
One of the key parameters is `args` importing arguments input by the user in the inpainting scripts.
"""
def __init__ (self, args , Npix = 128, meshgrid=True ) :
"""
Initialize inpainter with the method given in ``args.method``.
So far the Deep-Prior and GAN architecture are compatible to run on ``128x128`` images.
"""
if args.method =='Deep-Prior':
self.Inpainter = dp.DeepPrior ( (Npix, Npix, 4),
verbose = args.debug, meshgrid=meshgrid )
self.epochs =args.dp_epochs
self.optimizer="Adam"
self.Inpainter.compile(optimizer=self.optimizer )
elif args.method=='Contextual-Attention' :
self.Inpainter = ca.ContextualAttention( modeldir =args.checkpoint_dir
, verbose = args.debug )
elif args.method=='Nearest-Neighbours' :
self.Inpainter = nn.NearestNeighbours(verbose = args.debug, Npix=Npix, tol =args.nn_tol )
self.method = args.method
pass
def __call__(self, reuse ) :
"""
Run inpainting,
**Parameters**
- `reuse`:{bool}
whether to recompile or not the Deep-Prior and GAN neural network.
"""
if self.method== 'Deep-Prior':
return self.DPinpaint(reuse=reuse )
elif self.method== 'Contextual-Attention':
return self.GANinpaint(reuse=reuse )
elif self.method== 'Nearest-Neighbours':
return self.NNinpaint()
def setup_input(self , fname, rdseed=None ) :
"""
Pre-process the flat map by renormalizing and reshaping it
as it required by the inpainting method
"""
self.Inpainter.rdseed = rdseed
return self.Inpainter.setup_input( fname )
def DPinpaint(self,reuse ) :
"""
Set of instructions to inpaint with :class:`DeepPrior`
"""
if reuse :
self.Inpainter.compile (optimizer=self.optimizer)
self.Inpainter.train(self.Inpainter.Z , self.Inpainter.X , epochs=self.epochs )
self.Inpainter.evaluate(self.Inpainter.Z,self.Inpainter.X)
p = self.Inpainter.predict()[0,:,:,0]
p = self.Inpainter.rescale_back(p )
return p
def GANinpaint (self , reuse ) :
"""
Set of instructions to inpaint with
:class:`ContextualAttention`
"""
p = self.Inpainter.predict( reuse )
p = self.Inpainter.rescale_back(p )
return p
def NNinpaint (self ) :
"""
Set of instructions to inpaint with :class:`NearestNeighbours`
"""
return self.Inpainter.predict ( )
|
[
"inpainters.deep_prior_inpainter.DeepPrior",
"inpainters.nearest_neighbours_inpainter.NearestNeighbours",
"inpainters.contextual_attention_gan.ContextualAttention"
] |
[((916, 984), 'inpainters.deep_prior_inpainter.DeepPrior', 'dp.DeepPrior', (['(Npix, Npix, 4)'], {'verbose': 'args.debug', 'meshgrid': 'meshgrid'}), '((Npix, Npix, 4), verbose=args.debug, meshgrid=meshgrid)\n', (928, 984), True, 'from inpainters import deep_prior_inpainter as dp, contextual_attention_gan as ca, nearest_neighbours_inpainter as nn\n'), ((1253, 1325), 'inpainters.contextual_attention_gan.ContextualAttention', 'ca.ContextualAttention', ([], {'modeldir': 'args.checkpoint_dir', 'verbose': 'args.debug'}), '(modeldir=args.checkpoint_dir, verbose=args.debug)\n', (1275, 1325), True, 'from inpainters import deep_prior_inpainter as dp, contextual_attention_gan as ca, nearest_neighbours_inpainter as nn\n'), ((1436, 1504), 'inpainters.nearest_neighbours_inpainter.NearestNeighbours', 'nn.NearestNeighbours', ([], {'verbose': 'args.debug', 'Npix': 'Npix', 'tol': 'args.nn_tol'}), '(verbose=args.debug, Npix=Npix, tol=args.nn_tol)\n', (1456, 1504), True, 'from inpainters import deep_prior_inpainter as dp, contextual_attention_gan as ca, nearest_neighbours_inpainter as nn\n')]
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from .models import Snack
# Create your tests here.
class SnacksTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username = 'samer', email = '<EMAIL>', password = '<PASSWORD>'
)
self.snack = Snack.objects.create(
title = 'Chips', description = 'It tastes so delicious', purchaser = self.user
)
def test_StringRepresentation(self):
self.assertEqual(str(self.snack), "Chips")
def test_SnackContent(self):
self.assertEqual(f"{self.snack.title}", 'Chips')
self.assertEqual(f"{self.snack.description}", 'It tastes so delicious')
self.assertEqual(self.snack.purchaser, self.user)
def test_SnackListView(self):
url = reverse('snack_list')
actual = self.client.get(url).status_code
self.assertEqual(actual, 200)
def test_SnackDetailsView(self):
response = self.client.get(reverse('snack_details', args='1'))
self.assertEqual(response.status_code, 200)
def test_SnackCreateView(self):
response = self.client.post(reverse("snack_create"),{"title": "Laiz", "description": "Laiz is delicious", "purchaser": self.user})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Laiz')
self.assertContains(response, 'Laiz is delicious')
self.assertContains(response, 'samer')
def test_SnackUpdateView(self):
response = self.client.post(reverse('snack_update', args='1'), {'title':'Chocolate'})
self.assertContains(response, 'Chocolate')
def test_SnackDeleteView(self):
response = self.client.get(reverse("snack_delete", args="1"))
self.assertEqual(response.status_code, 200)
|
[
"django.urls.reverse",
"django.contrib.auth.get_user_model"
] |
[((876, 897), 'django.urls.reverse', 'reverse', (['"""snack_list"""'], {}), "('snack_list')\n", (883, 897), False, 'from django.urls import reverse\n'), ((1059, 1093), 'django.urls.reverse', 'reverse', (['"""snack_details"""'], {'args': '"""1"""'}), "('snack_details', args='1')\n", (1066, 1093), False, 'from django.urls import reverse\n'), ((1220, 1243), 'django.urls.reverse', 'reverse', (['"""snack_create"""'], {}), "('snack_create')\n", (1227, 1243), False, 'from django.urls import reverse\n'), ((1600, 1633), 'django.urls.reverse', 'reverse', (['"""snack_update"""'], {'args': '"""1"""'}), "('snack_update', args='1')\n", (1607, 1633), False, 'from django.urls import reverse\n'), ((1789, 1822), 'django.urls.reverse', 'reverse', (['"""snack_delete"""'], {'args': '"""1"""'}), "('snack_delete', args='1')\n", (1796, 1822), False, 'from django.urls import reverse\n'), ((237, 253), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (251, 253), False, 'from django.contrib.auth import get_user_model\n')]
|
import factory
from sarafan.events import Publication
from .utils import generate_rnd_hash, generate_rnd_address
class PublicationFactory(factory.Factory):
class Meta:
model = Publication
reply_to = '0x'
magnet = factory.LazyFunction(lambda: generate_rnd_hash()[2:])
source = factory.LazyFunction(generate_rnd_address)
size = 1
retention = 1
|
[
"factory.LazyFunction"
] |
[((304, 346), 'factory.LazyFunction', 'factory.LazyFunction', (['generate_rnd_address'], {}), '(generate_rnd_address)\n', (324, 346), False, 'import factory\n')]
|
#!/usr/bin/env python
# Tests for `xclim` package, command line interface
from __future__ import annotations
import numpy as np
import pytest
import xarray as xr
from click.testing import CliRunner
import xclim
from xclim.cli import cli
from xclim.testing import open_dataset
try:
from dask.distributed import Client
except ImportError:
Client = None
K2C = 273.15
@pytest.mark.parametrize(
"indicators,indnames",
[
([xclim.atmos.tg_mean], ["tg_mean"]),
(
# Note: This test is dependent on indicator name length and terminal dimensions.
[xclim.atmos.tn_mean, xclim.atmos.ice_days],
["tn_mean", "ice_days"],
),
],
)
def test_info(indicators, indnames):
runner = CliRunner()
results = runner.invoke(cli, ["info"] + indnames)
for ind in indicators:
assert ind.title in results.output
assert ind.identifier in results.output
def test_indices():
runner = CliRunner()
results = runner.invoke(cli, ["indices"])
for name, ind in xclim.core.indicator.registry.items():
assert name.lower() in results.output
@pytest.mark.parametrize(
"indicator,indname",
[
(xclim.atmos.heating_degree_days, "heating_degree_days"),
(xclim.land.base_flow_index, "base_flow_index"),
],
)
def test_indicator_help(indicator, indname):
runner = CliRunner()
results = runner.invoke(cli, [indname, "--help"])
for name in indicator.parameters.keys():
if name not in ["ds", "indexer"]:
assert name in results.output
@pytest.mark.parametrize(
"indicator,expected,varnames",
[
("tg_mean", 272.15, ["tas"]),
("dtrvar", 0.0, ["tasmin", "tasmax"]),
("heating_degree_days", 6588.0, ["tas"]),
("solidprcptot", 31622400.0, ["tas", "pr"]),
],
)
def test_normal_computation(
tasmin_series, tasmax_series, pr_series, tmp_path, indicator, expected, varnames
):
tasmin = tasmin_series(np.ones(366) + 270.15, start="1/1/2000")
tasmax = tasmax_series(np.ones(366) + 272.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
ds = xr.Dataset(
data_vars={
"tasmin": tasmin,
"tasmax": tasmax,
"tas": xclim.atmos.tg(tasmin, tasmax),
"pr": pr,
}
)
input_file = tmp_path / "in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
args = ["-i", str(input_file), "-o", str(output_file), "-v", indicator]
runner = CliRunner()
results = runner.invoke(cli, args)
for varname in varnames:
assert f"Parsed {varname} = {varname}" in results.output
assert "Processing :" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
outvar = list(out.data_vars.values())[0]
np.testing.assert_allclose(outvar[0], expected)
def test_multi_input(tas_series, pr_series, tmp_path):
tas = tas_series(np.ones(366) + 273.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
tas_file = tmp_path / "multi_tas_in.nc"
pr_file = tmp_path / "multi_pr_in.nc"
output_file = tmp_path / "out.nc"
tas.to_dataset().to_netcdf(tas_file)
pr.to_dataset().to_netcdf(pr_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(tmp_path / "multi_*_in.nc"),
"-o",
str(output_file),
"-v",
"solidprcptot",
],
)
assert "Processing : solidprcptot" in results.output
out = xr.open_dataset(output_file)
assert out.solidprcptot.sum() == 0
def test_multi_output(tmp_path):
ds = open_dataset("ERA5/daily_surface_cancities_1990-1993.nc")
input_file = tmp_path / "ws_in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"wind_speed_from_vector",
],
)
assert "Processing : wind_speed_from_vector" in results.output
def test_renaming_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.name = "tas"
tas.to_netcdf(input_file)
with xclim.set_options(cf_compliance="warn"):
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tn_mean",
"--tasmin",
"tas",
],
)
assert "Processing : tn_mean" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tn_mean[0] == 1.0
def test_indicator_chain(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tg_mean",
"growing_degree_days",
],
)
assert "Processing : tg_mean" in results.output
assert "Processing : growing_degree_days" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tg_mean[0] == 1.0
assert out.growing_degree_days[0] == 0
def test_missing_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli, ["-i", str(input_file), "-o", str(output_file), "tn_mean"]
)
assert results.exit_code == 2
assert "'tasmin' was not found in the input dataset." in results.output
@pytest.mark.parametrize(
"options,output",
[
(["--dask-nthreads", "2"], "Error: '--dask-maxmem' must be given"),
(["--chunks", "time:90"], "100% Complete"),
(["--chunks", "time:90,lat:5"], "100% Completed"),
(["--version"], xclim.__version__),
],
)
def test_global_options(tas_series, tmp_path, options, output):
if "dask" in options[0]:
pytest.importorskip("dask.distributed")
tas = tas_series(np.ones(366), start="1/1/2000")
tas = xr.concat([tas] * 10, dim="lat")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
["-i", str(input_file), "-o", str(output_file)] + options + ["tg_mean"],
)
assert output in results.output
def test_suspicious_precipitation_flags(pr_series, tmp_path):
bad_pr = pr_series(np.zeros(365), start="1971-01-01")
# Add some strangeness
bad_pr[8] = -1e-6 # negative values
bad_pr[120] = 301 / 3600 / 24 # 301mm/day
bad_pr[121:141] = 1.1574074074074072e-05 # 1mm/day
bad_pr[200:300] = 5.787037037037036e-05 # 5mm/day
input_file = tmp_path / "bad_pr.nc"
output_file = tmp_path / "out.nc"
bad_pr.to_netcdf(input_file)
runner = CliRunner()
runner.invoke(
cli, ["-i", str(input_file), "-o", str(output_file), "dataflags", "pr"]
)
with xr.open_dataset(output_file) as ds:
for var in ds.data_vars:
assert var
@pytest.mark.slow
def test_dataflags_output(tmp_path, tas_series, tasmax_series, tasmin_series):
ds = xr.Dataset()
for series, val in zip([tas_series, tasmax_series, tasmin_series], [0, 10, -10]):
vals = val + K2C + np.sin(np.pi * np.arange(366 * 3) / 366)
arr = series(vals, start="1971-01-01")
ds = xr.merge([ds, arr])
input_file = tmp_path / "ws_in.nc"
ds.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"dataflags",
"-r",
],
)
assert "Dataset passes quality control checks!" in results.output
def test_bad_usage(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
# No command
results = runner.invoke(cli, ["-i", str(input_file)])
assert "Missing command" in results.output
# Indicator not found:
results = runner.invoke(cli, ["info", "mean_ether_velocity"])
assert "Indicator 'mean_ether_velocity' not found in xclim" in results.output
# No input file given
results = runner.invoke(cli, ["-o", str(output_file), "base_flow_index"])
assert "No input file name given" in results.output
# No output file given
results = runner.invoke(cli, ["-i", str(input_file), "tg_mean"])
assert "No output file name given" in results.output
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"--dask-nthreads",
"2",
"tg_mean",
],
)
if Client is None: # dask.distributed not installed
assert "distributed scheduler is not installed" in results.output
else:
assert "'--dask-maxmem' must be given" in results.output
@pytest.mark.requires_docs
@pytest.mark.parametrize("method, pattern", [("-r", "`GH/"), ("-m", "[GH/")])
def test_release_notes(method, pattern):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", method],
)
assert ":pull:`" not in results.output
assert ":issue:`" not in results.output
assert ":user:`" not in results.output
assert pattern in results.output
@pytest.mark.parametrize(
"method, error",
[
(
["-m", "-r"],
"Cannot return both Markdown and ReStructuredText in same release_notes call.",
),
(list(), "Must specify Markdown (-m) or ReStructuredText (-r)."),
],
)
def test_release_notes_failure(method, error):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", *method],
)
assert error in results.output
def test_show_version_info(capsys):
runner = CliRunner()
results = runner.invoke(cli, ["show_version_info"])
assert "INSTALLED VERSIONS" in results.output
assert "python" in results.output
assert "boltons: installed" in results.output
|
[
"pytest.importorskip",
"xclim.atmos.tg",
"xclim.core.indicator.registry.items",
"xarray.open_dataset",
"numpy.zeros",
"numpy.ones",
"xarray.concat",
"xarray.Dataset",
"xarray.merge",
"numpy.arange",
"pytest.mark.parametrize",
"xclim.set_options",
"numpy.testing.assert_allclose",
"click.testing.CliRunner",
"xclim.testing.open_dataset"
] |
[((380, 547), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indicators,indnames"""', "[([xclim.atmos.tg_mean], ['tg_mean']), ([xclim.atmos.tn_mean, xclim.atmos.\n ice_days], ['tn_mean', 'ice_days'])]"], {}), "('indicators,indnames', [([xclim.atmos.tg_mean], [\n 'tg_mean']), ([xclim.atmos.tn_mean, xclim.atmos.ice_days], ['tn_mean',\n 'ice_days'])])\n", (403, 547), False, 'import pytest\n'), ((1139, 1302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indicator,indname"""', "[(xclim.atmos.heating_degree_days, 'heating_degree_days'), (xclim.land.\n base_flow_index, 'base_flow_index')]"], {}), "('indicator,indname', [(xclim.atmos.\n heating_degree_days, 'heating_degree_days'), (xclim.land.\n base_flow_index, 'base_flow_index')])\n", (1162, 1302), False, 'import pytest\n'), ((1584, 1804), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indicator,expected,varnames"""', "[('tg_mean', 272.15, ['tas']), ('dtrvar', 0.0, ['tasmin', 'tasmax']), (\n 'heating_degree_days', 6588.0, ['tas']), ('solidprcptot', 31622400.0, [\n 'tas', 'pr'])]"], {}), "('indicator,expected,varnames', [('tg_mean', 272.15,\n ['tas']), ('dtrvar', 0.0, ['tasmin', 'tasmax']), ('heating_degree_days',\n 6588.0, ['tas']), ('solidprcptot', 31622400.0, ['tas', 'pr'])])\n", (1607, 1804), False, 'import pytest\n'), ((6190, 6445), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""options,output"""', '[([\'--dask-nthreads\', \'2\'], "Error: \'--dask-maxmem\' must be given"), ([\n \'--chunks\', \'time:90\'], \'100% Complete\'), ([\'--chunks\', \'time:90,lat:5\'\n ], \'100% Completed\'), ([\'--version\'], xclim.__version__)]'], {}), '(\'options,output\', [([\'--dask-nthreads\', \'2\'],\n "Error: \'--dask-maxmem\' must be given"), ([\'--chunks\', \'time:90\'],\n \'100% Complete\'), ([\'--chunks\', \'time:90,lat:5\'], \'100% Completed\'), ([\n \'--version\'], xclim.__version__)])\n', (6213, 6445), False, 'import pytest\n'), ((9703, 9779), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""method, pattern"""', "[('-r', '`GH/'), ('-m', '[GH/')]"], {}), "('method, pattern', [('-r', '`GH/'), ('-m', '[GH/')])\n", (9726, 9779), False, 'import pytest\n'), ((751, 762), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (760, 762), False, 'from click.testing import CliRunner\n'), ((971, 982), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (980, 982), False, 'from click.testing import CliRunner\n'), ((1051, 1088), 'xclim.core.indicator.registry.items', 'xclim.core.indicator.registry.items', ([], {}), '()\n', (1086, 1088), False, 'import xclim\n'), ((1385, 1396), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1394, 1396), False, 'from click.testing import CliRunner\n'), ((2535, 2546), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2544, 2546), False, 'from click.testing import CliRunner\n'), ((2781, 2809), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (2796, 2809), True, 'import xarray as xr\n'), ((2859, 2906), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['outvar[0]', 'expected'], {}), '(outvar[0], expected)\n', (2885, 2906), True, 'import numpy as np\n'), ((3296, 3307), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3305, 3307), False, 'from click.testing import CliRunner\n'), ((3602, 3630), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (3617, 3630), True, 'import xarray as xr\n'), ((3714, 3771), 'xclim.testing.open_dataset', 'open_dataset', (['"""ERA5/daily_surface_cancities_1990-1993.nc"""'], {}), "('ERA5/daily_surface_cancities_1990-1993.nc')\n", (3726, 3771), False, 'from xclim.testing import open_dataset\n'), ((3892, 3903), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3901, 3903), False, 'from click.testing import CliRunner\n'), ((4918, 4946), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (4933, 4946), True, 'import xarray 
as xr\n'), ((5203, 5214), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5212, 5214), False, 'from click.testing import CliRunner\n'), ((5629, 5657), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (5644, 5657), True, 'import xarray as xr\n'), ((5958, 5969), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5967, 5969), False, 'from click.testing import CliRunner\n'), ((6687, 6719), 'xarray.concat', 'xr.concat', (['([tas] * 10)'], {'dim': '"""lat"""'}), "([tas] * 10, dim='lat')\n", (6696, 6719), True, 'import xarray as xr\n'), ((6840, 6851), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6849, 6851), False, 'from click.testing import CliRunner\n'), ((7492, 7503), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7501, 7503), False, 'from click.testing import CliRunner\n'), ((7818, 7830), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (7828, 7830), True, 'import xarray as xr\n'), ((8147, 8158), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8156, 8158), False, 'from click.testing import CliRunner\n'), ((8605, 8616), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8614, 8616), False, 'from click.testing import CliRunner\n'), ((9834, 9845), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (9843, 9845), False, 'from click.testing import CliRunner\n'), ((10433, 10444), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (10442, 10444), False, 'from click.testing import CliRunner\n'), ((10615, 10626), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (10624, 10626), False, 'from click.testing import CliRunner\n'), ((2119, 2131), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (2126, 2131), True, 'import numpy as np\n'), ((3045, 3057), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (3052, 3057), True, 'import numpy as np\n'), ((4264, 4276), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (4271, 4276), True, 'import numpy as np\n'), 
((4431, 4470), 'xclim.set_options', 'xclim.set_options', ([], {'cf_compliance': '"""warn"""'}), "(cf_compliance='warn')\n", (4448, 4470), False, 'import xclim\n'), ((4489, 4500), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4498, 4500), False, 'from click.testing import CliRunner\n'), ((5051, 5063), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (5058, 5063), True, 'import numpy as np\n'), ((5806, 5818), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (5813, 5818), True, 'import numpy as np\n'), ((6584, 6623), 'pytest.importorskip', 'pytest.importorskip', (['"""dask.distributed"""'], {}), "('dask.distributed')\n", (6603, 6623), False, 'import pytest\n'), ((6645, 6657), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (6652, 6657), True, 'import numpy as np\n'), ((7105, 7118), 'numpy.zeros', 'np.zeros', (['(365)'], {}), '(365)\n', (7113, 7118), True, 'import numpy as np\n'), ((7618, 7646), 'xarray.open_dataset', 'xr.open_dataset', (['output_file'], {}), '(output_file)\n', (7633, 7646), True, 'import xarray as xr\n'), ((8045, 8064), 'xarray.merge', 'xr.merge', (['[ds, arr]'], {}), '([ds, arr])\n', (8053, 8064), True, 'import xarray as xr\n'), ((8453, 8465), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (8460, 8465), True, 'import numpy as np\n'), ((1991, 2003), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (1998, 2003), True, 'import numpy as np\n'), ((2059, 2071), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (2066, 2071), True, 'import numpy as np\n'), ((2985, 2997), 'numpy.ones', 'np.ones', (['(366)'], {}), '(366)\n', (2992, 2997), True, 'import numpy as np\n'), ((2271, 2301), 'xclim.atmos.tg', 'xclim.atmos.tg', (['tasmin', 'tasmax'], {}), '(tasmin, tasmax)\n', (2285, 2301), False, 'import xclim\n'), ((7959, 7977), 'numpy.arange', 'np.arange', (['(366 * 3)'], {}), '(366 * 3)\n', (7968, 7977), True, 'import numpy as np\n')]
|
from multiprocessing import Process, Queue
import os
import struct
import tempfile
import unittest
import random
from logging import getLogger
from nose.plugins.attrib import attr
from past.builtins import basestring
import cloudsigma.resource as cr
import cloudsigma.errors as errors
from testing.utils import DumpResponse
from testing.acceptance.common import StatefulResourceTestBase
LOG = getLogger(__name__)
@attr('acceptance_test')
class DriveBasicTest(StatefulResourceTestBase):
def setUp(self):
super(DriveBasicTest, self).setUp()
self.client = cr.Drive()
self.dump_response = DumpResponse(clients=[self.client])
@attr('docs_snippets')
def test_drive_cycle(self):
drive_def = {
'name': 'test_drive_1',
'size': 1024000000,
'media': 'disk',
}
with self.dump_response('drive_create_minimal'):
drive = self.client.create(drive_def)
drive_uuid = drive['uuid']
self.assertEqual(drive['status'], 'creating')
self._wait_for_status(drive_uuid, 'unmounted')
with self.dump_response('drive_get_unmounted'):
drive = self.client.get(drive_uuid)
with self.dump_response('drive_update_meta'):
drive['meta'] = {
'meta_key1': 'value',
'meta_key2': 'value\nwith\nnew lines'
}
updated_drive = self.client.update(drive_uuid, drive)
self.assertEqual(drive['meta'], updated_drive['meta'])
with self.dump_response('drive_delete'):
self.client.delete(drive_uuid)
self._wait_deleted(drive_uuid)
@attr('docs_snippets')
def test_drive_resize(self):
DRIVE_CREATE_SIZE = 2 * 1024 ** 3
drive_def = {
'name': 'test_drive_1',
'size': DRIVE_CREATE_SIZE,
'media': 'disk',
}
drive = self.client.create(drive_def)
self.assertEqual(drive['status'], 'creating')
self._wait_for_status(
drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CREATED
)
DRIVE_NEW_SIZE = DRIVE_CREATE_SIZE + 3 * 1024 ** 3
with self.dump_response('drive_resize'):
drive_def['size'] = DRIVE_NEW_SIZE
resizing_drive = self.client.update(drive['uuid'], drive_def)
self.assertEqual(resizing_drive['status'], 'resizing')
self._wait_for_status(resizing_drive['uuid'], 'unmounted')
resized_drive = self.client.get(drive['uuid'])
self.assertEqual(
int(resized_drive['size']),
DRIVE_NEW_SIZE,
'Size mismatch after drive resize'
)
DRIVE_NEW_ODD_SIZE = DRIVE_NEW_SIZE + 1*1024**3 + 7*1024**2 + 3*1024
drive_def['size'] = DRIVE_NEW_ODD_SIZE
resizing_drive = self.client.update(drive['uuid'], drive_def)
self.assertEqual(resizing_drive['status'], 'resizing')
self._wait_for_status(resizing_drive['uuid'], 'unmounted')
ALLOWED_SIZE_ROUNDING = 64 * 1024
resized_drive = self.client.get(drive['uuid'])
self.assertNotEqual(
int(resized_drive['size']),
DRIVE_NEW_SIZE,
'Size of {!r} did not change'.format(drive['uuid'])
)
self.assertLess(
abs(DRIVE_NEW_ODD_SIZE-int(resized_drive['size'])),
ALLOWED_SIZE_ROUNDING,
'New size differs with more than %d bytes, requested size %d '
'bytes, reported size after resize %d bytes' % (
ALLOWED_SIZE_ROUNDING,
DRIVE_NEW_ODD_SIZE,
resized_drive['size']
)
)
self.client.delete(drive['uuid'])
self._wait_deleted(drive['uuid'])
@attr('docs_snippets')
def test_drive_resize_action(self):
DRIVE_CREATE_SIZE = 2 * 1024 ** 3
drive_def = {
'name': 'test_drive_1',
'size': DRIVE_CREATE_SIZE,
'media': 'disk',
}
drive = self.client.create(drive_def)
self._wait_for_status(
drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CREATED
)
drive['size'] = 2 * drive['size']
with self.dump_response('drive_resize_action'):
self.client.resize(drive['uuid'], drive)
self._wait_for_status(
drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CREATED
)
resized_drive = self.client.get(drive['uuid'])
self.assertEqual(resized_drive['size'], drive['size'])
self.client.delete(drive['uuid'])
self._wait_deleted(drive['uuid'])
@attr('docs_snippets')
def test_drive_listing(self):
req = [
{
'name': 'test_drive_%i' % i,
'size': '1024000000',
'media': 'disk',
} for i in range(5)
]
with self.dump_response('drive_create_bulk'):
drives = self.client.create(req)
for drive in drives:
self._wait_for_status(drive['uuid'], 'unmounted')
# Get the short list of fields
with self.dump_response('drive_list'):
self.client.list()
# Get just a list of uuids
with self.dump_response('drive_list_just_uuid_and_status'):
just_uuids = self.client.list(query_params={'fields':'uuid,status'})
for el in just_uuids:
self.assertEqual(set(el.keys()), {'uuid', 'status'})
# Get detailed information on drives
with self.dump_response('drive_list_detail'):
self.client.list_detail()
for drive in drives:
self.client.delete(drive['uuid'])
for drive in drives:
self._wait_deleted(drive['uuid'])
@attr('docs_snippets')
def test_drive_edit(self):
drive_def = {
'name': 'test_drive_x',
'size': 1024000000,
'media': 'disk',
}
drive = self.client.create(drive_def)
self._wait_for_status(drive['uuid'], 'unmounted')
drive_def['name'] = 'test_drive_y'
drive_def['media'] = 'cdrom'
with self.dump_response('drive_edit'):
updated_drive = self.client.update(drive['uuid'], drive_def)
self.assertDictContainsSubset(drive_def, updated_drive)
self.client.delete(updated_drive['uuid'])
self._wait_deleted(updated_drive['uuid'])
@attr('docs_snippets')
def test_drive_clone(self):
drive_def = {
'name': 'test_drive_x',
'size': '1024000000',
'media': 'disk',
}
drive = self.client.create(drive_def)
self._wait_for_status(
drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CLONING
)
clone_drive_def = {
'name': 'test_drive_y',
'media': 'cdrom',
'affinities': [],
}
with self.dump_response('drive_clone'):
cloned_drive = self.client.clone(drive['uuid'], clone_drive_def)
self._wait_for_status(
cloned_drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CLONING
)
self.client.delete(drive['uuid'])
self.client.delete(cloned_drive['uuid'])
self._wait_deleted(cloned_drive['uuid'], timeout=60)
self._wait_deleted(drive['uuid'], timeout=60)
def test_drive_avoid(self):
drive_def = {
'name': 'test_drive_x',
'size': '1024000000',
'media': 'disk',
}
drive = self.client.create(drive_def)
self._wait_for_status(
drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CLONING
)
clone_drive_def = {
'name': 'test_drive_y',
'media': 'cdrom',
'affinities': [],
}
cloned_drive = self.client.clone(
drive['uuid'],
clone_drive_def,
avoid=drive['uuid']
)
another_drive = self.client.create(drive_def, avoid=drive['uuid'])
self._wait_for_status(
cloned_drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CLONING
)
self._wait_for_status(
another_drive['uuid'],
'unmounted',
timeout=self.TIMEOUT_DRIVE_CLONING
)
self.client.delete(drive['uuid'])
self.client.delete(cloned_drive['uuid'])
self.client.delete(another_drive['uuid'])
self._wait_deleted(cloned_drive['uuid'], timeout=60)
self._wait_deleted(drive['uuid'], timeout=60)
self._wait_deleted(another_drive['uuid'], timeout=60)
@attr('docs_snippets')
def test_get_schema(self):
with self.dump_response('drive_schema'):
self.client.get_schema()
@attr('acceptance_test')
class LibraryDriveTest(StatefulResourceTestBase):
def _gen_server_definition(self, drives=[], changed_def={}):
drive_tmp = {
"device": "virtio",
"dev_channel": "0:0",
"drive": None,
"boot_order": 1
}
server_def = {
'name': 'testServerAcc',
'cpu': 1000,
'mem': 512 * 1024 ** 2,
'vnc_password': '<PASSWORD>',
'drives': [],
}
server_def.update(changed_def)
for drive in drives:
if isinstance(drive, dict):
drive = server_def['drives'].append(drive)
elif isinstance(drive, basestring):
guest_drive = drive_tmp.copy()
guest_drive['drive'] = drive
drive = guest_drive
else:
drive = None
if drive is not None:
server_def['drives'].append(drive)
return server_def
def setUp(self):
super(LibraryDriveTest, self).setUp()
self.client = cr.LibDrive()
self.dump_response = DumpResponse(clients=[self.client])
@attr('docs_snippets')
def test_get_schema(self):
with self.dump_response('libdrive_schema'):
self.client.get_schema()
@attr('docs_snippets')
def test_libdrive_listing(self):
with self.dump_response('libdrive_list'):
libdrives = self.client.list(query_params={'limit': 5})
# Select the lib drive with most interesting attributes.
# By default use the first possible
libdrive_uuid = libdrives[0]['uuid']
for d in libdrives:
# pick a drive with licenses
if len(d['licenses']) > 0:
libdrive_uuid = d['uuid']
break
with self.dump_response('libdrive_get'):
libdrive = self.client.get(libdrive_uuid)
dc = cr.Drive()
with DumpResponse(clients=[dc])('librdrive_get_through_drives'):
libdrive_from_drive_url = dc.get(libdrive_uuid)
self.assertIsNone(libdrive_from_drive_url['owner'])
self.assertEqual(libdrive['uuid'], libdrive_from_drive_url['uuid'])
self.assertEqual(libdrive['name'], libdrive_from_drive_url['name'])
def test_attaching_cdrom(self):
server_client = cr.Server()
found = None
for drive in self.client.list():
if drive['media'] == 'cdrom':
found = drive
break
if found is None:
raise unittest.SkipTest(
'Cannot find a cdrom drive in drives library'
)
guest_def = self._gen_server_definition(drives=[found['uuid']])
new_guest = server_client.create(guest_def)
server_client.delete(new_guest['uuid'])
self._wait_deleted(new_guest['uuid'], client=server_client)
def test_attaching_preinstalled(self):
server_client = cr.Server()
found = None
for drive in self.client.list():
if drive['media'] == 'disk':
found = drive
break
if found is None:
raise unittest.SkipTest(
'Cannot find a preinstalled drive in the drives library.'
)
guest_def = self._gen_server_definition(drives=[found['uuid']])
with self.assertRaises(errors.PermissionError):
server_client.create(guest_def)
@attr('stress_test')
class DriveStressTest(StatefulResourceTestBase):
CLONE_COUNT = 20
DRIVE_COUNT = 100
def setUp(self):
super(DriveStressTest, self).setUp()
self.client = cr.Drive()
def _get_min_drive_size(self):
return 1 * 1000 ** 3
def test_create_delete(self):
"""Creating MANY small drives via API to see if it works."""
min_size = self._get_min_drive_size()
define_list = [
{
"name": "test_drive_{}".format(num),
"size": min_size,
"media": "disk",
} for num in range(self.DRIVE_COUNT)
]
res = []
print(f'\nCreating Drives ({self.DRIVE_COUNT})', end='', flush=True)
for i, drive_def in enumerate(define_list):
res.append(self.client.create(drive_def))
print(f' {i + 1}', end='', flush=True)
for creating_drive in res:
self._wait_for_status(
creating_drive['uuid'],
status='unmounted',
client=self.client,
timeout=60
)
print('\nDeleting Drives', end='', flush=True)
for i, drive in enumerate(res):
self.client.delete(drive['uuid'])
print(f' {i + 1}', end='', flush=True)
for deleted_drive in res:
self._wait_deleted(deleted_drive['uuid'], self.client, timeout=60)
def test_clone(self):
"""Clone SOME drives via API to see if it works."""
puuid, ppass = self._get_persistent_image_uuid_and_pass()
cloned = []
print(f'\nCreating Clones ({self.CLONE_COUNT})', end='', flush=True)
for num in range(self.CLONE_COUNT):
cloned.append(
self.client.clone(
puuid,
{'name': "test_atom_clone_{}".format(num)}
)
)
print(f' {num + 1}', end='', flush=True)
for cloning_drive in cloned:
self._wait_for_status(
cloning_drive['uuid'],
status='unmounted',
client=self.client,
timeout=self.TIMEOUT_DRIVE_CLONING
)
print('\nDeleting Clones', end='', flush=True)
for i, drive in enumerate(cloned):
self.client.delete(drive['uuid'])
print(f' {i + 1}', end='', flush=True)
for deleted_drive in cloned:
self._wait_deleted(deleted_drive['uuid'], self.client, timeout=60)
class TestUpload(StatefulResourceTestBase):
def setUp(self):
super(TestUpload, self).setUp()
# 10.something MiB
self.file_size = 10 * 1024 ** 2 + random.randrange(0, 1024)
self.file_path = self.generate_file()
# self.downloaded_path = tempfile.mktemp(prefix='test_download_')
self.dc = cr.Drive()
def tearDown(self):
super(TestUpload, self).tearDown()
os.remove(self.file_path)
# os.remove(self.downloaded_path)
def generate_file(self):
fd, path = tempfile.mkstemp(prefix='drive_upload_test')
os.fdopen(fd).close()
with open(path, 'r+b') as f:
written = 0
# write 64 bit random values
data = struct.pack('=Q', random.randrange(0, 2 ** 64)) * 128 * 4
while written + 1024 * 4 <= self.file_size:
f.write(data)
written += 1024 * 4
# write 8 bit random values until we reach required size
while written < self.file_size:
f.write(chr(random.randrange(0, 2 ** 8)).encode())
written += 1
return path
def test_resumable_upload(self):
from cloudsigma.resumable_upload import Upload
def do_upload(queue):
up = Upload(
self.file_path,
chunk_size=1024 ** 2,
drive_name='test_drive_upload'
)
up.upload()
queue.put((up.drive_uuid, up.uploaded_size))
queue = Queue()
proc = Process(target=do_upload, args=(queue,))
proc.start()
proc.join(2 * 60)
if proc.is_alive():
proc.terminate()
raise Exception('Upload did not finish in time')
uuid, uploaded_size = queue.get(block=False)
LOG.debug('Finished uploading {}'.format(uuid))
self.assertEqual(uploaded_size, os.path.getsize(self.file_path))
drive = self.dc.get(uuid)
self.assertEqual(drive['status'], 'unmounted')
self.dc.delete(uuid)
|
[
"os.remove",
"cloudsigma.resource.LibDrive",
"tempfile.mkstemp",
"cloudsigma.resumable_upload.Upload",
"testing.utils.DumpResponse",
"os.path.getsize",
"cloudsigma.resource.Server",
"random.randrange",
"multiprocessing.Queue",
"unittest.SkipTest",
"os.fdopen",
"multiprocessing.Process",
"cloudsigma.resource.Drive",
"nose.plugins.attrib.attr",
"logging.getLogger"
] |
[((397, 416), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (406, 416), False, 'from logging import getLogger\n'), ((420, 443), 'nose.plugins.attrib.attr', 'attr', (['"""acceptance_test"""'], {}), "('acceptance_test')\n", (424, 443), False, 'from nose.plugins.attrib import attr\n'), ((8964, 8987), 'nose.plugins.attrib.attr', 'attr', (['"""acceptance_test"""'], {}), "('acceptance_test')\n", (8968, 8987), False, 'from nose.plugins.attrib import attr\n'), ((12440, 12459), 'nose.plugins.attrib.attr', 'attr', (['"""stress_test"""'], {}), "('stress_test')\n", (12444, 12459), False, 'from nose.plugins.attrib import attr\n'), ((662, 683), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (666, 683), False, 'from nose.plugins.attrib import attr\n'), ((1664, 1685), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (1668, 1685), False, 'from nose.plugins.attrib import attr\n'), ((3787, 3808), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (3791, 3808), False, 'from nose.plugins.attrib import attr\n'), ((4716, 4737), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (4720, 4737), False, 'from nose.plugins.attrib import attr\n'), ((5847, 5868), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (5851, 5868), False, 'from nose.plugins.attrib import attr\n'), ((6508, 6529), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (6512, 6529), False, 'from nose.plugins.attrib import attr\n'), ((8822, 8843), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (8826, 8843), False, 'from nose.plugins.attrib import attr\n'), ((10136, 10157), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (10140, 10157), False, 'from nose.plugins.attrib import 
attr\n'), ((10284, 10305), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (10288, 10305), False, 'from nose.plugins.attrib import attr\n'), ((580, 590), 'cloudsigma.resource.Drive', 'cr.Drive', ([], {}), '()\n', (588, 590), True, 'import cloudsigma.resource as cr\n'), ((620, 655), 'testing.utils.DumpResponse', 'DumpResponse', ([], {'clients': '[self.client]'}), '(clients=[self.client])\n', (632, 655), False, 'from testing.utils import DumpResponse\n'), ((10051, 10064), 'cloudsigma.resource.LibDrive', 'cr.LibDrive', ([], {}), '()\n', (10062, 10064), True, 'import cloudsigma.resource as cr\n'), ((10094, 10129), 'testing.utils.DumpResponse', 'DumpResponse', ([], {'clients': '[self.client]'}), '(clients=[self.client])\n', (10106, 10129), False, 'from testing.utils import DumpResponse\n'), ((10906, 10916), 'cloudsigma.resource.Drive', 'cr.Drive', ([], {}), '()\n', (10914, 10916), True, 'import cloudsigma.resource as cr\n'), ((11324, 11335), 'cloudsigma.resource.Server', 'cr.Server', ([], {}), '()\n', (11333, 11335), True, 'import cloudsigma.resource as cr\n'), ((11943, 11954), 'cloudsigma.resource.Server', 'cr.Server', ([], {}), '()\n', (11952, 11954), True, 'import cloudsigma.resource as cr\n'), ((12641, 12651), 'cloudsigma.resource.Drive', 'cr.Drive', ([], {}), '()\n', (12649, 12651), True, 'import cloudsigma.resource as cr\n'), ((15303, 15313), 'cloudsigma.resource.Drive', 'cr.Drive', ([], {}), '()\n', (15311, 15313), True, 'import cloudsigma.resource as cr\n'), ((15390, 15415), 'os.remove', 'os.remove', (['self.file_path'], {}), '(self.file_path)\n', (15399, 15415), False, 'import os\n'), ((15507, 15551), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""drive_upload_test"""'}), "(prefix='drive_upload_test')\n", (15523, 15551), False, 'import tempfile\n'), ((16496, 16503), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (16501, 16503), False, 'from multiprocessing import Process, Queue\n'), ((16519, 16559), 
'multiprocessing.Process', 'Process', ([], {'target': 'do_upload', 'args': '(queue,)'}), '(target=do_upload, args=(queue,))\n', (16526, 16559), False, 'from multiprocessing import Process, Queue\n'), ((11538, 11602), 'unittest.SkipTest', 'unittest.SkipTest', (['"""Cannot find a cdrom drive in drives library"""'], {}), "('Cannot find a cdrom drive in drives library')\n", (11555, 11602), False, 'import unittest\n'), ((12156, 12232), 'unittest.SkipTest', 'unittest.SkipTest', (['"""Cannot find a preinstalled drive in the drives library."""'], {}), "('Cannot find a preinstalled drive in the drives library.')\n", (12173, 12232), False, 'import unittest\n'), ((15139, 15164), 'random.randrange', 'random.randrange', (['(0)', '(1024)'], {}), '(0, 1024)\n', (15155, 15164), False, 'import random\n'), ((16257, 16333), 'cloudsigma.resumable_upload.Upload', 'Upload', (['self.file_path'], {'chunk_size': '(1024 ** 2)', 'drive_name': '"""test_drive_upload"""'}), "(self.file_path, chunk_size=1024 ** 2, drive_name='test_drive_upload')\n", (16263, 16333), False, 'from cloudsigma.resumable_upload import Upload\n'), ((16876, 16907), 'os.path.getsize', 'os.path.getsize', (['self.file_path'], {}), '(self.file_path)\n', (16891, 16907), False, 'import os\n'), ((10930, 10956), 'testing.utils.DumpResponse', 'DumpResponse', ([], {'clients': '[dc]'}), '(clients=[dc])\n', (10942, 10956), False, 'from testing.utils import DumpResponse\n'), ((15561, 15574), 'os.fdopen', 'os.fdopen', (['fd'], {}), '(fd)\n', (15570, 15574), False, 'import os\n'), ((15722, 15750), 'random.randrange', 'random.randrange', (['(0)', '(2 ** 64)'], {}), '(0, 2 ** 64)\n', (15738, 15750), False, 'import random\n'), ((16027, 16054), 'random.randrange', 'random.randrange', (['(0)', '(2 ** 8)'], {}), '(0, 2 ** 8)\n', (16043, 16054), False, 'import random\n')]
|
# rct_patch.py
#
# Author: jeFF0Falltrades
#
# A patching script for the Roller Coaster Tycoon (1999) game
# executable for play on modern systems at full resolution.
#
# Homepage with Video Tutorial:
# https://github.com/jeFF0Falltrades/Game-Patches/tree/master/rct_full_res
#
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from argparse import ArgumentParser, RawTextHelpFormatter
from os.path import isfile
# Dict of both hardcoded and variable values to be checked/patched.
# Each inner dict maps a byte pattern searched for in RCT.exe (space-separated
# hex) to the bytes it should be replaced with.
PATCHES = {
    'FULL_SCREEN': {
        # Patches default window function to use full screen mode
        'E8 86 7A FF FF': 'E8 33 7A FF FF'
    },
    'WINDOWED': {
        # Patches maximum allowable resolution for windowed mode.
        # {wl}/{wh} and {hl}/{hh} are the low/high bytes of the desired width
        # and height, filled in later by populateVals().
        '00 05 00 00 0F 8E 07 00 00 00 C7 45 FC 00 05 00 00 81 7D F4 00 04 00 00 0F 8E 07 00 00 00 C7 45 F4 00 04 00 00':
        '{wl} {wh} 00 00 0F 8E 07 00 00 00 C7 45 FC {wl} {wh} 00 00 81 7D F4 {hl} {hh} 00 00 0F 8E 07 00 00 00 C7 45 F4 {hl} {hh} 00 00'
    }
}
# Gets command line arguments
def getCLAs():
    """Build the CLI parser (auto / check / manual sub-commands) and parse argv."""
    parser = ArgumentParser(
        description=
        'Roller Coaster Tycoon (1999) Full Resolution Patch by jeFF0Falltrades\n\nHomepage: https://github.com/jeFF0Falltrades/Game-Patches/tree/master/rct_full_res',
        formatter_class=RawTextHelpFormatter)
    commands = parser.add_subparsers(dest='cmd')

    # `auto`: patch the binary in place (full screen unless -w is given).
    auto_cmd = commands.add_parser(
        'auto',
        help=
        'Attempt to patch the program automatically (Patches for full screen mode by default)'
    )
    auto_cmd.add_argument('width', help='Your desired resolution width')
    auto_cmd.add_argument('height', help='Your desired resolution height')
    auto_cmd.add_argument(
        '-t', '--target', default='RCT.exe',
        help='Full path to RCT.EXE (defaults to local directory)')
    auto_cmd.add_argument(
        '-o', '--outfile', default='rct_patched.exe',
        help='Desired output file name (defaults to `rct_patched.exe`)')
    auto_cmd.add_argument(
        '-w', '--windowed', action='store_true',
        help='Patch for windowed mode only')

    # `check`: only verify the default byte patterns are present.
    check_cmd = commands.add_parser(
        'check', help='Check a file for compatibility with auto-patching mode')
    check_cmd.add_argument(
        '-t', '--target', default='RCT.exe',
        help='Full path to RCT.EXE (defaults to local directory)')

    # `manual`: print the hex replacements instead of patching.
    manual_cmd = commands.add_parser(
        'manual',
        help=
        'Do not patch the file, just show the necessary hex replacements for manual search/replace with a hex editor'
    )
    manual_cmd.add_argument('width', help='Your desired resolution width')
    manual_cmd.add_argument('height', help='Your desired resolution height')
    return parser.parse_args()
# Populates empty dictionary values based on user input
def populateVals(w, h):
    """Fill the {wl}/{wh}/{hl}/{hh} placeholders in PATCHES['WINDOWED'].

    w, h: desired width/height (ints, or strings from the CLI). Each is split
    into its little-endian low/high bytes and substituted into the templates.
    Raises SystemExit when the values cannot be parsed as integers.
    """
    try:
        width = int(w)
        height = int(h)
    except ValueError:
        # Bug fix: report the values actually passed in. The original code
        # formatted the global `args`, which shows the wrong values (or raises
        # NameError) when this function is called before argument parsing.
        raise SystemExit(
            'Invalid width and height values received: {}x{}'.format(w, h))
    for key in PATCHES['WINDOWED']:
        PATCHES['WINDOWED'][key] = PATCHES['WINDOWED'][key].format(
            wl=hex(width & 0xFF).replace('0x', '').zfill(2),
            wh=hex((width & 0xFF00) >> 8).replace('0x', '').zfill(2),
            hl=hex(height & 0xFF).replace('0x', '').zfill(2),
            hh=hex((height & 0xFF00) >> 8).replace('0x', '').zfill(2))
# Checks if default values are found in target file
def fileCheck(fp):
    """Return True when every default byte pattern in PATCHES is found in fp."""
    with open(fp, 'rb') as handle:
        content = handle.read()
    return all(
        content.find(bytearray.fromhex(pattern)) != -1
        for group in PATCHES.values()
        for pattern in group)
# Prints hex string replacements for manual patching
def printReplacements():
    """Print each default byte pattern alongside its replacement for manual hex editing."""
    print('\n{}\n\t--> {}\n\n'.format('Search String', 'Replacement'))
    for group in PATCHES.values():
        for original, replacement in group.items():
            print('{}\n\t--> {}\n'.format(original, replacement))
# Patches for full screen mode
def patchFullScreen(fp, outfile):
    """Apply every byte replacement in PATCHES to fp and write the result to outfile."""
    with open(fp, 'rb') as source:
        content = source.read()
    for group in PATCHES.values():
        for original, replacement in group.items():
            content = content.replace(bytearray.fromhex(original),
                                      bytearray.fromhex(replacement))
    with open(outfile, 'wb') as patched:
        patched.write(content)
# Patches for windowed mode
def patchWindowed(fp, outfile):
    """Apply only the PATCHES['WINDOWED'] replacements to fp, writing to outfile."""
    with open(fp, 'rb') as source:
        content = source.read()
    for original, replacement in PATCHES['WINDOWED'].items():
        content = content.replace(bytearray.fromhex(original),
                                  bytearray.fromhex(replacement))
    with open(outfile, 'wb') as patched:
        patched.write(content)
# Checks if file exists and passes predefined checks
def doFileChecks(fp):
    """Abort via SystemExit unless fp exists and contains the expected byte patterns."""
    if not isfile(fp):
        message = 'Cannot find file {}. Check file path and try again'
        raise SystemExit(message.format(fp))
    if not fileCheck(fp):
        message = ('File failed offset check: {}. Use manual mode for '
                   'replacements or modify patching script.')
        raise SystemExit(message.format(fp))
if __name__ == '__main__':
    # `args` is module-global on purpose: populateVals()'s error path also
    # reads the global `args`, so this name should not be changed lightly.
    args = getCLAs()
    if args.cmd == 'check':
        doFileChecks(args.target)
    elif args.cmd == 'manual':
        # Manual mode only prints the search/replace hex strings.
        populateVals(args.width, args.height)
        printReplacements()
    elif args.cmd == 'auto':
        populateVals(args.width, args.height)
        doFileChecks(args.target)
        if args.windowed:
            patchWindowed(args.target, args.outfile)
        else:
            patchFullScreen(args.target, args.outfile)
    else:
        raise SystemExit(
            'Unknown command received. Use `python rct_patch.py -h` for help')
    # Printed for every successful command, including `check` and `manual`.
    print('Success!')
|
[
"os.path.isfile",
"argparse.ArgumentParser"
] |
[((2088, 2323), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Roller Coaster Tycoon (1999) Full Resolution Patch by jeFF0Falltrades\n\nHomepage: https://github.com/jeFF0Falltrades/Game-Patches/tree/master/rct_full_res"""', 'formatter_class': 'RawTextHelpFormatter'}), '(description=\n """Roller Coaster Tycoon (1999) Full Resolution Patch by jeFF0Falltrades\n\nHomepage: https://github.com/jeFF0Falltrades/Game-Patches/tree/master/rct_full_res"""\n , formatter_class=RawTextHelpFormatter)\n', (2102, 2323), False, 'from argparse import ArgumentParser, RawTextHelpFormatter\n'), ((5702, 5712), 'os.path.isfile', 'isfile', (['fp'], {}), '(fp)\n', (5708, 5712), False, 'from os.path import isfile\n')]
|
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout
import json
import os
app = QApplication([])
# Default content shown on the very first launch.
notes = {
    "Добро пожаловать!" : {
        "текст" : "Это самое лучшее приложение для заметок в мире!",
        "теги" : ["добро", "инструкция"]
    }
}
# Bug fix: only seed the storage file when it does not exist yet.
# The original code dumped the defaults unconditionally, wiping the
# user's saved notes on every start (they are loaded back further below).
if not os.path.exists("notes_data.json"):
    with open("notes_data.json", "w") as file:
        json.dump(notes, file)
# --- Main window ---
notes_win = QWidget()
notes_win.setWindowTitle('Умные заметки')
notes_win.resize(900, 600)
# --- Widgets ---
list_notes = QListWidget()  # titles of the saved notes
list_notes_label = QLabel('Список заметок')
button_note_create = QPushButton('Создать заметку')
button_note_del = QPushButton('Удалить заметку')
button_note_save = QPushButton('Сохранить заметку')
field_tag = QLineEdit('')  # tag input box
field_tag.setPlaceholderText('Введите тег...')
field_text = QTextEdit()  # note body editor
button_tag_add = QPushButton('Добавить к заметке')
button_tag_del = QPushButton('Открепить от заметки')
button_tag_search = QPushButton('Искать заметки по тегу')
list_tags = QListWidget()  # tags of the selected note
list_tags_label = QLabel('Список тегов')
# Arrange the widgets into layouts
layout_notes = QHBoxLayout()  # root: editor on the left, controls on the right
col_1 = QVBoxLayout()  # left column: the text editor
col_1.addWidget(field_text)
col_2 = QVBoxLayout()  # right column: note list, buttons, tags
col_2.addWidget(list_notes_label)
col_2.addWidget(list_notes)
row_1 = QHBoxLayout()  # create / delete note buttons
row_1.addWidget(button_note_create)
row_1.addWidget(button_note_del)
row_2 = QHBoxLayout()  # save note button
row_2.addWidget(button_note_save)
col_2.addLayout(row_1)
col_2.addLayout(row_2)
col_2.addWidget(list_tags_label)
col_2.addWidget(list_tags)
col_2.addWidget(field_tag)
row_3 = QHBoxLayout()  # add / detach tag buttons
row_3.addWidget(button_tag_add)
row_3.addWidget(button_tag_del)
row_4 = QHBoxLayout()  # tag search button
row_4.addWidget(button_tag_search)
col_2.addLayout(row_3)
col_2.addLayout(row_4)
# Editor takes 2/3 of the width, the control column 1/3.
layout_notes.addLayout(col_1, stretch = 2)
layout_notes.addLayout(col_2, stretch = 1)
notes_win.setLayout(layout_notes)
def add_note():
    """Ask the user for a title and create an empty note under it."""
    note_name, ok = QInputDialog.getText(notes_win, 'Добавить заметку', 'Название заметки:')
    if ok and note_name != '':
        # Bug fix: the original indexed `notes[notes_name]`, an undefined
        # name, so creating a note always raised NameError.
        notes[note_name] = {'текст': '', 'теги': []}
        list_notes.addItem(note_name)
        print(notes)
def show_note():
    """Load the selected note's text and tags into the editor widgets."""
    name = list_notes.selectedItems()[0].text()
    print(name)
    entry = notes[name]
    field_text.setText(entry["текст"])
    list_tags.clear()
    list_tags.addItems(entry["теги"])
def save_note():
    """Persist the editor text into the selected note and rewrite the JSON file."""
    if not list_notes.selectedItems():
        print('Заметка для сохранения не выбрана!')
        return
    name = list_notes.selectedItems()[0].text()
    notes[name]['текст'] = field_text.toPlainText()
    with open('notes_data.json', 'w') as storage:
        json.dump(notes, storage, sort_keys=True)
    print(notes)
def del_note():
    """Remove the selected note, reset the widgets and rewrite the JSON file."""
    if not list_notes.selectedItems():
        print('Заметка для удаления не выбрана!')
        return
    name = list_notes.selectedItems()[0].text()
    del notes[name]
    list_notes.clear()
    list_tags.clear()
    field_text.clear()
    list_notes.addItems(notes)
    with open('notes_data.json', 'w') as storage:
        json.dump(notes, storage, sort_keys=True)
    print(notes)
# Wire UI events to their handlers.
list_notes.itemClicked.connect(show_note)
button_note_create.clicked.connect(add_note)
button_note_save.clicked.connect(save_note)
button_note_del.clicked.connect(del_note)
notes_win.show()
# Load the persisted notes (the file is seeded near the top of this script)
# and list their titles; iterating the dict yields its keys.
with open("notes_data.json", "r") as file:
    notes = json.load(file)
list_notes.addItems(notes)
app.exec_()  # enter the Qt event loop; blocks until the window closes
|
[
"PyQt5.QtWidgets.QLabel",
"json.dump",
"json.load",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QInputDialog.getText",
"PyQt5.QtWidgets.QListWidget",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QApplication"
] |
[((214, 230), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['[]'], {}), '([])\n', (226, 230), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((490, 499), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (497, 499), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((588, 601), 'PyQt5.QtWidgets.QListWidget', 'QListWidget', ([], {}), '()\n', (599, 601), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((622, 646), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Список заметок"""'], {}), "('Список заметок')\n", (628, 646), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((672, 702), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Создать заметку"""'], {}), "('Создать заметку')\n", (683, 702), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((723, 753), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Удалить заметку"""'], {}), "('Удалить заметку')\n", (734, 753), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((774, 806), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Сохранить заметку"""'], {}), "('Сохранить заметку')\n", (785, 806), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((823, 836), 
'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['""""""'], {}), "('')\n", (832, 836), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((899, 910), 'PyQt5.QtWidgets.QTextEdit', 'QTextEdit', ([], {}), '()\n', (908, 910), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((929, 962), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Добавить к заметке"""'], {}), "('Добавить к заметке')\n", (940, 962), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((981, 1016), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Открепить от заметки"""'], {}), "('Открепить от заметки')\n", (992, 1016), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1038, 1075), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Искать заметки по тегу"""'], {}), "('Искать заметки по тегу')\n", (1049, 1075), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1089, 1102), 'PyQt5.QtWidgets.QListWidget', 'QListWidget', ([], {}), '()\n', (1100, 1102), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1122, 1144), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Список тегов"""'], {}), "('Список тегов')\n", (1128, 1144), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, 
QFormLayout\n'), ((1200, 1213), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1211, 1213), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1223, 1236), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (1234, 1236), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1278, 1291), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (1289, 1291), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1365, 1378), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1376, 1378), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1459, 1472), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1470, 1472), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1658, 1671), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1669, 1671), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((1747, 1760), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1758, 1760), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((445, 467), 'json.dump', 'json.dump', (['notes', 'file'], {}), '(notes, file)\n', (454, 467), False, 'import json\n'), ((2017, 
2089), 'PyQt5.QtWidgets.QInputDialog.getText', 'QInputDialog.getText', (['notes_win', '"""Добавить заметку"""', '"""Название заметки:"""'], {}), "(notes_win, 'Добавить заметку', 'Название заметки:')\n", (2037, 2089), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QListWidget, QLineEdit, QTextEdit, QInputDialog, QHBoxLayout, QVBoxLayout, QFormLayout\n'), ((3499, 3514), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3508, 3514), False, 'import json\n'), ((2670, 2708), 'json.dump', 'json.dump', (['notes', 'file'], {'sort_keys': '(True)'}), '(notes, file, sort_keys=True)\n', (2679, 2708), False, 'import json\n'), ((3111, 3149), 'json.dump', 'json.dump', (['notes', 'file'], {'sort_keys': '(True)'}), '(notes, file, sort_keys=True)\n', (3120, 3149), False, 'import json\n')]
|
from unittest import TestCase
from unittest.mock import patch
from app.service.business_service import SuperMan
class TestSuperMan(TestCase):
    """Unit tests for SuperMan with its HTTP layer stubbed out."""

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @patch("app.service.business_service.SuperMan._request_get", return_value="response")
    def test_request_get(self, request_get_stub):
        """to_call() must hit _request_get with the fixed resource id and the CPF."""
        subject = SuperMan(CPF='10387595612')
        result = subject.to_call()
        request_get_stub.assert_called_once_with(
            resource='5af099f2310000610096c6ee',
            params={'CPF': '10387595612'})
        self.assertEqual(result, 'response')
|
[
"unittest.mock.patch",
"app.service.business_service.SuperMan"
] |
[((337, 426), 'unittest.mock.patch', 'patch', (['"""app.service.business_service.SuperMan._request_get"""'], {'return_value': '"""response"""'}), "('app.service.business_service.SuperMan._request_get', return_value=\n 'response')\n", (342, 426), False, 'from unittest.mock import patch\n'), ((492, 519), 'app.service.business_service.SuperMan', 'SuperMan', ([], {'CPF': '"""10387595612"""'}), "(CPF='10387595612')\n", (500, 519), False, 'from app.service.business_service import SuperMan\n')]
|
#<NAME>
#<EMAIL>
#github.com/bksec
#####################################
###############RENKLER###############
#####################################
# ANSI escape sequences for terminal colours and styles.
# Names are kept in Turkish because callers reference them by these names.
sifirla = '\033[0m'  # reset
beyaz = '\033[37m'  # white
kirmizi= '\033[31m'  # red
turuncu = '\u001b[38;5;208m'  # orange (256-colour)
yesil= '\033[32m'  # green
sari= '\033[33m'  # yellow
lacivert= '\033[34m'  # navy blue
pembe= '\033[35m'  # pink
mor = '\u001b[38;5;165m'  # purple
mavi= '\u001b[38;5;32m'  # blue
siyah = '\033[90m'  # black (bright-black)
kahverengi = '\u001b[38;5;95m'  # brown
aciksari = '\u001b[38;5;228m'  # light yellow
gri = '\u001b[38;5;246m'  # grey
turkuaz = '\u001b[38;5;45m'  # turquoise
pmavi = '\033[96m'  # p --> bright ("parlak")
pkirmizi= '\033[91m'  # bright red
pyesil = '\033[92m'  # bright green
psari = '\033[93m'  # bright yellow
asiyah= '\033[40m'  # a --> background ("arkaplan")
akirmizi= '\033[41m'
ayesil= '\033[42m'
asari= '\033[43m'
alacivert= '\033[44m'
amor= '\033[45m'
amavi= '\033[46m'
abeyaz= '\033[47m'
apsiyah= '\033[100m'  # ap --> bright background ("arkaplan-parlak")
apkirmizi= '\033[101m'
apyesil= '\033[102m'
apsari= '\033[103m'
aplacivert= '\033[104m'
apmor= '\033[105m'
apmavi= '\033[106m'
apbeyaz= '\033[107m'
apsifirla= '\033[0;49m'  # reset background only
# text styles ("yazi sekilleri")
kalin = '\033[1m'  # bold
altcizgi = '\033[4m'  # underline
parlak = '\033[5m'  # blink/bright
# Every escape prefixed to its own name, handy for demo printing.
# NOTE(review): `turkuaz` and `apsiyah` are defined above but missing from
# this tuple — confirm whether that is intentional.
tum_renkler=("\033[0msifirla","\033[37mbeyaz","\033[31mkirmizi","\u001b[38;5;208mturuncu","\033[32myesil","\033[33msari","\033[34mlacivert","\033[35mpembe","\u001b[38;5;165mmor","\u001b[38;5;32mmavi","\033[90msiyah","\u001b[38;5;228maciksari","\u001b[38;5;246mgri","\u001b[38;5;95mkahverengi","\033[96mpmavi","\033[91mpkirmizi","\033[92mpyesil","\033[93mpsari\033[0m","\033[40masiyah","\033[41makirmizi","\033[42mayesil","\033[43masari","\033[44malacivert","\033[45mamor","\033[46mamavi","\033[47mabeyaz","\033[101mapkirmizi","\033[102mapyesil","\033[103mapsari","\033[104maplacivert","\033[105mapmor","\033[106mapmavi","\033[107mapbeyaz","\033[0;49mapsifirla","\033[1mkalin","\033[4maltcizgi\033[0m","\033[5mparlak")
def rastgeleRenkler(*rastgele):
    """Return one randomly chosen item from the given arguments.

    Raises IndexError when called with no arguments (random.choice on an
    empty sequence), matching the original behaviour.
    """
    import random
    # random.choice accepts any sequence, so the intermediate list() copy
    # the original built was unnecessary.
    return random.choice(rastgele)
|
[
"random.choice"
] |
[((1881, 1909), 'random.choice', 'random.choice', (['rastgeleliste'], {}), '(rastgeleliste)\n', (1894, 1909), False, 'import random\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME> <<EMAIL>> <https://hanxiao.github.io>
import tensorflow as tf

# Default weight initializer shared by get_var() and the dense layers below:
# uniform variance scaling averaged over fan-in/fan-out (factor 1.0).
initializer = tf.contrib.layers.variance_scaling_initializer(factor=1.0,
                                                           mode='FAN_AVG',
                                                           uniform=True,
                                                           dtype=tf.float32)
# Variance scaling with factor 2.0 over fan-in, normal distribution —
# intended for ReLU layers per its name.
# NOTE(review): initializer_relu is not referenced anywhere in this file
# chunk; confirm it is used elsewhere before removing.
initializer_relu = tf.contrib.layers.variance_scaling_initializer(factor=2.0,
                                                                mode='FAN_IN',
                                                                uniform=False,
                                                                dtype=tf.float32)
# Default l2 weight-decay regularizer (scale 3e-7) used by the helpers below.
regularizer = tf.contrib.layers.l2_regularizer(scale=3e-7)
def minus_mask(x, mask, offset=1e30):
    """Mask a sequence tensor by pushing padded positions towards -infinity.

    :param x: sequence data in the shape of [B, L, D]
    :param mask: 0-1 mask in the shape of [B, L]
    :param offset: very large positive number subtracted where mask == 0
    :return: masked x
    """
    # (1.0 - mask) flips the mask, so only positions with mask == 0 get the push.
    inverted = tf.expand_dims(1.0 - mask, axis=-1)
    return x - inverted * offset
def mul_mask(x, mask):
    """Zero out padded positions of a sequence tensor.

    :param x: sequence data in the shape of [B, L, D]
    :param mask: 0-1 mask in the shape of [B, L]
    :return: masked x
    """
    expanded_mask = tf.expand_dims(mask, axis=-1)
    return x * expanded_mask
def masked_reduce_mean(x, mask):
    """Mean over the length axis, counting only the unmasked positions."""
    summed = tf.reduce_sum(mul_mask(x, mask), axis=1)
    lengths = tf.reduce_sum(mask, axis=1, keepdims=True)
    return summed / lengths
def masked_reduce_max(x, mask):
    """Max over the length axis with masked positions pushed towards -infinity."""
    suppressed = minus_mask(x, mask)
    return tf.reduce_max(suppressed, axis=1)
def weighted_sparse_softmax_cross_entropy(labels, preds, weights):
    """Sparse softmax cross entropy with per-class weighting.

    :param labels: sparse label in the shape of [B], size of label is L
    :param preds: logit in the shape of [B, L]
    :param weights: weight in the shape of [L]
    :return: weighted sparse softmax cross entropy in the shape of [B]
    """
    sample_weights = get_bounded_class_weight(labels, weights)
    return tf.losses.sparse_softmax_cross_entropy(labels,
                                                  logits=preds,
                                                  weights=sample_weights)
def get_bounded_class_weight(labels, weights, ub=None):
    """Look up per-sample class weights and bound them to [1.0, ub].

    :param labels: sparse labels in the shape of [B]
    :param weights: per-class weights in the shape of [L], or None to disable weighting
    :param ub: optional upper bound; defaults to num_classes / 2
    :return: per-sample weights in [1.0, ub], or the scalar 1.0 when weights is None
    """
    if weights is None:
        return 1.0
    else:
        w = tf.gather(weights, labels)
        # Normalize so the smallest class weight becomes exactly 1.
        w = w / tf.reduce_min(w)
        # Compress the dynamic range with log1p, then clip into [1, ub].
        w = tf.clip_by_value(1.0 + tf.log1p(w),
                             clip_value_min=1.0,
                             clip_value_max=ub if ub is not None else tf.cast(tf.shape(weights)[0], tf.float32) / 2.0)
        return w
def weighted_smooth_softmax_cross_entropy(labels, num_labels, preds, weights,
                                          epsilon=0.1):
    """Label-smoothed softmax cross entropy with per-class weighting.

    :param labels: sparse label in the shape of [B], size of label is L
    :param num_labels: maximum number of labels
    :param preds: logit in the shape of [B, L]
    :param weights: weight in the shape of [L]
    :param epsilon: smoothing factor
    :return: weighted smoothed softmax cross entropy in the shape of [B]
    """
    one_hot_labels = tf.one_hot(labels, num_labels)
    sample_weights = get_bounded_class_weight(labels, weights)
    return tf.losses.softmax_cross_entropy(one_hot_labels,
                                           logits=preds,
                                           label_smoothing=epsilon,
                                           weights=sample_weights)
def get_var(name, shape, dtype=tf.float32,
            initializer_fn=initializer,
            regularizer_fn=regularizer, **kwargs):
    """Create (or fetch) a variable using the module's default initializer and regularizer."""
    return tf.get_variable(name, shape,
                           dtype=dtype,
                           initializer=initializer_fn,
                           regularizer=regularizer_fn, **kwargs)
def layer_norm(inputs,
               epsilon=1e-8,
               scope=None,
               reuse=None):
    """Apply layer normalization over the last axis.

    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension has
        `batch_size`.
      epsilon: Small constant guarding against division by zero.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    """
    with tf.variable_scope(scope or 'Layer_Normalize', reuse=reuse):
        feature_shape = inputs.get_shape()[-1:]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Learnable shift (beta) and scale (gamma), one value per feature.
        beta = tf.Variable(tf.zeros(feature_shape))
        gamma = tf.Variable(tf.ones(feature_shape))
        standardized = (inputs - mean) / ((variance + epsilon) ** .5)
        return gamma * standardized + beta
def linear_logit(x, units, act_fn=None, dropout_keep=1., use_layer_norm=False, scope=None, **kwargs):
    """Dense projection followed by dropout and optional layer normalization."""
    with tf.variable_scope(scope or 'linear_logit'):
        projected = tf.layers.dense(x, units=units, activation=act_fn,
                                    kernel_initializer=initializer,
                                    kernel_regularizer=regularizer)
        # do dropout
        projected = tf.nn.dropout(projected, keep_prob=dropout_keep)
        if use_layer_norm:
            projected = tf.contrib.layers.layer_norm(projected)
        return projected
def bilinear_logit(x, units, act_fn=None,
                   first_units=256,
                   first_act_fn=tf.nn.relu, scope=None, **kwargs):
    """Two stacked linear_logit layers: a hidden projection, then the output logits."""
    with tf.variable_scope(scope or 'bilinear_logit'):
        hidden = linear_logit(x, first_units, act_fn=first_act_fn, scope='first', **kwargs)
        return linear_logit(hidden, units, scope='second', act_fn=act_fn, **kwargs)
def label_smoothing(inputs, epsilon=0.1):
    """Apply label smoothing. See https://arxiv.org/abs/1512.00567.

    Each value is pulled towards the uniform distribution over the vocabulary:
    a one-hot [0, 0, 1] with epsilon=0.1 and V=3 becomes
    [0.0333, 0.0333, 0.9333].

    Args:
      inputs: A 3d tensor with shape of [N, T, V], where V is the number of vocabulary.
      epsilon: Smoothing rate.
    """
    vocab_size = inputs.get_shape().as_list()[-1]  # number of channels
    return (1 - epsilon) * inputs + epsilon / vocab_size
def normalize_by_axis(x, axis, smooth_factor=1e-5):
    """Smooth x additively, then normalize so it sums to one along ``axis``."""
    smoothed = x + smooth_factor
    return smoothed / tf.reduce_sum(smoothed, axis, keepdims=True)  # num A x num B
def get_cross_correlated_mat(num_out_A, num_out_B, learn_cooc='FIXED', cooc_AB=None, scope=None, reuse=None):
    """Build the conditional matrices p(A|B) and p(B|A) between two label sets.

    :param num_out_A: number of classes in task A
    :param num_out_B: number of classes in task B
    :param learn_cooc: 'FIXED' (use the given cooc_AB), 'JOINT' (learn one
        shared co-occurrence matrix), or 'DISJOINT' (learn the two
        conditionals from separate parameter matrices)
    :param cooc_AB: [num_out_A, num_out_B] co-occurrence matrix; required for 'FIXED'
    :return: tuple (pA_given_B, pB_given_A)
    :raises NotImplementedError: for any other learn_cooc value
    """
    with tf.variable_scope(scope or 'CrossCorrlated_Mat', reuse=reuse):
        if learn_cooc == 'FIXED' and cooc_AB is not None:
            # Normalizing along axis 1 makes rows sum to one: p(B|A);
            # along axis 0, columns sum to one: p(A|B).
            pB_given_A = normalize_by_axis(cooc_AB, 1)
            pA_given_B = normalize_by_axis(cooc_AB, 0)
        elif learn_cooc == 'JOINT':
            # One learnable matrix shared by both conditionals; relu keeps it non-negative.
            share_cooc = tf.nn.relu(get_var('cooc_ab', shape=[num_out_A, num_out_B]))
            pB_given_A = normalize_by_axis(share_cooc, 1)
            pA_given_B = normalize_by_axis(share_cooc, 0)
        elif learn_cooc == 'DISJOINT':
            # Independent learnable matrices for each conditional.
            cooc1 = tf.nn.relu(get_var('pb_given_a', shape=[num_out_A, num_out_B]))
            cooc2 = tf.nn.relu(get_var('pa_given_b', shape=[num_out_A, num_out_B]))
            pB_given_A = normalize_by_axis(cooc1, 1)
            pA_given_B = normalize_by_axis(cooc2, 0)
        else:
            raise NotImplementedError
        return pA_given_B, pB_given_A
def get_self_correlated_mat(num_out_A, scope=None, reuse=None):
    """Build a learnable symmetric self-correlation matrix for one label set.

    Returns C @ C^T + I for a learnable [num_out_A, num_out_A] matrix C.
    Deliberately uses a smaller initializer (factor=0.1) and a much stronger
    l2 penalty (3e-4) than the module-level defaults (factor=1.0, 3e-7).
    """
    with tf.variable_scope(scope or 'Self_Correlated_mat', reuse=reuse):
        cooc1 = get_var('pa_corr', shape=[num_out_A, num_out_A],
                        initializer_fn=tf.contrib.layers.variance_scaling_initializer(factor=0.1,
                                                                                       mode='FAN_AVG',
                                                                                       uniform=True,
                                                                                       dtype=tf.float32),
                        regularizer_fn=tf.contrib.layers.l2_regularizer(scale=3e-4))
        return tf.matmul(cooc1, cooc1, transpose_b=True) + tf.eye(num_out_A)
def gate_filter(x, scope=None, reuse=None):
    """Zero out every element of x not exceeding a learnable scalar threshold."""
    with tf.variable_scope(scope or 'Gate', reuse=reuse):
        threshold = get_var('threshold', shape=[])
        keep = tf.cast(tf.greater(x, threshold), tf.float32)
        return x * keep
from tensorflow.python.ops import array_ops
def focal_loss(prediction_tensor, target_tensor, weights=None, alpha=0.25, gamma=2):
    r"""Compute focal loss for predictions.
        Multi-labels Focal loss formula:
            FL = -alpha * (z-p)^gamma * log(p) -(1-alpha) * p^gamma * log(1-p)
                 ,which alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.
    Args:
     prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
     target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
     weights: A float tensor of shape [batch_size, num_anchors].
        NOTE(review): accepted but never used by this implementation — confirm
        whether per-anchor weighting was intended.
     alpha: A scalar tensor for focal loss alpha hyper-parameter
     gamma: A scalar tensor for focal loss gamma hyper-parameter
    Returns:
        loss: A (scalar) tensor representing the value of the loss function
    """
    sigmoid_p = tf.nn.sigmoid(prediction_tensor)
    zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
    # For positive prediction, only need consider front part loss, back part is 0;
    # target_tensor > zeros <=> z=1, so positive coefficient = z - p.
    pos_p_sub = array_ops.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)
    # For negative prediction, only need consider back part loss, front part is 0;
    # target_tensor > zeros <=> z=1, so negative coefficient = 0.
    neg_p_sub = array_ops.where(target_tensor > zeros, zeros, sigmoid_p)
    # Clipping to [1e-8, 1.0] keeps log() away from log(0).
    per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
                          - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
    return tf.reduce_sum(per_entry_cross_ent)
def spatial_dropout(x, scope=None, reuse=None):
    """Drop one randomly chosen channel of x entirely and rescale the rest."""
    channels = x.get_shape().as_list()[-1]
    with tf.variable_scope(scope or 'spatial_dropout', reuse=reuse):
        drop_idx = tf.random_uniform(shape=[1], minval=0, maxval=channels, dtype=tf.int32)
        # One-hot with the chosen channel set to 0 and all others to 1.
        keep_mask = tf.one_hot(drop_idx, on_value=0., off_value=1., depth=channels)
        dropped = x * keep_mask  # do dropout
        return dropped * (1. + 1. / channels)  # do rescale
def get_last_output(output, seq_length, scope=None, reuse=None):
    """Gather the output at the last valid time step of each sequence.

    http://disq.us/p/1gjkgdr
    output: [batch x number of steps x ... ] Output of the dynamic lstm.
    seq_length: [batch] Length of each of the sequence.
    """
    with tf.variable_scope(scope or 'gather_nd', reuse=reuse):
        batch_range = tf.range(0, tf.shape(seq_length)[0])
        # Pair each batch index with its (length - 1) time index.
        gather_idx = tf.stack([batch_range, seq_length - 1], 1)
        return tf.gather_nd(output, gather_idx)
def get_lstm_init_state(batch_size, num_layers, num_units, direction, scope=None, reuse=None, **kwargs):
    """Build learnable initial (c, h) states for a (possibly bidirectional) LSTM stack."""
    with tf.variable_scope(scope or 'lstm_init_state', reuse=reuse):
        # Bidirectional stacks need one state per direction per layer.
        num_dir = 2 if direction.startswith('bi') else 1
        init_c = get_var('lstm_init_c', shape=[num_layers * num_dir, num_units])
        # Tile the learned state across the batch dimension.
        init_c = tf.tile(tf.expand_dims(init_c, axis=1), [1, batch_size, 1])
        init_h = get_var('lstm_init_h', shape=[num_layers * num_dir, num_units])
        init_h = tf.tile(tf.expand_dims(init_h, axis=1), [1, batch_size, 1])
        return init_c, init_h
def dropout_res_layernorm(x, fx, act_fn=tf.nn.relu,
                          dropout_keep_rate=1.0,
                          residual=False,
                          normalize_output=False,
                          scope='rnd_block',
                          reuse=None, **kwargs):
    """Apply dropout to `fx`, optionally add a (projected) skip connection
    from `x`, optionally apply an activation, and optionally layer-normalize.

    When the residual path is enabled and the channel counts of `x` and `fx`
    differ, `x` is projected with a 1x1 convolution before the addition.
    """
    with tf.variable_scope(scope, reuse=reuse):
        in_ch = x.get_shape().as_list()[-1]
        out_ch = fx.get_shape().as_list()[-1]
        fx = tf.nn.dropout(fx, keep_prob=dropout_keep_rate)
        if residual and in_ch != out_ch:
            # match channel counts so the skip connection can be added
            shortcut = tf.layers.conv1d(x,
                                        filters=out_ch,
                                        kernel_size=1,
                                        activation=None,
                                        name='res_1x1conv')
        else:
            shortcut = x
        if not residual:
            output = fx
        elif act_fn is None:
            output = fx + shortcut
        else:
            output = act_fn(fx + shortcut)
        if normalize_output:
            output = layer_norm(output)
        return output
|
[
"tensorflow.reduce_sum",
"tensorflow.python.ops.array_ops.where",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.gather_nd",
"tensorflow.clip_by_value",
"tensorflow.matmul",
"tensorflow.greater",
"tensorflow.get_variable",
"tensorflow.one_hot",
"tensorflow.nn.moments",
"tensorflow.gather",
"tensorflow.variable_scope",
"tensorflow.stack",
"tensorflow.reduce_min",
"tensorflow.ones",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.eye",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.expand_dims",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.random_uniform",
"tensorflow.layers.dense",
"tensorflow.log1p",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.layers.conv1d",
"tensorflow.nn.sigmoid",
"tensorflow.nn.dropout"
] |
[((133, 243), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {'factor': '(1.0)', 'mode': '"""FAN_AVG"""', 'uniform': '(True)', 'dtype': 'tf.float32'}), "(factor=1.0, mode='FAN_AVG',\n uniform=True, dtype=tf.float32)\n", (179, 243), True, 'import tensorflow as tf\n'), ((442, 552), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {'factor': '(2.0)', 'mode': '"""FAN_IN"""', 'uniform': '(False)', 'dtype': 'tf.float32'}), "(factor=2.0, mode='FAN_IN',\n uniform=False, dtype=tf.float32)\n", (488, 552), True, 'import tensorflow as tf\n'), ((761, 806), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', ([], {'scale': '(3e-07)'}), '(scale=3e-07)\n', (793, 806), True, 'import tensorflow as tf\n'), ((3663, 3774), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer_fn', 'dtype': 'dtype', 'regularizer': 'regularizer_fn'}), '(name, shape, initializer=initializer_fn, dtype=dtype,\n regularizer=regularizer_fn, **kwargs)\n', (3678, 3774), True, 'import tensorflow as tf\n'), ((9952, 9984), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['prediction_tensor'], {}), '(prediction_tensor)\n', (9965, 9984), True, 'import tensorflow as tf\n'), ((9997, 10051), 'tensorflow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', (['sigmoid_p'], {'dtype': 'sigmoid_p.dtype'}), '(sigmoid_p, dtype=sigmoid_p.dtype)\n', (10017, 10051), False, 'from tensorflow.python.ops import array_ops\n'), ((10220, 10292), 'tensorflow.python.ops.array_ops.where', 'array_ops.where', (['(target_tensor > zeros)', '(target_tensor - sigmoid_p)', 'zeros'], {}), '(target_tensor > zeros, target_tensor - sigmoid_p, zeros)\n', (10235, 10292), False, 'from tensorflow.python.ops import array_ops\n'), ((10459, 10515), 'tensorflow.python.ops.array_ops.where', 'array_ops.where', (['(target_tensor > zeros)', 'zeros', 'sigmoid_p'], 
{}), '(target_tensor > zeros, zeros, sigmoid_p)\n', (10474, 10515), False, 'from tensorflow.python.ops import array_ops\n'), ((10753, 10787), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['per_entry_cross_ent'], {}), '(per_entry_cross_ent)\n', (10766, 10787), True, 'import tensorflow as tf\n'), ((1347, 1376), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (1361, 1376), True, 'import tensorflow as tf\n'), ((1466, 1508), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(1)', 'keepdims': '(True)'}), '(mask, axis=1, keepdims=True)\n', (1479, 1508), True, 'import tensorflow as tf\n'), ((2345, 2371), 'tensorflow.gather', 'tf.gather', (['weights', 'labels'], {}), '(weights, labels)\n', (2354, 2371), True, 'import tensorflow as tf\n'), ((3265, 3295), 'tensorflow.one_hot', 'tf.one_hot', (['labels', 'num_labels'], {}), '(labels, num_labels)\n', (3275, 3295), True, 'import tensorflow as tf\n'), ((4440, 4498), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'Layer_Normalize')"], {'reuse': 'reuse'}), "(scope or 'Layer_Normalize', reuse=reuse)\n", (4457, 4498), True, 'import tensorflow as tf\n'), ((4609, 4652), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs', '[-1]'], {'keep_dims': '(True)'}), '(inputs, [-1], keep_dims=True)\n', (4622, 4652), True, 'import tensorflow as tf\n'), ((5000, 5042), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'linear_logit')"], {}), "(scope or 'linear_logit')\n", (5017, 5042), True, 'import tensorflow as tf\n'), ((5060, 5179), 'tensorflow.layers.dense', 'tf.layers.dense', (['x'], {'units': 'units', 'activation': 'act_fn', 'kernel_initializer': 'initializer', 'kernel_regularizer': 'regularizer'}), '(x, units=units, activation=act_fn, kernel_initializer=\n initializer, kernel_regularizer=regularizer)\n', (5075, 5179), True, 'import tensorflow as tf\n'), ((5276, 5320), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['logit'], {'keep_prob': 'dropout_keep'}), 
'(logit, keep_prob=dropout_keep)\n', (5289, 5320), True, 'import tensorflow as tf\n'), ((5581, 5625), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'bilinear_logit')"], {}), "(scope or 'bilinear_logit')\n", (5598, 5625), True, 'import tensorflow as tf\n'), ((6907, 6944), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x', 'axis'], {'keepdims': '(True)'}), '(x, axis, keepdims=True)\n', (6920, 6944), True, 'import tensorflow as tf\n'), ((7083, 7144), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'CrossCorrlated_Mat')"], {'reuse': 'reuse'}), "(scope or 'CrossCorrlated_Mat', reuse=reuse)\n", (7100, 7144), True, 'import tensorflow as tf\n'), ((8031, 8093), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'Self_Correlated_mat')"], {'reuse': 'reuse'}), "(scope or 'Self_Correlated_mat', reuse=reuse)\n", (8048, 8093), True, 'import tensorflow as tf\n'), ((8782, 8829), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'Gate')"], {'reuse': 'reuse'}), "(scope or 'Gate', reuse=reuse)\n", (8799, 8829), True, 'import tensorflow as tf\n'), ((10891, 10949), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'spatial_dropout')"], {'reuse': 'reuse'}), "(scope or 'spatial_dropout', reuse=reuse)\n", (10908, 10949), True, 'import tensorflow as tf\n'), ((10963, 11035), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[1]', 'minval': '(0)', 'maxval': 'input_dim', 'dtype': 'tf.int32'}), '(shape=[1], minval=0, maxval=input_dim, dtype=tf.int32)\n', (10980, 11035), True, 'import tensorflow as tf\n'), ((11048, 11107), 'tensorflow.one_hot', 'tf.one_hot', (['d'], {'on_value': '(0.0)', 'off_value': '(1.0)', 'depth': 'input_dim'}), '(d, on_value=0.0, off_value=1.0, depth=input_dim)\n', (11058, 11107), True, 'import tensorflow as tf\n'), ((11511, 11563), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'gather_nd')"], {'reuse': 'reuse'}), "(scope or 'gather_nd', reuse=reuse)\n", (11528, 11563), 
True, 'import tensorflow as tf\n'), ((11634, 11668), 'tensorflow.stack', 'tf.stack', (['[rng, seq_length - 1]', '(1)'], {}), '([rng, seq_length - 1], 1)\n', (11642, 11668), True, 'import tensorflow as tf\n'), ((11684, 11713), 'tensorflow.gather_nd', 'tf.gather_nd', (['output', 'indexes'], {}), '(output, indexes)\n', (11696, 11713), True, 'import tensorflow as tf\n'), ((11830, 11888), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'lstm_init_state')"], {'reuse': 'reuse'}), "(scope or 'lstm_init_state', reuse=reuse)\n", (11847, 11888), True, 'import tensorflow as tf\n'), ((12551, 12588), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (12568, 12588), True, 'import tensorflow as tf\n'), ((12723, 12769), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fx'], {'keep_prob': 'dropout_keep_rate'}), '(fx, keep_prob=dropout_keep_rate)\n', (12736, 12769), True, 'import tensorflow as tf\n'), ((1092, 1127), 'tensorflow.expand_dims', 'tf.expand_dims', (['(1.0 - mask)'], {'axis': '(-1)'}), '(1.0 - mask, axis=-1)\n', (1106, 1127), True, 'import tensorflow as tf\n'), ((2388, 2404), 'tensorflow.reduce_min', 'tf.reduce_min', (['w'], {}), '(w)\n', (2401, 2404), True, 'import tensorflow as tf\n'), ((4680, 4702), 'tensorflow.zeros', 'tf.zeros', (['params_shape'], {}), '(params_shape)\n', (4688, 4702), True, 'import tensorflow as tf\n'), ((4732, 4753), 'tensorflow.ones', 'tf.ones', (['params_shape'], {}), '(params_shape)\n', (4739, 4753), True, 'import tensorflow as tf\n'), ((5368, 5403), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['logit'], {}), '(logit)\n', (5396, 5403), True, 'import tensorflow as tf\n'), ((8665, 8706), 'tensorflow.matmul', 'tf.matmul', (['cooc1', 'cooc1'], {'transpose_b': '(True)'}), '(cooc1, cooc1, transpose_b=True)\n', (8674, 8706), True, 'import tensorflow as tf\n'), ((8709, 8726), 'tensorflow.eye', 'tf.eye', (['num_out_A'], {}), '(num_out_A)\n', (8715, 8726), 
True, 'import tensorflow as tf\n'), ((8905, 8929), 'tensorflow.greater', 'tf.greater', (['x', 'threshold'], {}), '(x, threshold)\n', (8915, 8929), True, 'import tensorflow as tf\n'), ((12043, 12068), 'tensorflow.expand_dims', 'tf.expand_dims', (['c'], {'axis': '(1)'}), '(c, axis=1)\n', (12057, 12068), True, 'import tensorflow as tf\n'), ((12186, 12211), 'tensorflow.expand_dims', 'tf.expand_dims', (['h'], {'axis': '(1)'}), '(h, axis=1)\n', (12200, 12211), True, 'import tensorflow as tf\n'), ((12840, 12935), 'tensorflow.layers.conv1d', 'tf.layers.conv1d', (['x'], {'filters': 'output_dim', 'kernel_size': '(1)', 'activation': 'None', 'name': '"""res_1x1conv"""'}), "(x, filters=output_dim, kernel_size=1, activation=None,\n name='res_1x1conv')\n", (12856, 12935), True, 'import tensorflow as tf\n'), ((2440, 2451), 'tensorflow.log1p', 'tf.log1p', (['w'], {}), '(w)\n', (2448, 2451), True, 'import tensorflow as tf\n'), ((8199, 8309), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {'factor': '(0.1)', 'mode': '"""FAN_AVG"""', 'uniform': '(True)', 'dtype': 'tf.float32'}), "(factor=0.1, mode='FAN_AVG',\n uniform=True, dtype=tf.float32)\n", (8245, 8309), True, 'import tensorflow as tf\n'), ((8604, 8650), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', ([], {'scale': '(0.0003)'}), '(scale=0.0003)\n', (8636, 8650), True, 'import tensorflow as tf\n'), ((10582, 10621), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['sigmoid_p', '(1e-08)', '(1.0)'], {}), '(sigmoid_p, 1e-08, 1.0)\n', (10598, 10621), True, 'import tensorflow as tf\n'), ((10696, 10741), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1.0 - sigmoid_p)', '(1e-08)', '(1.0)'], {}), '(1.0 - sigmoid_p, 1e-08, 1.0)\n', (10712, 10741), True, 'import tensorflow as tf\n'), ((11591, 11611), 'tensorflow.shape', 'tf.shape', (['seq_length'], {}), '(seq_length)\n', (11599, 11611), True, 'import tensorflow as tf\n'), ((2580, 2597), 
'tensorflow.shape', 'tf.shape', (['weights'], {}), '(weights)\n', (2588, 2597), True, 'import tensorflow as tf\n')]
|
import shutil
import numpy as np
import pytest
import openmc
import openmc.capi
from tests import cdtemp
# Skip every test in this module unless the installed openmc build was
# compiled with DAGMC (CAD geometry) support.
pytestmark = pytest.mark.skipif(
    not openmc.capi._dagmc_enabled(),
    reason="DAGMC CAD geometry is not enabled.")
@pytest.fixture(scope="module", autouse=True)
def dagmc_model(request):
    """Build a small DAGMC-backed OpenMC model in a temporary directory,
    export it to XML, and keep the C API initialized for the whole module.
    """
    model = openmc.model.Model()
    # settings
    model.settings.batches = 5
    model.settings.inactive = 0
    model.settings.particles = 100
    model.settings.temperature = {'tolerance': 50.0}
    model.settings.verbosity = 1
    source_box = openmc.stats.Box([ -4, -4, -4 ],
                                  [ 4, 4, 4 ])
    source = openmc.Source(space=source_box)
    model.settings.source = source
    model.settings.dagmc = True
    # tally
    tally = openmc.Tally()
    tally.scores = ['total']
    tally.filters = [openmc.CellFilter(1)]
    model.tallies = [tally]
    # materials
    u235 = openmc.Material(name="fuel")
    u235.add_nuclide('U235', 1.0, 'ao')
    u235.set_density('g/cc', 11)
    # id/temperature chosen to match the cells checked in the test below
    u235.id = 40
    u235.temperature = 320
    water = openmc.Material(name="water")
    water.add_nuclide('H1', 2.0, 'ao')
    water.add_nuclide('O16', 1.0, 'ao')
    water.set_density('g/cc', 1.0)
    water.add_s_alpha_beta('c_H_in_H2O')
    water.id = 41
    mats = openmc.Materials([u235, water])
    model.materials = mats
    # location of dagmc file in test directory
    dagmc_file = request.fspath.dirpath() + "/dagmc.h5m"
    # move to a temporary directory
    with cdtemp():
        shutil.copyfile(dagmc_file, "./dagmc.h5m")
        model.export_to_xml()
        openmc.capi.init()
        # yield inside the context so the C API and temp dir stay alive for
        # the tests, then finalize on teardown
        yield
        openmc.capi.finalize()
@pytest.mark.parametrize("cell_id,exp_temp", ((1, 320.0), # assigned by material
                                              (2, 300.0), # assigned in dagmc file
                                              (3, 293.6))) # assigned by default
def test_dagmc_temperatures(cell_id, exp_temp):
    """Check that each DAGMC cell reports the expected temperature source."""
    cell = openmc.capi.cells[cell_id]
    assert np.isclose(cell.get_temperature(), exp_temp)
|
[
"openmc.model.Model",
"openmc.Material",
"openmc.capi.init",
"openmc.capi._dagmc_enabled",
"openmc.capi.finalize",
"pytest.fixture",
"openmc.CellFilter",
"tests.cdtemp",
"openmc.Source",
"openmc.stats.Box",
"openmc.Tally",
"pytest.mark.parametrize",
"shutil.copyfile",
"openmc.Materials"
] |
[((232, 276), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (246, 276), False, 'import pytest\n'), ((1665, 1751), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cell_id,exp_temp"""', '((1, 320.0), (2, 300.0), (3, 293.6))'], {}), "('cell_id,exp_temp', ((1, 320.0), (2, 300.0), (3, \n 293.6)))\n", (1688, 1751), False, 'import pytest\n'), ((316, 336), 'openmc.model.Model', 'openmc.model.Model', ([], {}), '()\n', (334, 336), False, 'import openmc\n'), ((554, 595), 'openmc.stats.Box', 'openmc.stats.Box', (['[-4, -4, -4]', '[4, 4, 4]'], {}), '([-4, -4, -4], [4, 4, 4])\n', (570, 595), False, 'import openmc\n'), ((650, 681), 'openmc.Source', 'openmc.Source', ([], {'space': 'source_box'}), '(space=source_box)\n', (663, 681), False, 'import openmc\n'), ((775, 789), 'openmc.Tally', 'openmc.Tally', ([], {}), '()\n', (787, 789), False, 'import openmc\n'), ((918, 946), 'openmc.Material', 'openmc.Material', ([], {'name': '"""fuel"""'}), "(name='fuel')\n", (933, 946), False, 'import openmc\n'), ((1077, 1106), 'openmc.Material', 'openmc.Material', ([], {'name': '"""water"""'}), "(name='water')\n", (1092, 1106), False, 'import openmc\n'), ((1292, 1323), 'openmc.Materials', 'openmc.Materials', (['[u235, water]'], {}), '([u235, water])\n', (1308, 1323), False, 'import openmc\n'), ((1639, 1661), 'openmc.capi.finalize', 'openmc.capi.finalize', ([], {}), '()\n', (1659, 1661), False, 'import openmc\n'), ((150, 178), 'openmc.capi._dagmc_enabled', 'openmc.capi._dagmc_enabled', ([], {}), '()\n', (176, 178), False, 'import openmc\n'), ((840, 860), 'openmc.CellFilter', 'openmc.CellFilter', (['(1)'], {}), '(1)\n', (857, 860), False, 'import openmc\n'), ((1502, 1510), 'tests.cdtemp', 'cdtemp', ([], {}), '()\n', (1508, 1510), False, 'from tests import cdtemp\n'), ((1520, 1562), 'shutil.copyfile', 'shutil.copyfile', (['dagmc_file', '"""./dagmc.h5m"""'], {}), "(dagmc_file, './dagmc.h5m')\n", (1535, 1562), 
False, 'import shutil\n'), ((1601, 1619), 'openmc.capi.init', 'openmc.capi.init', ([], {}), '()\n', (1617, 1619), False, 'import openmc\n')]
|
"""This script requires to launch a local ipcontroller. If you execute this
locally, do it with `ipcluster start`.
"""
import argparse
import glob
import logging
import os
import sys
import time
from ipyparallel import Client
from ipyparallel.util import interactive
# Configure root logging at import time so the logging.info calls below show.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
@interactive
def process_fname(fname):
    """Convert one HDF5 file (key 'df') to a CSV with the same base name.

    Runs on ipyparallel engines, hence the local pandas import.
    """
    import pandas as pd
    csv_name = fname[:-3] + 'csv'
    frame = pd.read_hdf(fname, 'df')
    frame.to_csv(csv_name)
def main():
    """Convert every ``*.hdf`` file in a directory to CSV in parallel.

    Files are distributed over an ipyparallel cluster through a
    load-balanced view; progress is printed every 10 seconds until all
    conversions have finished.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('directory',
                        help="Provide the directory of the HDF files "
                             "that shall be converted to csv here.")
    args = parser.parse_args()

    root = os.path.abspath(args.directory)
    fnames = glob.glob(os.path.join(root, '*.hdf'))
    logging.info('Found %i files to convert.', len(fnames))
    if not fnames:
        # Nothing to do; also prevents the ZeroDivisionError the progress
        # report below would hit with len(fnames) == 0.
        return

    c = Client()
    lbview = c.load_balanced_view()
    results = lbview.map_async(process_fname, fnames)
    # progress display
    while not results.ready():
        print("{:.1f} %".format(100 * results.progress / len(fnames)))
        sys.stdout.flush()
        time.sleep(10)
    logging.info('Conversion done.')
|
[
"ipyparallel.Client",
"os.path.abspath",
"argparse.ArgumentParser",
"logging.basicConfig",
"pandas.read_hdf",
"time.sleep",
"logging.info",
"sys.stdout.flush",
"os.path.join"
] |
[((269, 345), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s: %(message)s', level=logging.INFO)\n", (288, 345), False, 'import logging\n'), ((518, 543), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (541, 543), False, 'import argparse\n'), ((764, 795), 'os.path.abspath', 'os.path.abspath', (['args.directory'], {}), '(args.directory)\n', (779, 795), False, 'import os\n'), ((917, 925), 'ipyparallel.Client', 'Client', ([], {}), '()\n', (923, 925), False, 'from ipyparallel import Client\n'), ((1196, 1228), 'logging.info', 'logging.info', (['"""Conversion done."""'], {}), "('Conversion done.')\n", (1208, 1228), False, 'import logging\n'), ((819, 846), 'os.path.join', 'os.path.join', (['root', '"""*.hdf"""'], {}), "(root, '*.hdf')\n", (831, 846), False, 'import os\n'), ((1150, 1168), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1166, 1168), False, 'import sys\n'), ((1177, 1191), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1187, 1191), False, 'import time\n'), ((449, 473), 'pandas.read_hdf', 'pd.read_hdf', (['fname', '"""df"""'], {}), "(fname, 'df')\n", (460, 473), True, 'import pandas as pd\n')]
|
import argparse
from EasyLaMa import TextRemover
from .util import load_image
from .util import load_images
import os
def get_args():
    """Define and parse the command-line interface for the text remover."""
    cli = argparse.ArgumentParser()
    cli.add_argument("images", nargs="+", help="Images to process. Required")
    cli.add_argument("-e", "--edge", type=int, default=1, help="Extra margin at the edges of detected boxes. Default: 1")
    cli.add_argument("-r", "--radius", type=int, default=1, help="Radius for rounded corners. 0 = no rounding. Default: 1")
    cli.add_argument("-o", "--output", default=".", help="Output folder. Default: . (current dir)")
    cli.add_argument("-of", "--output_format", default=None, help="Output format (jpg, png...). Default: same as input")
    cli.add_argument("-s", "--suffix", default="_result", help="Suffix for results. Default: _result")
    cli.add_argument("-m", "--mask_suffix", default=None, help="Suffix for storing mask. Default: don't store mask")
    cli.add_argument("-c", "--copy", action="store_true", help="Copy original image to output folder.")
    cli.add_argument("-d", "--device", default="cuda", help="Device to use (cuda, cuda:0, cpu...). Default: cuda")
    cli.add_argument("-l", "--languages", type=str, nargs="+", default=["en"], help="Languages to detect. See https://www.jaided.ai/easyocr/ for supported languages and their abbreviations. Default: en")
    return cli.parse_args()
def cli():
    """Command-line entry point: remove text from images and save results."""
    args = get_args()
    # Bug fix: os.path.splitext keeps the leading dot ('.png'), but users pass
    # --output_format without one ('png'), which produced names like
    # 'name_resultpng'. Normalize the override to a dotted extension.
    out_ext = "." + args.output_format.lstrip(".") if args.output_format else None
    image_names = [os.path.splitext(os.path.basename(image)) for image in args.images]
    images = load_images(args.images)
    tr = TextRemover(languages=args.languages, device=args.device)
    os.makedirs(args.output, exist_ok=True)
    for (image, (name, ext)) in zip(images, image_names):
        ext = out_ext or ext
        result_name = os.path.join(args.output, name + args.suffix + ext)
        mask_name = os.path.join(args.output, name + (args.mask_suffix or "") + ext)
        image_name = os.path.join(args.output, name + ext)
        result, mask = tr(image, mask_edge=args.edge, radius=args.radius)
        result.save(result_name)
        if args.mask_suffix:
            mask.save(mask_name)
        if args.copy:
            image.save(image_name)
        print("result: {}{}".format(result_name, ("\nmask: " + mask_name) if args.mask_suffix else ""))
|
[
"os.makedirs",
"EasyLaMa.TextRemover",
"argparse.ArgumentParser",
"os.path.basename",
"os.path.join"
] |
[((148, 173), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (171, 173), False, 'import argparse\n'), ((1590, 1647), 'EasyLaMa.TextRemover', 'TextRemover', ([], {'languages': 'args.languages', 'device': 'args.device'}), '(languages=args.languages, device=args.device)\n', (1601, 1647), False, 'from EasyLaMa import TextRemover\n'), ((1652, 1691), 'os.makedirs', 'os.makedirs', (['args.output'], {'exist_ok': '(True)'}), '(args.output, exist_ok=True)\n', (1663, 1691), False, 'import os\n'), ((1772, 1847), 'os.path.join', 'os.path.join', (['args.output', '(name + args.suffix + (args.output_format or ext))'], {}), '(args.output, name + args.suffix + (args.output_format or ext))\n', (1784, 1847), False, 'import os\n'), ((1868, 1961), 'os.path.join', 'os.path.join', (['args.output', "(name + (args.mask_suffix or '') + (args.output_format or ext))"], {}), "(args.output, name + (args.mask_suffix or '') + (args.\n output_format or ext))\n", (1880, 1961), False, 'import os\n'), ((1978, 2039), 'os.path.join', 'os.path.join', (['args.output', '(name + (args.output_format or ext))'], {}), '(args.output, name + (args.output_format or ext))\n', (1990, 2039), False, 'import os\n'), ((1492, 1515), 'os.path.basename', 'os.path.basename', (['image'], {}), '(image)\n', (1508, 1515), False, 'import os\n')]
|
from tqdm import tqdm
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from scipy.sparse import issparse
import numdifftools as nd
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing as mp
import itertools, functools
from ..tools.utils import timeit
def is_outside_domain(x, domain):
    """Flag the rows of `x` that fall outside the axis-aligned box
    ``domain = (lower_bounds, upper_bounds)``; returns a boolean array."""
    pts = np.atleast_2d(x)
    below = pts < domain[0]
    above = pts > domain[1]
    return np.any(below | above, axis=1)
def grad(f, x):
    """Numerical gradient of the scalar-valued function `f` evaluated at `x`."""
    gradient_fn = nd.Gradient(f)
    return gradient_fn(x)
def laplacian(f, x):
    """Numerical Laplacian (sum of the Hessian diagonal) of the scalar field
    `f` evaluated at `x`."""
    hessian_diag = nd.Hessdiag(f)(x)
    return sum(hessian_diag)
# ---------------------------------------------------------------------------------------------------
# vector field function
@timeit
def vector_field_function(x, vf_dict, dim=None, kernel='full', **kernel_kwargs):
    """vector field function constructed by sparseVFC.
    Reference: Regularized vector field learning with sparse approximation for mismatch removal, Ma, Jiayi, etc. al, Pattern Recognition
    """
    has_div_cur_free_kernels = "div_cur_free_kernels" in vf_dict
    if x.ndim == 1:
        x = x[None, :]
    if has_div_cur_free_kernels:
        # pick which of the three returned kernels to use
        kernel_lookup = {'full': 0, 'df_kernel': 1, 'cf_kernel': 2}
        if kernel not in kernel_lookup:
            raise ValueError(f"the kernel can only be one of {'full', 'df_kernel', 'cf_kernel'}!")
        kernels = con_K_div_cur_free(x, vf_dict["X_ctrl"], vf_dict["sigma"], vf_dict["eta"], **kernel_kwargs)
        K = kernels[kernel_lookup[kernel]]
    else:
        K = con_K(x, vf_dict["X_ctrl"], vf_dict["beta"], **kernel_kwargs)
    K = K.dot(vf_dict["C"])
    if dim is not None:
        if not has_div_cur_free_kernels:
            # a scalar dim means "take the first dim columns"
            if np.isscalar(dim):
                K = K[:, :dim]
        else:
            K = K[:, dim]
    return K
@timeit
def con_K(x, y, beta, method='cdist', return_d=False):
    """con_K constructs the kernel K, where K(i, j) = k(x, y) = exp(-beta * ||x - y||^2).

    Arguments
    ---------
        x: :class:`~numpy.ndarray`
            Original training data points.
        y: :class:`~numpy.ndarray`
            Control points used to build kernel basis functions.
        beta: float (default: 0.1)
            Paramerter of Gaussian Kernel, k(x, y) = exp(-beta*||x-y||^2),
        method: str (default: 'cdist')
            How to compute squared distances; 'cdist' uses
            scipy.spatial.distance.cdist, any other value uses an explicit
            broadcasted difference (always used when return_d=True).
        return_d: bool
            If True the intermediate 3D matrix x - y will be returned for analytical Jacobian.

    Returns
    -------
    K: :class:`~numpy.ndarray`
        the kernel to represent the vector field function.
    """
    if method == 'cdist' and not return_d:
        K = cdist(x, y, 'sqeuclidean')
        if len(K) == 1:
            K = K.flatten()
    else:
        n = x.shape[0]
        m = y.shape[0]
        # Build the n x d x m difference tensor D[i, :, j] = x[i] - y[j].
        # np.tile replaces the former np.matlib.tile: numpy.matlib was never
        # imported in this module (this branch raised AttributeError) and the
        # module is removed in NumPy 2.0; np.tile is the ndarray equivalent.
        D = np.tile(x[:, :, None], [1, 1, m]) - np.transpose(
            np.tile(y[:, :, None], [1, 1, n]), [2, 1, 0])
        K = np.squeeze(np.sum(D ** 2, 1))
    K = -beta * K
    K = np.exp(K)
    if return_d:
        return K, D
    else:
        return K
@timeit
def con_K_div_cur_free(x, y, sigma=0.8, eta=0.5):
    """Construct a convex combination of the divergence-free kernel T_df and curl-free kernel T_cf with a bandwidth sigma
    and a combination coefficient gamma.

    Arguments
    ---------
        x: :class:`~numpy.ndarray`
            Original training data points.
        y: :class:`~numpy.ndarray`
            Control points used to build kernel basis functions
        sigma: int (default: `0.8`)
            Bandwidth parameter.
        eta: int (default: `0.5`)
            Combination coefficient for the divergence-free or the curl-free kernels.

    Returns
    -------
        A tuple of G (the combined kernel function), divergence-free kernel and curl-free kernel.

    See also: :func:`sparseVFC`.
    """
    m, d = x.shape
    n, d = y.shape
    sigma2 = sigma ** 2
    # np.tile replaces np.matlib.tile throughout: numpy.matlib is never
    # imported in this module (the old calls raised AttributeError) and the
    # module is removed in NumPy 2.0; for ndarrays np.tile is identical.
    G_tmp = np.tile(x[:, :, None], [1, 1, n]) - np.transpose(
        np.tile(y[:, :, None], [1, 1, m]), [2, 1, 0]
    )
    G_tmp = np.squeeze(np.sum(G_tmp ** 2, 1))
    G_tmp3 = -G_tmp / sigma2
    G_tmp = -G_tmp / (2 * sigma2)
    G_tmp = np.exp(G_tmp) / sigma2
    G_tmp = np.kron(G_tmp, np.ones((d, d)))
    x_tmp = np.tile(x, [n, 1])
    y_tmp = np.tile(y, [1, m]).T
    y_tmp = y_tmp.reshape((d, m * n), order='F').T
    xminusy = x_tmp - y_tmp
    G_tmp2 = np.zeros((d * m, d * n))
    tmp4_ = np.zeros((d, d))
    for i in tqdm(range(d), desc="Iterating each dimension in con_K_div_cur_free:"):
        for j in np.arange(i, d):
            # accumulate the symmetric (i, j) outer-product contribution
            tmp1 = xminusy[:, i].reshape((m, n), order='F')
            tmp2 = xminusy[:, j].reshape((m, n), order='F')
            tmp3 = tmp1 * tmp2
            tmp4 = tmp4_.copy()
            tmp4[i, j] = 1
            tmp4[j, i] = 1
            G_tmp2 = G_tmp2 + np.kron(tmp3, tmp4)
    G_tmp2 = G_tmp2 / sigma2
    G_tmp3 = np.kron((G_tmp3 + d - 1), np.eye(d))
    G_tmp4 = np.kron(np.ones((m, n)), np.eye(d)) - G_tmp2
    df_kernel, cf_kernel = (1 - eta) * G_tmp * (G_tmp2 + G_tmp3), eta * G_tmp * G_tmp4
    G = df_kernel + cf_kernel
    return G, df_kernel, cf_kernel
def vecfld_from_adata(adata, basis='', vf_key='VecFld'):
    """Retrieve a stored vector field dict from `adata.uns` and wrap it in a
    callable.

    Arguments
    ---------
        adata:
            Annotated data object with the vector field stored in `.uns`.
        basis: str or None
            Embedding basis; when non-empty it is appended to `vf_key` as
            '<vf_key>_<basis>'.
        vf_key: str
            Base key under which the vector field is stored in `adata.uns`.

    Returns
    -------
        vf_dict: dict
            The stored vector field parameters.
        func: callable
            Maps coordinates to velocity vectors via `vector_field_function`.

    Raises
    ------
        ValueError if the requested key is missing from `adata.uns`.
    """
    # Bug fix: the original condition used `or`, so `basis=None` short-circuited
    # into `len(None)` and raised TypeError; `and` makes None/'' both skip the
    # key suffixing as intended.
    if basis is not None and len(basis) > 0:
        vf_key = '%s_%s' % (vf_key, basis)
    if vf_key not in adata.uns.keys():
        raise ValueError(
            f'Vector field function {vf_key} is not included in the adata object! '
            f"Try firstly running dyn.tl.VectorField(adata, basis='{basis}')")
    vf_dict = adata.uns[vf_key]['VecFld']
    func = lambda x: vector_field_function(x, vf_dict)
    return vf_dict, func
def vector_transformation(V, Q):
    """Map low-dimensional vectors back to the original space.

    Implements :math:`\\hat{v} = v Q^T`, where `Q` is the PCA loading matrix.

    Parameters
    ----------
    V: :class:`~numpy.ndarray`
        n x k array of vectors to transform (n vectors of dimension k).
    Q: :class:`~numpy.ndarray`
        d x k PCA loading matrix (d = original dimension, k = number of PCs).

    Returns
    -------
    :class:`~numpy.ndarray`
        The n x d array of transformed vectors.
    """
    return np.matmul(V, Q.T)
def vector_field_function_transformation(vf_func, Q):
    """Lift a low-dimensional vector field function to the original space.

    Implements :math:`\\hat{f}(x) = f(x) Q^T`, where `Q` is the PCA loading
    matrix and `f` is accessed through ``vf_func.func``.

    Parameters
    ----------
    vf_func: object
        Object exposing the vector field as the attribute ``func``.
    Q: :class:`~numpy.ndarray`
        d x k PCA loading matrix (d = original dimension, k = number of PCs).

    Returns
    -------
    callable
        The transformed vector field function.
    """
    def transformed(x):
        return vf_func.func(x) @ Q.T
    return transformed
# ---------------------------------------------------------------------------------------------------
# jacobian
def Jacobian_rkhs_gaussian(x, vf_dict, vectorize=False):
    """analytical Jacobian for RKHS vector field functions with Gaussian kernel.

    Arguments
    ---------
    x: :class:`~numpy.ndarray`
        Coordinates where the Jacobian is evaluated.
    vf_dict: dict
        A dictionary containing RKHS vector field control points, Gaussian bandwidth,
        and RKHS coefficients.
        Essential keys: 'X_ctrl', 'beta', 'C'
    vectorize: bool
        Whether to evaluate all points in a single einsum (higher memory) or
        one point at a time.

    Returns
    -------
    J: :class:`~numpy.ndarray`
        Jacobian matrices stored as d-by-d-by-n numpy arrays evaluated at x.
        d is the number of dimensions and n the number of coordinates in x.
    """
    if x.ndim == 1:
        # single point: J = (C^T * K) D^T, scaled by -2*beta below
        K, D = con_K(x[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
        J = (vf_dict['C'].T * K) @ D[0].T
    elif not vectorize:
        # per-point loop; lower peak memory than the vectorized branch
        n, d = x.shape
        J = np.zeros((d, d, n))
        for i, xi in enumerate(x):
            K, D = con_K(xi[None, :], vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
            J[:, :, i] = (vf_dict['C'].T * K) @ D[0].T
    else:
        # single kernel evaluation for all points, contracted with the
        # coefficients and the difference tensor in one einsum
        K, D = con_K(x, vf_dict['X_ctrl'], vf_dict['beta'], return_d=True)
        if K.ndim == 1: K = K[None, :]
        J = np.einsum('nm, mi, njm -> ijn', K, vf_dict['C'], D)
    # chain-rule factor from d/dx exp(-beta * ||x - y||^2)
    return -2 * vf_dict['beta'] * J
def Jacobian_rkhs_gaussian_parallel(x, vf_dict, cores=None):
    """Compute RKHS-Gaussian Jacobians for the rows of `x` in parallel.

    The coordinates are split into per-worker chunks, each chunk is handed to
    `Jacobian_rkhs_gaussian` on a thread pool, and the d x d x n results are
    stitched back together in the original order.
    """
    n = len(x)
    if cores is None:
        cores = mp.cpu_count()
    chunk = int(np.ceil(n / cores))
    batches = [x[i:i + chunk] for i in range(0, n, chunk)]
    with ThreadPool(cores) as pool:
        partials = pool.starmap(Jacobian_rkhs_gaussian, zip(batches, itertools.repeat(vf_dict)))
    # move the cell axis to the front so chunks can be stacked, then back
    stacked = np.vstack([np.transpose(p, axes=(2, 0, 1)) for p in partials])
    return np.transpose(stacked, axes=(1, 2, 0))
def Jacobian_numerical(f, input_vector_convention='row'):
    '''
    Get the numerical Jacobian of the vector field function.
    If the input_vector_convention is 'row', it means that fjac takes row vectors
    as input, otherwise the input should be an array of column vectors. Note that
    the returned Jacobian would behave exactly the same if the input is an 1d array.
    The column vector convention is slightly faster than the row vector convention,
    so the row-vector case is handled by transposing the input before delegating
    to the column-vector Jacobian.
    No matter the input vector convention, the returned Jacobian is of the following
    format:
            df_1/dx_1   df_1/dx_2   df_1/dx_3   ...
            df_2/dx_1   df_2/dx_2   df_2/dx_3   ...
            df_3/dx_1   df_3/dx_2   df_3/dx_3   ...
            ...         ...         ...         ...
    '''
    fjac = nd.Jacobian(lambda x: f(x.T).T)
    if input_vector_convention in ('row', 0):
        def row_input_jacobian(x):
            return fjac(x.T)
        return row_input_jacobian
    return fjac
@timeit
def elementwise_jacobian_transformation(Js, qi, qj):
    """Inverse transform one entry of each low-dimensional Jacobian back to
    gene-expression space.

    Using the PCA relation :math:`Jac = Q J Q^T`, a single element of the
    high-dimensional Jacobian is obtained per cell as ``qi @ J @ qj``, where
    `qi` and `qj` are single rows of the loading matrix Q.

    Parameters
    ----------
    Js: :class:`~numpy.ndarray`
        k x k x n stack of n k-by-k Jacobians.
    qi: :class:`~numpy.ndarray`
        Row of Q for the regulator gene i (length k).
    qj: :class:`~numpy.ndarray`
        Row of Q for the effector gene j (length k).

    Returns
    -------
    :class:`~numpy.ndarray`
        Length-n vector of :math:`\\partial F_i / \\partial x_j`, one per cell.
    """
    Js = np.atleast_3d(Js)
    n_cells = Js.shape[2]
    out = np.zeros(n_cells)
    for idx in tqdm(range(n_cells), "calculating Jacobian for each cell"):
        out[idx] = qi @ Js[:, :, idx] @ qj
    return out
@timeit
def subset_jacobian_transformation(Js, Qi, Qj, cores=1):
    """Transform Jacobian matrices (:math:`\partial F_i / \partial x_j`) from PCA space to the original space.

    The formula used for transformation:
    :math:`\hat{J} = Q J Q^T`,
    where `Q, J, \hat{J}` are the PCA loading matrix, low dimensional Jacobian matrix and the inverse
    transformed high dimensional Jacobian matrix. This function takes multiple rows from Q to form Qi or Qj.

    Parameters
    ----------
    Js: :class:`~numpy.ndarray`
        k x k x n tensor of n k-by-k low dimensional Jacobians.
    Qi: :class:`~numpy.ndarray`
        Sampled genes' PCA loading matrix with dimension d1 x k (regulator genes).
    Qj: :class:`~numpy.ndarray`
        Sampled genes' PCA loading matrix with dimension d2 x k (effector genes; may equal Qi).
    cores: int (default: 1)
        Number of threads used for the transformation. If > 1, the cell axis is
        chunked and processed by a thread pool; None means use all CPUs.

    Returns
    -------
    ret: :class:`~numpy.ndarray`
        The transformed Jacobian tensor of shape d1 x d2 x n.
    """
    Js = np.atleast_3d(Js)
    Qi = np.atleast_2d(Qi)
    Qj = np.atleast_2d(Qj)
    d1, d2, n = Qi.shape[0], Qj.shape[0], Js.shape[2]
    ret = np.zeros((d1, d2, n))
    if cores == 1:
        # Serial path: delegate to transform_jacobian with a progress bar.
        ret = transform_jacobian(Js, Qi, Qj, pbar=True)
    else:
        if cores is None: cores = mp.cpu_count()
        # Split the cell axis into roughly equal chunks, one per worker.
        n_j_per_core = int(np.ceil(n / cores))
        JJ = []
        for i in range(0, n, n_j_per_core):
            JJ.append(Js[:, :, i:i+n_j_per_core])
        with ThreadPool(cores) as p:
            ret = p.starmap(transform_jacobian, zip(JJ,
                itertools.repeat(Qi), itertools.repeat(Qj)))
        # Each chunk result is (d1, d2, chunk); move the cell axis first so the
        # chunks can be stacked, then move it back to the last position.
        ret = [np.transpose(r, axes=(2, 0, 1)) for r in ret]
        ret = np.transpose(np.vstack(ret), axes=(1, 2, 0))
    return ret
def transform_jacobian(Js, Qi, Qj, pbar=False):
    """Map a stack of low dimensional Jacobians into gene space.

    For every cell i computes ``ret[:, :, i] = Qi @ Js[:, :, i] @ Qj.T``.
    The result is stored as float32 to keep the d1 x d2 x n tensor compact.
    """
    d1, d2, n_cells = Qi.shape[0], Qj.shape[0], Js.shape[2]
    ret = np.zeros((d1, d2, n_cells), dtype=np.float32)
    cells = tqdm(range(n_cells), desc='Transforming subset Jacobian') if pbar else range(n_cells)
    for idx in cells:
        ret[:, :, idx] = Qi @ Js[:, :, idx] @ Qj.T
    return ret
def average_jacobian_by_group(Js, group_labels):
    """
    Returns a dictionary of averaged jacobians with group names as the keys.
    No vectorized indexing was used due to its high memory cost.

    Parameters
    ----------
    Js: :class:`~numpy.ndarray`
        d1 x d2 x n tensor of per-cell Jacobians.
    group_labels: array-like
        One group label per cell (length n).

    Returns
    -------
    J_mean: dict
        Mapping from each unique label to the mean d1 x d2 Jacobian of
        the cells carrying that label.
    """
    groups = np.unique(group_labels)
    J_mean = {}
    N = {}
    for i, g in enumerate(group_labels):
        if g in J_mean.keys():
            J_mean[g] += Js[:, :, i]
            N[g] += 1
        else:
            # BUGFIX: copy the slice — the original stored a *view* of Js, so
            # the in-place += and /= below silently mutated the input tensor.
            J_mean[g] = Js[:, :, i].copy()
            N[g] = 1
    for g in groups:
        J_mean[g] /= N[g]
    return J_mean
# ---------------------------------------------------------------------------------------------------
# dynamical properties
def _divergence(f, x):
    """Divergence (trace of the numerical Jacobian) of the reconstructed
    vector field function f evaluated at point x."""
    return np.trace(nd.Jacobian(f)(x))
@timeit
def compute_divergence(f_jac, X, vectorize_size=1):
    """Divergence (trace of the Jacobian) for many samples.

    vectorize_size controls how many samples go through f_jac per batch:
    1 means no vectorization at all, None vectorizes all samples at once.
    """
    n_obs = len(X)
    if vectorize_size is None:
        vectorize_size = n_obs
    div = np.zeros(n_obs)
    for start in tqdm(range(0, n_obs, vectorize_size), desc="Calculating divergence"):
        batch_jac = f_jac(X[start:start+vectorize_size])
        div[start:start+vectorize_size] = np.trace(batch_jac)
    return div
def acceleration_(v, J):
    """Acceleration a = J · v; a 1-d velocity is promoted to a column vector."""
    col_v = v[:, None] if v.ndim == 1 else v
    return J.dot(col_v)
def curvature_1(a, v):
    """Curvature magnitude ||v ⊗ a|| / ||v||^3 (outer-product formulation).

    See https://link.springer.com/article/10.1007/s12650-018-0474-6
    """
    vv = v[:, None] if v.ndim == 1 else v
    speed = np.linalg.norm(vv)
    return np.linalg.norm(np.outer(vv, a)) / speed ** 3
def curvature_2(a, v):
    """Curvature vector (a (v·v) - v (v·a)) / ||v||^4.

    See https://dl.acm.org/doi/10.5555/319351.319441
    """
    speed_sq = np.dot(v, v)
    proj = np.dot(v, a)
    return (a * speed_sq - v * proj) / np.linalg.norm(v) ** 4
def torsion_(v, J, a):
    """Torsion (v ⊗ a) · (J · a) / ||v ⊗ a||^2 — defined for 3-D fields only."""
    col_v = v[:, None] if v.ndim == 1 else v
    va = np.outer(col_v, a)
    return va.dot(J.dot(a)) / np.linalg.norm(va) ** 2
@timeit
def compute_acceleration(vf, f_jac, X, return_all=False):
    """Acceleration a = J · v at every sample point.

    Returns just the n x d acceleration matrix, or the tuple
    (velocities, Jacobians, accelerations) when return_all is True.
    """
    n_obs = len(X)
    acce = np.zeros((n_obs, X.shape[1]))
    v_ = vf(X)
    J_ = f_jac(X)
    for idx in tqdm(range(n_obs), desc=f"Calculating acceleration"):
        acce[idx] = acceleration_(v_[idx], J_[:, :, idx]).flatten()
    if return_all:
        return v_, J_, acce
    return acce
@timeit
def compute_curvature(vf, f_jac, X, formula=2):
    """Curvature of the vector field at each sample point.

    Formula 1:
    .. math::
    \kappa = \frac{||\mathbf{v} \times \mathbf{a}||}{||\mathbf{V}||^3}

    Formula 2:
    .. math::
    \kappa = \frac{||\mathbf{Jv} (\mathbf{v} \cdot \mathbf{v}) - ||\mathbf{v} (\mathbf{v} \cdot \mathbf{Jv})}{||\mathbf{V}||^4}

    Returns (curvature magnitudes, curvature vectors); the vectors are only
    available for formula 2, otherwise None.
    """
    n_obs = len(X)
    curv = np.zeros(n_obs)
    v, _, a = compute_acceleration(vf, f_jac, X, return_all=True)
    cur_mat = np.zeros((n_obs, X.shape[1])) if formula == 2 else None
    for idx in tqdm(range(n_obs), desc="Calculating curvature"):
        if formula == 1:
            curv[idx] = curvature_1(a[idx], v[idx])
        elif formula == 2:
            cur_mat[idx] = curvature_2(a[idx], v[idx])
            curv[idx] = np.linalg.norm(cur_mat[idx])
    return (curv, cur_mat)
@timeit
def compute_torsion(vf, f_jac, X):
    """Torsion of the vector field at each sample point.

    .. math::
    \tau = \frac{(\mathbf{v} \times \mathbf{a}) \cdot (\mathbf{J} \cdot \mathbf{a})}{||\mathbf{V} \times \mathbf{a}||^2}

    Only defined for 3-D systems; raises otherwise.
    """
    if X.shape[1] != 3:
        raise Exception(f'torsion is only defined in 3 dimension.')
    n_obs = len(X)
    tor = np.zeros((n_obs, X.shape[1], X.shape[1]))
    v, J, a = compute_acceleration(vf, f_jac, X, return_all=True)
    for idx in tqdm(range(n_obs), desc="Calculating torsion"):
        tor[idx] = torsion_(v[idx], J[:, :, idx], a[idx])
    return tor
def _curl(f, x, method='analytical', VecFld=None, jac=None):
"""Curl of the reconstructed vector field f evaluated at x in 3D"""
if jac is None:
if method == 'analytical' and VecFld is not None:
jac = Jacobian_rkhs_gaussian(x, VecFld)
else:
jac = nd.Jacobian(f)(x)
return np.array([jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]])
def curl2d(f, x, method='analytical', VecFld=None, jac=None):
    """Scalar curl of the reconstructed vector field f evaluated at x in 2D.

    Uses the supplied Jacobian when given; otherwise evaluates it either
    analytically (RKHS Gaussian kernel) or numerically.
    """
    if jac is None:
        use_analytical = method == 'analytical' and VecFld is not None
        jac = Jacobian_rkhs_gaussian(x, VecFld) if use_analytical else nd.Jacobian(f)(x)
    return jac[1, 0] - jac[0, 1]
@timeit
def compute_curl(f_jac, X):
    """Calculate curl for many samples for 2/3 D systems.

    Parameters
    ----------
    f_jac: callable
        Function returning the Jacobian matrix at a single sample point.
    X: :class:`~numpy.ndarray`
        Sample coordinates, n_obs x 2 or n_obs x 3.

    Returns
    -------
    curl: :class:`~numpy.ndarray`
        Scalar curl per sample in 2-D (length n), or the 3-component curl
        vector per sample in 3-D (n x 3).
    """
    if X.shape[1] > 3:
        raise Exception(f'curl is only defined in 2/3 dimension.')
    n = len(X)
    if X.shape[1] == 2:
        curl = np.zeros(n)
        f = curl2d
    else:
        # BUGFIX: _curl returns a length-3 vector per sample, so the 3-D
        # result buffer must be (n, 3); the previous (n, 2, 2) shape made
        # the per-sample assignment below raise a broadcasting ValueError.
        curl = np.zeros((n, 3))
        f = _curl
    for i in tqdm(range(n), desc=f"Calculating {X.shape[1]}-D curl"):
        J = f_jac(X[i])
        curl[i] = f(None, None, method='analytical', VecFld=None, jac=J)
    return curl
# ---------------------------------------------------------------------------------------------------
# ranking related utilies
def get_metric_gene_in_rank(mat, genes, neg=False):
    """Rank genes by their column-mean metric.

    Descending order by default; ascending when neg is True (used for
    negative-valued metrics). Returns (ranked metrics, ranked genes).
    """
    means = mat.mean(0).A1 if issparse(mat) else mat.mean(0)
    order = means.argsort()
    if not neg:
        order = order[::-1]
    return means[order], genes[order]
def get_metric_gene_in_rank_by_group(mat, genes, groups, grp, neg=False):
    """Rank genes by their mean metric within a single group.

    Returns (ranked per-gene metrics, unranked group means, genes in rank
    order). Descending rank by default; ascending when neg is True.
    """
    mask = groups == grp
    if type(mask) is pd.Series:
        mask = mask.values
    sub = mat[mask, :]
    means = sub.mean(0).A1 if issparse(mat) else sub.mean(0)
    order = means.argsort()
    if not neg:
        order = order[::-1]
    return means[order], means, genes[order]
def get_sorted_metric_genes_df(df, genes, neg=False):
    """Sort every column of df and report the matching gene order.

    Descending when neg is False, ascending otherwise. Returns two
    DataFrames with the same columns as df: the sorted metric values and
    the genes in that sorted order.
    """
    metric_cols, gene_cols = {}, {}
    for col, values in df.transpose().iterrows():
        if neg:
            metric_cols[col] = sorted(values, reverse=False)
            gene_cols[col] = genes[values.argsort()]
        else:
            metric_cols[col] = sorted(values, reverse=True)
            gene_cols[col] = genes[values.argsort()[::-1]]
    return pd.DataFrame(metric_cols), pd.DataFrame(gene_cols)
def rank_vector_calculus_metrics(mat, genes, group, groups, uniq_group):
    """Rank genes by a vector-calculus metric, overall or per group.

    The cell-by-gene metric matrix `mat` is split into its positive and
    negative parts; each part (plus the absolute values) is then ranked.
    With `group is None` six arrays are returned (abs/pos/neg metrics and
    the corresponding gene orders); otherwise twelve DataFrames are
    returned covering both gene-wise and group-wise rankings.
    """
    # Split mat into non-negative (pos_mat) and non-positive (neg_mat) parts.
    if issparse(mat):
        mask = mat.data > 0
        pos_mat, neg_mat = mat.copy(), mat.copy()
        pos_mat.data[~ mask], neg_mat.data[mask] = 0, 0
        # Keep the sparse structure tight after zeroing entries.
        pos_mat.eliminate_zeros()
        neg_mat.eliminate_zeros()
    else:
        mask = mat > 0
        pos_mat, neg_mat = mat.copy(), mat.copy()
        pos_mat[~ mask], neg_mat[mask] = 0, 0
    if group is None:
        # Global ranking across all cells; negative part is ranked ascending.
        metric_in_rank, genes_in_rank = get_metric_gene_in_rank(abs(mat), genes)
        pos_metric_in_rank, pos_genes_in_rank = get_metric_gene_in_rank(pos_mat, genes)
        neg_metric_in_rank, neg_genes_in_rank = get_metric_gene_in_rank(neg_mat, genes, neg=True)
        return metric_in_rank, genes_in_rank, pos_metric_in_rank, pos_genes_in_rank, neg_metric_in_rank, neg_genes_in_rank
    else:
        # Per-group ranking: one entry per unique group label in each dict.
        gene_wise_metrics, gene_wise_genes, gene_wise_pos_metrics, gene_wise_pos_genes, gene_wise_neg_metrics, gene_wise_neg_genes = {}, {}, {}, {}, {}, {}
        group_wise_metrics, group_wise_genes, group_wise_pos_metrics, group_wise_pos_genes, group_wise_neg_metrics, group_wise_neg_genes = {}, {}, {}, {}, {}, {}
        for i, grp in tqdm(enumerate(uniq_group), desc='ranking genes across gropus'):
            gene_wise_metrics[grp], group_wise_metrics[grp], gene_wise_genes[grp] = None, None, None
            gene_wise_metrics[grp], group_wise_metrics[grp], gene_wise_genes[grp] = \
                get_metric_gene_in_rank_by_group(abs(mat), genes, groups, grp)
            gene_wise_pos_metrics[grp], group_wise_pos_metrics[grp], gene_wise_pos_genes[grp] = None, None, None
            gene_wise_pos_metrics[grp], group_wise_pos_metrics[grp], gene_wise_pos_genes[grp] = \
                get_metric_gene_in_rank_by_group(pos_mat, genes, groups, grp)
            gene_wise_neg_metrics[grp], group_wise_neg_metrics[grp], gene_wise_neg_genes[grp] = None, None, None
            gene_wise_neg_metrics[grp], group_wise_neg_metrics[grp], gene_wise_neg_genes[grp] = \
                get_metric_gene_in_rank_by_group(neg_mat, genes, groups, grp, neg=True)
        # Cross-group rankings: sort each group's column of metrics per gene.
        metric_in_group_rank_by_gene, genes_in_group_rank_by_gene = \
            get_sorted_metric_genes_df(pd.DataFrame(group_wise_metrics), genes)
        pos_metric_gene_rank_by_group, pos_genes_group_rank_by_gene = \
            get_sorted_metric_genes_df(pd.DataFrame(group_wise_pos_metrics), genes)
        neg_metric_in_group_rank_by_gene, neg_genes_in_group_rank_by_gene = \
            get_sorted_metric_genes_df(pd.DataFrame(group_wise_neg_metrics), genes, neg=True)
        # Per-group rankings assembled into DataFrames (one column per group).
        metric_in_gene_rank_by_group, genes_in_gene_rank_by_group = \
            pd.DataFrame(gene_wise_metrics), pd.DataFrame(gene_wise_genes)
        pos_metric_in_gene_rank_by_group, pos_genes_in_gene_rank_by_group = \
            pd.DataFrame(gene_wise_pos_metrics), pd.DataFrame(gene_wise_pos_genes)
        neg_metric_in_gene_rank_by_group, neg_genes_in_gene_rank_by_group = \
            pd.DataFrame(gene_wise_neg_metrics), pd.DataFrame(gene_wise_neg_genes)
        return (metric_in_gene_rank_by_group, genes_in_gene_rank_by_group, pos_metric_in_gene_rank_by_group,
                pos_genes_in_gene_rank_by_group, neg_metric_in_gene_rank_by_group, neg_genes_in_gene_rank_by_group,
                metric_in_group_rank_by_gene, genes_in_group_rank_by_gene, pos_metric_gene_rank_by_group,
                pos_genes_group_rank_by_gene, neg_metric_in_group_rank_by_gene, neg_genes_in_group_rank_by_gene,)
|
[
"numdifftools.Hessdiag",
"numpy.trace",
"numpy.sum",
"scipy.sparse.issparse",
"numpy.einsum",
"numpy.ones",
"numdifftools.Gradient",
"numpy.arange",
"numpy.exp",
"numpy.matlib.tile",
"numpy.linalg.norm",
"numpy.unique",
"multiprocessing.cpu_count",
"numpy.atleast_2d",
"pandas.DataFrame",
"multiprocessing.dummy.Pool",
"numpy.transpose",
"numpy.kron",
"scipy.spatial.distance.cdist",
"numpy.ceil",
"numpy.dot",
"numdifftools.Jacobian",
"numpy.vstack",
"itertools.repeat",
"numpy.atleast_3d",
"numpy.outer",
"numpy.isscalar",
"numpy.zeros",
"numpy.array",
"numpy.logical_or",
"numpy.eye"
] |
[((3391, 3400), 'numpy.exp', 'np.exp', (['K'], {}), '(K)\n', (3397, 3400), True, 'import numpy as np\n'), ((4647, 4672), 'numpy.matlib.tile', 'np.matlib.tile', (['x', '[n, 1]'], {}), '(x, [n, 1])\n', (4661, 4672), True, 'import numpy as np\n'), ((4805, 4829), 'numpy.zeros', 'np.zeros', (['(d * m, d * n)'], {}), '((d * m, d * n))\n', (4813, 4829), True, 'import numpy as np\n'), ((4843, 4859), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (4851, 4859), True, 'import numpy as np\n'), ((12110, 12127), 'numpy.atleast_3d', 'np.atleast_3d', (['Js'], {}), '(Js)\n', (12123, 12127), True, 'import numpy as np\n'), ((12158, 12169), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (12166, 12169), True, 'import numpy as np\n'), ((14054, 14071), 'numpy.atleast_3d', 'np.atleast_3d', (['Js'], {}), '(Js)\n', (14067, 14071), True, 'import numpy as np\n'), ((14081, 14098), 'numpy.atleast_2d', 'np.atleast_2d', (['Qi'], {}), '(Qi)\n', (14094, 14098), True, 'import numpy as np\n'), ((14108, 14125), 'numpy.atleast_2d', 'np.atleast_2d', (['Qj'], {}), '(Qj)\n', (14121, 14125), True, 'import numpy as np\n'), ((14191, 14212), 'numpy.zeros', 'np.zeros', (['(d1, d2, n)'], {}), '((d1, d2, n))\n', (14199, 14212), True, 'import numpy as np\n'), ((14918, 14957), 'numpy.zeros', 'np.zeros', (['(d1, d2, n)'], {'dtype': 'np.float32'}), '((d1, d2, n), dtype=np.float32)\n', (14926, 14957), True, 'import numpy as np\n'), ((15425, 15448), 'numpy.unique', 'np.unique', (['group_labels'], {}), '(group_labels)\n', (15434, 15448), True, 'import numpy as np\n'), ((16018, 16031), 'numpy.trace', 'np.trace', (['jac'], {}), '(jac)\n', (16026, 16031), True, 'import numpy as np\n'), ((16492, 16503), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (16500, 16503), True, 'import numpy as np\n'), ((17579, 17604), 'numpy.zeros', 'np.zeros', (['(n, X.shape[1])'], {}), '((n, X.shape[1]))\n', (17587, 17604), True, 'import numpy as np\n'), ((18271, 18282), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (18279, 
18282), True, 'import numpy as np\n'), ((19053, 19090), 'numpy.zeros', 'np.zeros', (['(n, X.shape[1], X.shape[1])'], {}), '((n, X.shape[1], X.shape[1]))\n', (19061, 19090), True, 'import numpy as np\n'), ((19608, 19687), 'numpy.array', 'np.array', (['[jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]]'], {}), '([jac[2, 1] - jac[1, 2], jac[0, 2] - jac[2, 0], jac[1, 0] - jac[0, 1]])\n', (19616, 19687), True, 'import numpy as np\n'), ((22186, 22199), 'scipy.sparse.issparse', 'issparse', (['mat'], {}), '(mat)\n', (22194, 22199), False, 'from scipy.sparse import issparse\n'), ((400, 443), 'numpy.logical_or', 'np.logical_or', (['(x < domain[0])', '(x > domain[1])'], {}), '(x < domain[0], x > domain[1])\n', (413, 443), True, 'import numpy as np\n'), ((544, 558), 'numdifftools.Gradient', 'nd.Gradient', (['f'], {}), '(f)\n', (555, 558), True, 'import numdifftools as nd\n'), ((648, 662), 'numdifftools.Hessdiag', 'nd.Hessdiag', (['f'], {}), '(f)\n', (659, 662), True, 'import numdifftools as nd\n'), ((1989, 2005), 'numpy.isscalar', 'np.isscalar', (['dim'], {}), '(dim)\n', (2000, 2005), True, 'import numpy as np\n'), ((2868, 2894), 'scipy.spatial.distance.cdist', 'cdist', (['x', 'y', '"""sqeuclidean"""'], {}), "(x, y, 'sqeuclidean')\n", (2873, 2894), False, 'from scipy.spatial.distance import cdist\n'), ((4323, 4363), 'numpy.matlib.tile', 'np.matlib.tile', (['x[:, :, None]', '[1, 1, n]'], {}), '(x[:, :, None], [1, 1, n])\n', (4337, 4363), True, 'import numpy as np\n'), ((4469, 4490), 'numpy.sum', 'np.sum', (['(G_tmp ** 2)', '(1)'], {}), '(G_tmp ** 2, 1)\n', (4475, 4490), True, 'import numpy as np\n'), ((4567, 4580), 'numpy.exp', 'np.exp', (['G_tmp'], {}), '(G_tmp)\n', (4573, 4580), True, 'import numpy as np\n'), ((4617, 4632), 'numpy.ones', 'np.ones', (['(d, d)'], {}), '((d, d))\n', (4624, 4632), True, 'import numpy as np\n'), ((4685, 4710), 'numpy.matlib.tile', 'np.matlib.tile', (['y', '[1, m]'], {}), '(y, [1, m])\n', (4699, 4710), True, 'import numpy as 
np\n'), ((4962, 4977), 'numpy.arange', 'np.arange', (['i', 'd'], {}), '(i, d)\n', (4971, 4977), True, 'import numpy as np\n'), ((5335, 5344), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5341, 5344), True, 'import numpy as np\n'), ((9235, 9249), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (9247, 9249), True, 'import multiprocessing as mp\n'), ((9273, 9291), 'numpy.ceil', 'np.ceil', (['(n / cores)'], {}), '(n / cores)\n', (9280, 9291), True, 'import numpy as np\n'), ((9509, 9526), 'multiprocessing.dummy.Pool', 'ThreadPool', (['cores'], {}), '(cores)\n', (9519, 9526), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((9628, 9659), 'numpy.transpose', 'np.transpose', (['r'], {'axes': '(2, 0, 1)'}), '(r, axes=(2, 0, 1))\n', (9640, 9659), True, 'import numpy as np\n'), ((9697, 9711), 'numpy.vstack', 'np.vstack', (['ret'], {}), '(ret)\n', (9706, 9711), True, 'import numpy as np\n'), ((15989, 16003), 'numdifftools.Jacobian', 'nd.Jacobian', (['f'], {}), '(f)\n', (16000, 16003), True, 'import numdifftools as nd\n'), ((16658, 16669), 'numpy.trace', 'np.trace', (['J'], {}), '(J)\n', (16666, 16669), True, 'import numpy as np\n'), ((18363, 18388), 'numpy.zeros', 'np.zeros', (['(n, X.shape[1])'], {}), '((n, X.shape[1]))\n', (18371, 18388), True, 'import numpy as np\n'), ((20304, 20315), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (20312, 20315), True, 'import numpy as np\n'), ((20360, 20379), 'numpy.zeros', 'np.zeros', (['(n, 2, 2)'], {}), '((n, 2, 2))\n', (20368, 20379), True, 'import numpy as np\n'), ((20804, 20817), 'scipy.sparse.issparse', 'issparse', (['mat'], {}), '(mat)\n', (20812, 20817), False, 'from scipy.sparse import issparse\n'), ((3201, 3241), 'numpy.matlib.tile', 'np.matlib.tile', (['x[:, :, None]', '[1, 1, m]'], {}), '(x[:, :, None], [1, 1, m])\n', (3215, 3241), True, 'import numpy as np\n'), ((3346, 3363), 'numpy.sum', 'np.sum', (['(D ** 2)', '(1)'], {}), '(D ** 2, 1)\n', (3352, 3363), True, 'import numpy as np\n'), 
((4388, 4428), 'numpy.matlib.tile', 'np.matlib.tile', (['y[:, :, None]', '[1, 1, m]'], {}), '(y[:, :, None], [1, 1, m])\n', (4402, 4428), True, 'import numpy as np\n'), ((5367, 5382), 'numpy.ones', 'np.ones', (['(m, n)'], {}), '((m, n))\n', (5374, 5382), True, 'import numpy as np\n'), ((5384, 5393), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5390, 5393), True, 'import numpy as np\n'), ((8703, 8722), 'numpy.zeros', 'np.zeros', (['(d, d, n)'], {}), '((d, d, n))\n', (8711, 8722), True, 'import numpy as np\n'), ((9038, 9089), 'numpy.einsum', 'np.einsum', (['"""nm, mi, njm -> ijn"""', 'K', "vf_dict['C']", 'D'], {}), "('nm, mi, njm -> ijn', K, vf_dict['C'], D)\n", (9047, 9089), True, 'import numpy as np\n'), ((14333, 14347), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (14345, 14347), True, 'import multiprocessing as mp\n'), ((14375, 14393), 'numpy.ceil', 'np.ceil', (['(n / cores)'], {}), '(n / cores)\n', (14382, 14393), True, 'import numpy as np\n'), ((14518, 14535), 'multiprocessing.dummy.Pool', 'ThreadPool', (['cores'], {}), '(cores)\n', (14528, 14535), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((14683, 14714), 'numpy.transpose', 'np.transpose', (['r'], {'axes': '(2, 0, 1)'}), '(r, axes=(2, 0, 1))\n', (14695, 14714), True, 'import numpy as np\n'), ((14756, 14770), 'numpy.vstack', 'np.vstack', (['ret'], {}), '(ret)\n', (14765, 14770), True, 'import numpy as np\n'), ((16924, 16938), 'numpy.outer', 'np.outer', (['v', 'a'], {}), '(v, a)\n', (16932, 16938), True, 'import numpy as np\n'), ((16942, 16959), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (16956, 16959), True, 'import numpy as np\n'), ((17174, 17191), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (17188, 17191), True, 'import numpy as np\n'), ((21250, 21263), 'scipy.sparse.issparse', 'issparse', (['mat'], {}), '(mat)\n', (21258, 21263), False, 'from scipy.sparse import issparse\n'), ((21364, 21377), 'scipy.sparse.issparse', 'issparse', 
(['mat'], {}), '(mat)\n', (21372, 21377), False, 'from scipy.sparse import issparse\n'), ((24329, 24361), 'pandas.DataFrame', 'pd.DataFrame', (['group_wise_metrics'], {}), '(group_wise_metrics)\n', (24341, 24361), True, 'import pandas as pd\n'), ((24481, 24517), 'pandas.DataFrame', 'pd.DataFrame', (['group_wise_pos_metrics'], {}), '(group_wise_pos_metrics)\n', (24493, 24517), True, 'import pandas as pd\n'), ((24643, 24679), 'pandas.DataFrame', 'pd.DataFrame', (['group_wise_neg_metrics'], {}), '(group_wise_neg_metrics)\n', (24655, 24679), True, 'import pandas as pd\n'), ((24781, 24812), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_metrics'], {}), '(gene_wise_metrics)\n', (24793, 24812), True, 'import pandas as pd\n'), ((24814, 24843), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_genes'], {}), '(gene_wise_genes)\n', (24826, 24843), True, 'import pandas as pd\n'), ((24934, 24969), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_pos_metrics'], {}), '(gene_wise_pos_metrics)\n', (24946, 24969), True, 'import pandas as pd\n'), ((24971, 25004), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_pos_genes'], {}), '(gene_wise_pos_genes)\n', (24983, 25004), True, 'import pandas as pd\n'), ((25095, 25130), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_neg_metrics'], {}), '(gene_wise_neg_metrics)\n', (25107, 25130), True, 'import pandas as pd\n'), ((25132, 25165), 'pandas.DataFrame', 'pd.DataFrame', (['gene_wise_neg_genes'], {}), '(gene_wise_neg_genes)\n', (25144, 25165), True, 'import pandas as pd\n'), ((3270, 3310), 'numpy.matlib.tile', 'np.matlib.tile', (['y[:, :, None]', '[1, 1, n]'], {}), '(y[:, :, None], [1, 1, n])\n', (3284, 3310), True, 'import numpy as np\n'), ((5246, 5265), 'numpy.kron', 'np.kron', (['tmp3', 'tmp4'], {}), '(tmp3, tmp4)\n', (5253, 5265), True, 'import numpy as np\n'), ((9589, 9614), 'itertools.repeat', 'itertools.repeat', (['vf_dict'], {}), '(vf_dict)\n', (9605, 9614), False, 'import itertools, functools\n'), ((17126, 17138), 'numpy.dot', 
'np.dot', (['v', 'v'], {}), '(v, v)\n', (17132, 17138), True, 'import numpy as np\n'), ((17157, 17169), 'numpy.dot', 'np.dot', (['v', 'a'], {}), '(v, a)\n', (17163, 17169), True, 'import numpy as np\n'), ((17310, 17324), 'numpy.outer', 'np.outer', (['v', 'a'], {}), '(v, a)\n', (17318, 17324), True, 'import numpy as np\n'), ((17356, 17370), 'numpy.outer', 'np.outer', (['v', 'a'], {}), '(v, a)\n', (17364, 17370), True, 'import numpy as np\n'), ((18644, 18670), 'numpy.linalg.norm', 'np.linalg.norm', (['cur_mat[i]'], {}), '(cur_mat[i])\n', (18658, 18670), True, 'import numpy as np\n'), ((19578, 19592), 'numdifftools.Jacobian', 'nd.Jacobian', (['f'], {}), '(f)\n', (19589, 19592), True, 'import numdifftools as nd\n'), ((19986, 20000), 'numdifftools.Jacobian', 'nd.Jacobian', (['f'], {}), '(f)\n', (19997, 20000), True, 'import numdifftools as nd\n'), ((14623, 14643), 'itertools.repeat', 'itertools.repeat', (['Qi'], {}), '(Qi)\n', (14639, 14643), False, 'import itertools, functools\n'), ((14645, 14665), 'itertools.repeat', 'itertools.repeat', (['Qj'], {}), '(Qj)\n', (14661, 14665), False, 'import itertools, functools\n')]
|
"""
***************************************************************************
OshAdjustGradient.py
---------------------
Date : Nov 2020
Copyright : (C) 2020 by <NAME>
Email : <EMAIL> at g<EMAIL> dot <EMAIL>
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
v2.11 16 Jan 2021
Fixed line 267 ZeroDivisionError
"""
__author__ = '<NAME>'
__date__ = 'Nov 2020'
__copyright__ = '(C) 2020, <NAME> Hai'
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingContext,
QgsProcessingException,
QgsProcessingParameterBoolean,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterVectorDestination,
QgsProcessingUtils,
QgsProcessingException,
QgsFeatureSink,
)
from qgis import processing
from qgis.core import (
QgsFeature,QgsField, QgsFields,
QgsGeometry, QgsGeometryUtils,
QgsProject, QgsProperty, QgsVectorLayer,
QgsExpressionContextUtils,
QgsLineSymbol,
QgsRendererCategory,
QgsCategorizedSymbolRenderer,
QgsSpatialIndex,
QgsVertexId,
QgsGeometryUtils )
class AdjustGradient(QgsProcessingAlgorithm):
INPUT = 'INPUT'
INPUT2 = 'INPUT2'
GRADLIM = 'GRADLIM'
AUTONAME = 'AUTONAME'
VISOFF = 'VISOFF'
OUTPUT = 'OUTPUT'
OUTPUT2 = 'OUTPUT2'
def createInstance(self):
return AdjustGradient()
def name(self):
return 'adjustgradient'
def displayName(self):
return ('Adjust segment gradient')
def group(self):
return ('IMP Tools')
def groupId(self):
return 'imp'
def shortHelpString(self):
return ('Adjust all road segment gradients and junction node elevations'
'\n'
'Adjust all road segment gradients to be not steeper than the input value. '
'Z values of segment endpoints and intermediate vertices are adjusted in the output layer. '
'Z values of nodes around adjusted segments are also adjusted in the output layer. '
'\n'
'The input Segmentz map layer must have a grad field.'
'\n'
'The algorithm works by adjusting the steepest segment down to the input steepness value. '
'Connected segments are then adjusted. '
'Following that, the algorithm adjusts the next steepest unconnected segment. '
'\n'
'If there are still steep segments after the algorithm have passed through all segments, the process is repeated. '
'A maximum of three iterations has been coded in the algorithm.'
)
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterVectorLayer(
self.INPUT,'INPUT: Segment',
[QgsProcessing.TypeVectorLine],'Segmentz' ) )
self.addParameter(QgsProcessingParameterVectorLayer(
self.INPUT2,'INPUT2: Node',
[QgsProcessing.TypeVectorPoint],'Nodez' ) )
self.addParameter(QgsProcessingParameterNumber(
self.GRADLIM,'Not steeper than 1:' ,
defaultValue= 25) )
self.addParameter(QgsProcessingParameterBoolean(
self.AUTONAME,'Output auto naming ',
defaultValue=True))
self.addParameter(QgsProcessingParameterBoolean(
self.VISOFF,'Turn off other layers ',
defaultValue=True))
self.addParameter(QgsProcessingParameterFeatureSink(
self.OUTPUT,'Node_adjusted',
QgsProcessing.TypeVectorAnyGeometry ) )
self.addParameter(QgsProcessingParameterFeatureSink(
self.OUTPUT2,'Segment_adjusted',
QgsProcessing.TypeVectorAnyGeometry ) )
def processAlgorithm(self, parameters, context, feedback):
maxitera = 3 # maximum number of iterations (repeat for all segments)
seglay = self.parameterAsVectorLayer(parameters, self.INPUT, context)
if seglay is None:
raise QgsProcessingException(self.InvalidSourceError(parameters, self.INPUT))
nodelay = self.parameterAsVectorLayer(parameters, self.INPUT2, context)
if nodelay is None:
raise QgsProcessingException(self.InvalidSourceError(parameters, self.INPUT2))
self.gradlim = self.parameterAsInt(parameters, self.GRADLIM, context)
if self.gradlim is None:
raise QgsProcessingException(self.InvalidSourceError(parameters, self.gradlim))
autonaming = self.parameterAsBoolean( parameters, self.AUTONAME, context )
visibleoff = self.parameterAsBoolean( parameters, self.VISOFF, context )
# Node_adjusted output
newfields = QgsFields()
newfields.append(QgsField('id', QVariant.Int))
newfields.append(QgsField('z', QVariant.Double))
newfields.append(QgsField('oldz', QVariant.Double))
newfields.append(QgsField('adj', QVariant.Double))
(sink, self.dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
newfields,
1001, # PointZ wkbType
nodelay.sourceCrs()
)
# Segment_adjusted output
newfields = QgsFields()
newfields.append(QgsField('lid', QVariant.Int))
newfields.append(QgsField('wid', QVariant.Double))
newfields.append(QgsField('grad', QVariant.Double))
newfields.append(QgsField('styl', QVariant.Int))
newfields.append(QgsField('oldgrad', QVariant.Double))
(sink2, self.dest_id2) = self.parameterAsSink(
parameters,
self.OUTPUT2,
context,
newfields,
1002, # LineStringZ wkbType
seglay.sourceCrs()
)
# visible off
if visibleoff:
r = QgsProject.instance().layerTreeRoot()
layers = r.checkedLayers()
for lay in layers:
r.findLayer(lay.id()).setItemVisibilityChecked(False)
# Store into memory
d_idz={}
for f in nodelay.getFeatures():
id=f.id()
try:
z=f['z']
except:
raise QgsProcessingException('Error! Field: z not found in input node layer')
d_idz[id]=z
d_lidgrad={}
d_lidleng={}
lislidwkg=[]
d_lidgradwkg={}
d_lideid={}
d_lidsid={}
index = QgsSpatialIndex(nodelay.getFeatures())
for f in seglay.getFeatures():
lid=f['lid']
try:
grad=f['grad']
except:
raise QgsProcessingException('Error! Field: grad not found in input segment layer')
if not grad:
grad=9999
d_lidgrad[lid]=grad
lislidwkg.append(lid)
d_lidgradwkg[lid]=grad
lin = f.geometry().constGet()
pgeom = QgsGeometry(lin[0])
nearest = index.nearestNeighbor(pgeom, 1)
sid = nearest[0]
sz = d_idz[sid]
pgeom = QgsGeometry(lin[-1])
nearest = index.nearestNeighbor(pgeom, 1)
eid = nearest[0]
ez = d_idz[eid]
d_lidleng[lid]=f.geometry().constGet().length()
d_lidsid[lid]=sid
d_lideid[lid]=eid
# adjust next (steepest)
for itera in range(0,maxitera):
for lid in lislidwkg:
lid = min(d_lidgradwkg, key=d_lidgradwkg.get)
if d_lidgrad[lid] > self.gradlim:
break
d_lidgradwkg.pop(lid)
leng = d_lidleng[lid]
grad = d_lidgrad[lid]
ej = round( -(leng/grad - leng/self.gradlim),1)
eid = d_lideid[lid]
ez = d_idz[eid]
sid = d_lidsid[lid]
sz = d_idz[sid]
if ez<sz: # skip adjusting ez down if ez<sz
break
ezj = round((ez + ej),1)
if ezj==sz:
ng = 9999
else:
newgrad = abs(leng/(ezj-sz))
ng = round(newgrad,1)
d_lidgrad[lid]=ng
# update d_idz
d_idz[eid]=ezj
# update grad of connected segments
lidlisteid = [l for l,i in d_lideid.items() if i == eid]
lidlistsid = [l for l,i in d_lidsid.items() if i == eid]
lidlist = lidlisteid + lidlistsid
lidlist = list(dict.fromkeys(lidlist))
lidlist.remove(lid)
# print('lid',lid,'eid',eid,'connected lid\n',lidlist)
for lid in lidlist:
eid = d_lideid[lid]
sid = d_lidsid[lid]
ez = d_idz[eid]
sz = d_idz[sid]
leng = d_lidleng[lid]
oldgrad = d_lidgrad[lid]
if ez==sz:
newgrad=9999
else:
newgrad = abs(leng/(ez-sz))
d_lidgrad[lid] = round(newgrad,1)
# refill dict for next iteration
d_lidgradwkg = d_lidgrad.copy()
# adjust
for f in seglay.getFeatures():
geom = f.geometry()
lin = geom.constGet()
lid = f['lid']
wid = f['wid']
leng = lin.length()
eid = d_lideid[lid]
ez = d_idz[eid]
sid = d_lidsid[lid]
sz = d_idz[sid]
# reverse if ez<sz
if ez<sz:
# workaround to overcome Qgis crashing
geom=QgsGeometry(lin.reversed())
lin = geom.constGet()
temp = eid
eid = sid
sid = temp
d_lideid[lid] = eid
d_lidsid[lid] = sid
temp = ez
ez = sz
sz = temp
# insert z into vertices
lin.dropZValue()
lin.addZValue(0)
lin.setZAt(0,sz)
lin.setZAt(-1,ez)
n = lin.numPoints()
if n > 2:
for i in range(1,n-1):
v = QgsVertexId(0,0,i)
d = QgsGeometryUtils.distanceToVertex(lin,v)
z = d/leng * (ez-sz) + sz
lin.setZAt(i,z)
seglay.changeGeometry(f.id(), geom)
oldgrad = f['grad']
if not oldgrad:
oldgrad = 9999
grad = d_lidgrad[lid]
if grad<(self.gradlim-0.5):
styl = 1
elif grad!=oldgrad:
styl = 2
else:
styl = 0
if grad>50:
grad = round(grad,0)
if oldgrad>50:
oldgrad = round(oldgrad,0)
g = QgsFeature()
g.setGeometry(geom)
g.setAttributes([lid,wid, grad,styl,oldgrad])
sink2.addFeature(g, QgsFeatureSink.FastInsert)
i=0
feedback.pushInfo( '\n####################################\n' )
for f in nodelay.getFeatures():
id = f.id()
oldz = f['z']
z = d_idz[id]
adj = round( (oldz - z), 1 )
if adj !=0:
feedback.pushInfo( 'Node {} elevation adjusted {} meters'.format(id,adj) )
i+=1
geom = f.geometry()
p = geom.constGet()
p.setZ(z)
g = QgsFeature()
g.setGeometry(geom)
g.setAttributes([id,z,oldz,adj])
sink.addFeature(g, QgsFeatureSink.FastInsert)
feedback.pushInfo( '\nSEGMENTS AND ' + str(i) + ' NODES ADJUSTED' )
feedback.pushInfo( '\n\nOshAdjustGradient.py v2.11\n'
'####################################\n\n' )
if autonaming:
nodename = 'Node_' + str(self.gradlim)
segname = 'Segment_' + str(self.gradlim)
context.addLayerToLoadOnCompletion(self.dest_id,context.LayerDetails(
name=nodename,project=context.project() ))
context.addLayerToLoadOnCompletion(self.dest_id2,context.LayerDetails(
name=segname,project=context.project() ))
return {self.OUTPUT: self.dest_id, self.OUTPUT2: self.dest_id2}
    def postProcessAlgorithm(self, context, feedback):
        """Style the two output layers after the algorithm has finished.

        Loads the project-local QML styles onto the node and segment layers,
        then rebuilds the segment renderer in code because the default QML
        categories only match a gradient limit of 25.
        """
        project = QgsProject.instance()
        scope = QgsExpressionContextUtils.projectScope(project)
        # QML style files are looked up under <project_folder>\qsettings\
        # (Windows path separators are hard-coded here).
        projfold = scope.variable('project_folder')
        nodeqml = projfold + '\\qsettings\\Node_adjusted.qml'
        segqml = projfold + '\\qsettings\\Segment_adjusted.qml'
        layer2 = QgsProcessingUtils.mapLayerFromString(self.dest_id, context)
        layer2.loadNamedStyle(nodeqml)
        layer3 = QgsProcessingUtils.mapLayerFromString(self.dest_id2, context)
        layer3.loadNamedStyle(segqml)
        # necessary to customize categories based on self.gradlim input
        # default style is only for self.gradlim = 25
        catren = QgsCategorizedSymbolRenderer()
        catren.setClassAttribute('styl')
        # Categories keyed on the 'styl' attribute written in processAlgorithm:
        # 1: grad < gradlim-0.5 (wide pink), 2: grad changed from the stored
        # value (green), 0: otherwise (thin blue).
        linsym1 = QgsLineSymbol.createSimple( {'width':'1','color':'pink'} )
        linsym2 = QgsLineSymbol.createSimple( {'width':'.8','color':'green'} )
        linsym3 = QgsLineSymbol.createSimple( {'width':'.1','color':'blue'} )
        # Legend labels; exp1 reads e.g. "grad<24.5" for the default limit 25.
        exp1 = 'grad<'+str(self.gradlim-0.5)
        exp2 = 'grad changed'
        cat1 = QgsRendererCategory('1', linsym1, exp1)
        cat2 = QgsRendererCategory('2', linsym2, exp2)
        cat3 = QgsRendererCategory('0', linsym3, '')
        catren.addCategory(cat1)
        catren.addCategory(cat2)
        catren.addCategory(cat3)
        layer3.setRenderer(catren)
        layer3.triggerRepaint()
        return {self.OUTPUT: self.dest_id, self.OUTPUT2: self.dest_id2}
|
[
"qgis.core.QgsProject.instance",
"qgis.core.QgsRendererCategory",
"qgis.core.QgsGeometryUtils.distanceToVertex",
"qgis.core.QgsField",
"qgis.core.QgsGeometry",
"qgis.core.QgsProcessingParameterFeatureSink",
"qgis.core.QgsFeature",
"qgis.core.QgsProcessingParameterNumber",
"qgis.core.QgsFields",
"qgis.core.QgsProcessingException",
"qgis.core.QgsProcessingParameterVectorLayer",
"qgis.core.QgsLineSymbol.createSimple",
"qgis.core.QgsVertexId",
"qgis.core.QgsCategorizedSymbolRenderer",
"qgis.core.QgsProcessingUtils.mapLayerFromString",
"qgis.core.QgsExpressionContextUtils.projectScope",
"qgis.core.QgsProcessingParameterBoolean"
] |
[((5879, 5890), 'qgis.core.QgsFields', 'QgsFields', ([], {}), '()\n', (5888, 5890), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((6407, 6418), 'qgis.core.QgsFields', 'QgsFields', ([], {}), '()\n', (6416, 6418), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((14226, 14247), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (14245, 14247), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((14264, 14311), 'qgis.core.QgsExpressionContextUtils.projectScope', 'QgsExpressionContextUtils.projectScope', (['project'], {}), '(project)\n', (14302, 14311), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((14517, 14577), 'qgis.core.QgsProcessingUtils.mapLayerFromString', 'QgsProcessingUtils.mapLayerFromString', (['self.dest_id', 'context'], {}), '(self.dest_id, context)\n', (14554, 14577), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, 
QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((14635, 14696), 'qgis.core.QgsProcessingUtils.mapLayerFromString', 'QgsProcessingUtils.mapLayerFromString', (['self.dest_id2', 'context'], {}), '(self.dest_id2, context)\n', (14672, 14696), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((14897, 14927), 'qgis.core.QgsCategorizedSymbolRenderer', 'QgsCategorizedSymbolRenderer', ([], {}), '()\n', (14925, 14927), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((14988, 15047), 'qgis.core.QgsLineSymbol.createSimple', 'QgsLineSymbol.createSimple', (["{'width': '1', 'color': 'pink'}"], {}), "({'width': '1', 'color': 'pink'})\n", (15014, 15047), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((15065, 15126), 'qgis.core.QgsLineSymbol.createSimple', 'QgsLineSymbol.createSimple', (["{'width': '.8', 'color': 'green'}"], {}), "({'width': '.8', 'color': 'green'})\n", (15091, 15126), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, 
QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((15144, 15204), 'qgis.core.QgsLineSymbol.createSimple', 'QgsLineSymbol.createSimple', (["{'width': '.1', 'color': 'blue'}"], {}), "({'width': '.1', 'color': 'blue'})\n", (15170, 15204), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((15303, 15342), 'qgis.core.QgsRendererCategory', 'QgsRendererCategory', (['"""1"""', 'linsym1', 'exp1'], {}), "('1', linsym1, exp1)\n", (15322, 15342), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((15358, 15397), 'qgis.core.QgsRendererCategory', 'QgsRendererCategory', (['"""2"""', 'linsym2', 'exp2'], {}), "('2', linsym2, exp2)\n", (15377, 15397), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((15413, 15450), 'qgis.core.QgsRendererCategory', 'QgsRendererCategory', (['"""0"""', 'linsym3', '""""""'], {}), "('0', linsym3, '')\n", (15432, 15450), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((3662, 3774), 'qgis.core.QgsProcessingParameterVectorLayer', 'QgsProcessingParameterVectorLayer', (['self.INPUT', '"""INPUT: Segment"""', '[QgsProcessing.TypeVectorLine]', 
'"""Segmentz"""'], {}), "(self.INPUT, 'INPUT: Segment', [\n QgsProcessing.TypeVectorLine], 'Segmentz')\n", (3695, 3774), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((3831, 3940), 'qgis.core.QgsProcessingParameterVectorLayer', 'QgsProcessingParameterVectorLayer', (['self.INPUT2', '"""INPUT2: Node"""', '[QgsProcessing.TypeVectorPoint]', '"""Nodez"""'], {}), "(self.INPUT2, 'INPUT2: Node', [\n QgsProcessing.TypeVectorPoint], 'Nodez')\n", (3864, 3940), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((4009, 4095), 'qgis.core.QgsProcessingParameterNumber', 'QgsProcessingParameterNumber', (['self.GRADLIM', '"""Not steeper than 1:"""'], {'defaultValue': '(25)'}), "(self.GRADLIM, 'Not steeper than 1:',\n defaultValue=25)\n", (4037, 4095), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((4155, 4245), 'qgis.core.QgsProcessingParameterBoolean', 'QgsProcessingParameterBoolean', (['self.AUTONAME', '"""Output auto naming """'], {'defaultValue': '(True)'}), "(self.AUTONAME, 'Output auto naming 
',\n defaultValue=True)\n", (4184, 4245), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((4309, 4400), 'qgis.core.QgsProcessingParameterBoolean', 'QgsProcessingParameterBoolean', (['self.VISOFF', '"""Turn off other layers """'], {'defaultValue': '(True)'}), "(self.VISOFF, 'Turn off other layers ',\n defaultValue=True)\n", (4338, 4400), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((4471, 4575), 'qgis.core.QgsProcessingParameterFeatureSink', 'QgsProcessingParameterFeatureSink', (['self.OUTPUT', '"""Node_adjusted"""', 'QgsProcessing.TypeVectorAnyGeometry'], {}), "(self.OUTPUT, 'Node_adjusted',\n QgsProcessing.TypeVectorAnyGeometry)\n", (4504, 4575), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((4646, 4754), 'qgis.core.QgsProcessingParameterFeatureSink', 'QgsProcessingParameterFeatureSink', (['self.OUTPUT2', '"""Segment_adjusted"""', 'QgsProcessing.TypeVectorAnyGeometry'], {}), "(self.OUTPUT2, 'Segment_adjusted',\n QgsProcessing.TypeVectorAnyGeometry)\n", (4679, 4754), False, 
'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((5916, 5944), 'qgis.core.QgsField', 'QgsField', (['"""id"""', 'QVariant.Int'], {}), "('id', QVariant.Int)\n", (5924, 5944), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((5972, 6002), 'qgis.core.QgsField', 'QgsField', (['"""z"""', 'QVariant.Double'], {}), "('z', QVariant.Double)\n", (5980, 6002), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((6029, 6062), 'qgis.core.QgsField', 'QgsField', (['"""oldz"""', 'QVariant.Double'], {}), "('oldz', QVariant.Double)\n", (6037, 6062), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((6090, 6122), 'qgis.core.QgsField', 'QgsField', (['"""adj"""', 'QVariant.Double'], {}), "('adj', QVariant.Double)\n", (6098, 6122), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, 
QgsGeometryUtils\n'), ((6444, 6473), 'qgis.core.QgsField', 'QgsField', (['"""lid"""', 'QVariant.Int'], {}), "('lid', QVariant.Int)\n", (6452, 6473), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((6501, 6533), 'qgis.core.QgsField', 'QgsField', (['"""wid"""', 'QVariant.Double'], {}), "('wid', QVariant.Double)\n", (6509, 6533), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((6561, 6594), 'qgis.core.QgsField', 'QgsField', (['"""grad"""', 'QVariant.Double'], {}), "('grad', QVariant.Double)\n", (6569, 6594), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((6622, 6652), 'qgis.core.QgsField', 'QgsField', (['"""styl"""', 'QVariant.Int'], {}), "('styl', QVariant.Int)\n", (6630, 6652), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((6680, 6716), 'qgis.core.QgsField', 'QgsField', (['"""oldgrad"""', 'QVariant.Double'], {}), "('oldgrad', QVariant.Double)\n", (6688, 6716), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, 
QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((8204, 8223), 'qgis.core.QgsGeometry', 'QgsGeometry', (['lin[0]'], {}), '(lin[0])\n', (8215, 8223), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((8370, 8390), 'qgis.core.QgsGeometry', 'QgsGeometry', (['lin[-1]'], {}), '(lin[-1])\n', (8381, 8390), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((12589, 12601), 'qgis.core.QgsFeature', 'QgsFeature', ([], {}), '()\n', (12599, 12601), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((13260, 13272), 'qgis.core.QgsFeature', 'QgsFeature', ([], {}), '()\n', (13270, 13272), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((7027, 7048), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (7046, 7048), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((7425, 
7496), 'qgis.core.QgsProcessingException', 'QgsProcessingException', (['"""Error! Field: z not found in input node layer"""'], {}), "('Error! Field: z not found in input node layer')\n", (7447, 7496), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((7886, 7963), 'qgis.core.QgsProcessingException', 'QgsProcessingException', (['"""Error! Field: grad not found in input segment layer"""'], {}), "('Error! Field: grad not found in input segment layer')\n", (7908, 7963), False, 'from qgis.core import QgsProcessing, QgsProcessingAlgorithm, QgsProcessingContext, QgsProcessingException, QgsProcessingParameterBoolean, QgsProcessingParameterVectorLayer, QgsProcessingParameterFeatureSink, QgsProcessingParameterNumber, QgsProcessingParameterString, QgsProcessingParameterVectorDestination, QgsProcessingUtils, QgsProcessingException, QgsFeatureSink\n'), ((11869, 11889), 'qgis.core.QgsVertexId', 'QgsVertexId', (['(0)', '(0)', 'i'], {}), '(0, 0, i)\n', (11880, 11889), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, QgsGeometryUtils\n'), ((11912, 11953), 'qgis.core.QgsGeometryUtils.distanceToVertex', 'QgsGeometryUtils.distanceToVertex', (['lin', 'v'], {}), '(lin, v)\n', (11945, 11953), False, 'from qgis.core import QgsFeature, QgsField, QgsFields, QgsGeometry, QgsGeometryUtils, QgsProject, QgsProperty, QgsVectorLayer, QgsExpressionContextUtils, QgsLineSymbol, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsSpatialIndex, QgsVertexId, 
QgsGeometryUtils\n')]
|
import codecs
import os.path
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
def read(rel_path):
    """Return the text of *rel_path*, resolved relative to this file's directory.

    Reads with an explicit UTF-8 encoding so the result does not depend on the
    locale default (the original ``codecs.open`` call passed no encoding, which
    falls back to the platform default and can mis-decode the version file).
    """
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, rel_path), "r", encoding="utf-8") as fp:
        return fp.read()
def get_version(rel_path):
    """Extract the ``__version__`` string from *rel_path* without importing it."""
    for line in read(rel_path).splitlines():
        if not line.startswith("__version__"):
            continue
        # The version may be quoted with either quote character.
        quote_char = '"' if '"' in line else "'"
        return line.split(quote_char)[1]
    raise RuntimeError("Unable to find version string.")
# Package metadata for PyPI / pip installation.
setuptools.setup(
    name="QtDesign6",
    # Forward-slash path so the version file resolves on POSIX as well as
    # Windows (the original "src\\QtDesign6\\__init__.py" only worked on
    # Windows; os.path.join accepts '/' on both platforms).
    version=get_version("src/QtDesign6/__init__.py"),
    author="Jevex",
    author_email="<EMAIL>",
    description="Custom widgets and utilities for PySide6",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/JevexEndo/qtdesign6",
    project_urls={
        "Bug Tracker": "https://github.com/JevexEndo/qtdesign6/issues",
    },
    classifiers=[
        # Trove classifiers must match the official list exactly; the original
        # "4 - Beta " entry carried a trailing space and was therefore invalid.
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.8",
)
|
[
"setuptools.find_packages"
] |
[((1412, 1449), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (1436, 1449), False, 'import setuptools\n')]
|
#!/usr/bin/env python
import json
import os
import sys
from jsonpath_ng import parse
from lxml import etree
# Absolute path of the ../specification directory next to this script;
# example files referenced by the spec are opened relative to it.
SPEC_DIR = f"{os.path.dirname(os.path.realpath(__file__))}/../specification"
def main(file: str):
    """Inline the request and response example $refs of the /$convert path
    from *file* and print the resulting spec as JSON."""
    with open(file, 'r') as f:
        spec = json.load(f)
    # Both example locations are processed identically.
    queries = (
        "$.paths.['/$convert'].post.requestBody.content.*.examples.*.['$ref']",
        "$.paths.['/$convert'].post.responses.*.content.*.examples.*.['$ref']",
    )
    for query in queries:
        matches = parse(query).find(spec)
        inline_examples(spec, matches)
    print(json.dumps(spec))
def inline_examples(spec, examples_path):
    """Replace each matched example $ref in *spec*, in place, with the
    content of the file the reference ultimately points to."""
    for match in examples_path:
        ref_path = match.full_path
        content = read_example_from_component(spec, ref_path)
        # Overwrite the parent of the $ref node with an inline "value".
        ref_path.left.update(spec, {"value": content})
def read_example_from_component(spec: dict, path):
    """Resolve a component $ref and return the referenced example file's text.

    XML examples are pretty-printed, JSON examples are returned verbatim.
    Returns None when the referenced file is neither .json nor .xml.
    """
    component_ref = path.find(spec)[0].value
    # "#/components/..." -> "components...." so it can be fed back to jsonpath.
    dotted = component_ref.replace("#/", "").replace("/", ".")
    file_ref = parse(f"$.{dotted}.value.['$ref']").find(spec)[0].value
    if not file_ref.endswith((".json", ".xml")):
        return None
    with open(f"{SPEC_DIR}/{file_ref}", "r") as handle:
        if file_ref.endswith(".xml"):
            return pretty_print_xml(handle)
        return handle.read()
def pretty_print_xml(content):
    """Parse XML from *content* (a file-like object) and return it as an
    indented string."""
    tree = etree.parse(content)
    return etree.tostring(tree, pretty_print=True, encoding=str)
if __name__ == '__main__':
    # CLI entry point: argv[1] is the path of the OpenAPI spec JSON file.
    main(sys.argv[1])
|
[
"json.load",
"os.path.realpath",
"json.dumps",
"jsonpath_ng.parse",
"lxml.etree.parse",
"lxml.etree.tostring"
] |
[((1127, 1164), 'jsonpath_ng.parse', 'parse', (['f"""$.{com_path}.value.[\'$ref\']"""'], {}), '(f"$.{com_path}.value.[\'$ref\']")\n', (1132, 1164), False, 'from jsonpath_ng import parse\n'), ((1560, 1580), 'lxml.etree.parse', 'etree.parse', (['content'], {}), '(content)\n', (1571, 1580), False, 'from lxml import etree\n'), ((1592, 1642), 'lxml.etree.tostring', 'etree.tostring', (['x'], {'pretty_print': '(True)', 'encoding': 'str'}), '(x, pretty_print=True, encoding=str)\n', (1606, 1642), False, 'from lxml import etree\n'), ((257, 269), 'json.load', 'json.load', (['f'], {}), '(f)\n', (266, 269), False, 'import json\n'), ((290, 367), 'jsonpath_ng.parse', 'parse', (['"""$.paths.[\'/$convert\'].post.requestBody.content.*.examples.*.[\'$ref\']"""'], {}), '("$.paths.[\'/$convert\'].post.requestBody.content.*.examples.*.[\'$ref\']")\n', (295, 367), False, 'from jsonpath_ng import parse\n'), ((475, 552), 'jsonpath_ng.parse', 'parse', (['"""$.paths.[\'/$convert\'].post.responses.*.content.*.examples.*.[\'$ref\']"""'], {}), '("$.paths.[\'/$convert\'].post.responses.*.content.*.examples.*.[\'$ref\']")\n', (480, 552), False, 'from jsonpath_ng import parse\n'), ((141, 167), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (157, 167), False, 'import os\n'), ((655, 671), 'json.dumps', 'json.dumps', (['spec'], {}), '(spec)\n', (665, 671), False, 'import json\n')]
|
# --- Job configuration (edit before running) ---------------------------------
bucketName = 'org.cicsnc.albedo'  # S3 bucket holding the input files
basePath = 'Input/area/'  # key prefix under which the input imagery lives
satellite = 'goes13'  # GOES satellite whose files are processed
year = '2017'  # four-digit year encoded in the file names
startDay = 1  # first day-of-year to process (inclusive)
endDay = 10  # last day-of-year to process (inclusive)
filterBand = 'BAND_01'  # only keys ending with this band suffix are kept
dryrun = False  # when True, list the work items without enqueueing to SQS
import re
from os import fdopen, remove
from shutil import move
from tempfile import mkstemp
import boto3
def replace(file_path, pattern, subst):
    """Rewrite *file_path* in place, applying ``re.sub(pattern, subst, ...)``
    to every line.  Uses a temp file so a crash never truncates the original."""
    tmp_fd, tmp_path = mkstemp()
    with fdopen(tmp_fd, 'w') as out_file, open(file_path) as in_file:
        for row in in_file:
            out_file.write(re.sub(pattern, subst, row))
    # Swap the rewritten copy into place.
    remove(file_path)
    move(tmp_path, file_path)
def removePrefix(text, prefix):
    """Return *text* with a leading *prefix* stripped; unchanged if absent."""
    return text[len(prefix):] if text.startswith(prefix) else text
def getS3FileNames(bucket, folder):
    """Return every object key under *folder* in *bucket*, following the
    list_objects_v2 continuation-token pagination.  Uses the module-level
    ``s3`` client."""
    result = []
    params = {'Bucket': bucket, 'Prefix': folder}
    while True:
        page = s3.list_objects_v2(**params)
        result.extend(item['Key'] for item in page['Contents'])
        token = page.get('NextContinuationToken')
        if token is None:
            # Last page reached.
            break
        params['ContinuationToken'] = token
    return result
# Pick the grid configuration and daylight time windows for the satellite.
# BUG FIX: the original test was `satellite == 'goes08' or 'goes12' or ...`,
# which is always truthy (non-empty string literals), so the first branch ran
# for every satellite; membership tests restore the intended dispatch.
if satellite in ('goes08', 'goes12', 'goes13', 'goes14'):
    # GOES-East slot (75W grid).
    startTimeA = 0
    endTimeA = 130
    startTimeB = 730
    endTimeB = 2359
    replace('../ancillary.src/AlgorithmConfigurationFile_docker', 'GRIDID=GOES_..._VIS02', 'GRIDID=GOES_075_VIS02')
elif satellite in ('goes09', 'goes10', 'goes11', 'goes15'):
    # GOES-West slot (135W grid).
    startTimeA = 0
    endTimeA = 530
    startTimeB = 1130
    endTimeB = 2359
    replace('../ancillary.src/AlgorithmConfigurationFile_docker', 'GRIDID=GOES_..._VIS02', 'GRIDID=GOES_135_VIS02')
else:
    print("Invalid satellite setting")
    exit(-1)

s3 = boto3.client('s3')
s3.upload_file("../ancillary.src/AlgorithmConfigurationFile_docker", bucketName, "AlgorithmConfigurationFile_docker")

# Build the worklist of keys, filtered by satellite, band, year, day-of-year
# range and the two time windows encoded in the key name.
# BUG FIX: `map(...)[1:]` is not subscriptable on Python 3 (map/filter return
# iterators); list comprehensions keep the code working on Python 3 and make
# the final len() valid.
fileNames = getS3FileNames(bucketName, basePath)
fileNames = [removePrefix(d, basePath) for d in fileNames][1:]  # drop the folder placeholder key
fileNames = [f for f in fileNames if f.startswith(satellite)]
fileNames = [f for f in fileNames if f.endswith(filterBand)]
fileNames = [f for f in fileNames if f[7:11] == year]
days = list("%03d" % day for day in range(startDay, endDay + 1))
fileNames = [x for x in fileNames if x[12:15] in days]
timesA = list("%04d" % time for time in range(startTimeA, endTimeA + 1))
timesB = list("%04d" % time for time in range(startTimeB, endTimeB + 1))
fileNames = [x for x in fileNames if x[16:20] in timesA + timesB]

# Enqueue one SQS message per file for the AlbedoPh1 workers.
client = boto3.client('sqs', region_name='us-east-1')
queues = client.list_queues(QueueNamePrefix='AlbedoPh1')
queueURL = queues['QueueUrls'][0]
for fileName in fileNames:
    print(fileName)
    if not dryrun:
        client.send_message(QueueUrl=queueURL, MessageBody=fileName)
print(len(fileNames))
|
[
"os.remove",
"tempfile.mkstemp",
"boto3.client",
"shutil.move",
"os.fdopen",
"re.sub"
] |
[((1616, 1634), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (1628, 1634), False, 'import boto3\n'), ((2399, 2443), 'boto3.client', 'boto3.client', (['"""sqs"""'], {'region_name': '"""us-east-1"""'}), "('sqs', region_name='us-east-1')\n", (2411, 2443), False, 'import boto3\n'), ((325, 334), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (332, 334), False, 'from tempfile import mkstemp\n'), ((514, 531), 'os.remove', 'remove', (['file_path'], {}), '(file_path)\n', (520, 531), False, 'from os import fdopen, remove\n'), ((536, 561), 'shutil.move', 'move', (['abs_path', 'file_path'], {}), '(abs_path, file_path)\n', (540, 561), False, 'from shutil import move\n'), ((344, 359), 'os.fdopen', 'fdopen', (['fh', '"""w"""'], {}), "(fh, 'w')\n", (350, 359), False, 'from os import fdopen, remove\n'), ((480, 508), 're.sub', 're.sub', (['pattern', 'subst', 'line'], {}), '(pattern, subst, line)\n', (486, 508), False, 'import re\n')]
|
# -*- coding:utf-8 -*-
# Copyright 2019 TEEX
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from backend.dispatcher.request_dispatcher import RequestDispatcher
import time
import threading
class Resolver(threading.Thread):
    """Worker thread that polls every monitor's queue and hands each pending
    request to a shared RequestDispatcher."""

    def __init__(self, monitors, configs):
        """Remember the monitors to poll and build the dispatcher from *configs*."""
        threading.Thread.__init__(self)
        self.monitors = monitors
        self.dispatcher = RequestDispatcher(configs)

    def run(self):
        """Poll forever: dispatch a monitor's front request when one exists,
        otherwise sleep for a second before checking the next monitor."""
        while True:
            for monitor in self.monitors:
                pending = monitor.get_front_request()
                if pending is None:
                    # No work on this monitor; avoid a busy spin.
                    time.sleep(1)
                else:
                    self.dispatcher.dispatch_request(pending)
|
[
"threading.Thread.__init__",
"backend.dispatcher.request_dispatcher.RequestDispatcher",
"time.sleep"
] |
[((794, 825), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (819, 825), False, 'import threading\n'), ((885, 911), 'backend.dispatcher.request_dispatcher.RequestDispatcher', 'RequestDispatcher', (['configs'], {}), '(configs)\n', (902, 911), False, 'from backend.dispatcher.request_dispatcher import RequestDispatcher\n'), ((1285, 1298), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1295, 1298), False, 'import time\n')]
|
from spack import *
from glob import glob
import os
class Tensorflow(Package):
"""TensorFlow is an Open Source Software Library for Machine Intelligence"""
homepage = "https://www.tensorflow.org"
url = "https://github.com/tensorflow/tensorflow/archive/v0.10.0.tar.gz"
version('2.0.0-alpha0', 'a26886611105d3399c2a5985fe14d904')
version('1.13.1', '0fd6bd88f880c1d907e0bd898b37ee1b', preferred=True)
version('1.12.0', '48164180a2573e75f1c8dff492a550a0')
version('1.9.0', '3426192cce0f8e070b2010e5bd5695cd')
version('1.8.0', 'cd45874be9296644471dd43e7da3fbd0')
version('1.6.0', '6dc60ac37e49427cd7069968da42c1ac')
version('1.5.0', 'e087dc1f47dbbda87cf4278acddf785b')
version('1.3.0', '01c008c58d206324ef68cd5116a83965')
version('1.2.0', '3f15746caabfd2583724258643fd1678')
version('1.1.0', 'fb745649d33954c97d29b7acaffe7d65')
version('1.0.0-rc2', 'a058a7e0ba2b9761cf2420c82d520049')
version('0.10.0', 'b75cbd494d61a809af5ef25d7fba561b')
depends_on('swig', type='build')
# old tensorflow needs old bazel
depends_on('bazel@0.19.0', type='build', when='@1.13.0:')
depends_on('bazel@0.15.0', type='build', when='@1.8.0:1.12.0')
depends_on('bazel@0.9.0', type='build', when='@1.5.0:1.6.0')
depends_on('bazel@0.4.5', type='build', when='@1.2.0:1.3.0')
depends_on('bazel@0.4.4:0.4.999', type='build', when='@1.0.0:1.1.0')
depends_on('bazel@0.3.1:0.4.999', type='build', when='@:1.0.0')
extends('python')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-numpy@1.11.0:', type=('build', 'run'))
depends_on('py-six@1.10.0:', type=('build', 'run'))
depends_on('py-protobuf@3.6.0:', type=('build', 'run'), when='@1.8.0:')
depends_on('py-protobuf@3.3.0:', type=('build', 'run'), when='@1.3.0:1.6.0')
depends_on('py-protobuf@3.0.0b2', type=('build', 'run'), when='@:1.2.0')
depends_on('py-wheel', type=('build', 'run'))
depends_on('py-mock@2.0.0:', type=('build', 'run'))
depends_on('py-enum34@1.1.6:', type=('build', 'run'), when='@1.5.0: ^python@:3.3.999')
depends_on('py-absl-py@0.1.6', type=('build', 'run'), when='@1.5.0:')
depends_on('py-astor@0.1.6:', type=('build', 'run'), when='@1.6.0:')
depends_on('py-gast@0.2.0:', type=('build', 'run'), when='@1.6.0:')
depends_on('py-grpcio@1.8.6:', type=('build', 'run'), when='@1.6.0:')
depends_on('py-termcolor@1.1.0:', type=('build', 'run'), when='@1.6.0:')
depends_on('py-keras-applications@1.0.6:', type=('build', 'run'), when='@1.12.0:')
depends_on('py-keras-preprocessing@1.0.5:', type=('build', 'run'), when='@1.12.0:')
depends_on('py-h5py', type=('build', 'run'), when='@1.12.0:')
depends_on('py-google-pasta@0.1.2:', type=('build', 'run'), when='@2.0.0:')
patch('url-zlib.patch', when='@0.10.0')
patch('crosstool.patch', when='@1.0.0-rc2') # auch auf 0.10.0 wenn mit cuda!
patch('sha-icu.patch', when='@1.13.1')
variant('gcp', default=False,
description='Enable Google Cloud Platform Support')
variant('cuda', default=False,
description='Enable CUDA Support')
# openssl can be used to replace bazel's boringssl
# e.g., when system openssl is available, boringssl runs into namespace conflicts
variant('openssl', default=False,
description='Build with openssl instead of Bazel boringssl')
depends_on('cuda', when='+cuda')
depends_on('cudnn', when='+cuda')
depends_on('openssl@1.0.2:', type=('build', 'run'), when='@1.12.0:+openssl')
def setup_environment(self, spack_env, run_env):
    """Expose the spack-installed openssl library directory.

    When building against openssl instead of Bazel's bundled boringssl
    (``@1.12.0:+openssl``), both the build-time and the run-time
    environment need the openssl library directory on LD_LIBRARY_PATH.
    """
    if not self.spec.satisfies('@1.12.0:+openssl'):
        return
    # libs.search_flags has the form '-L<dir>'; drop the '-L' prefix.
    lib_dir = self.spec['openssl'].libs.search_flags[2:]
    for environment in (spack_env, run_env):
        environment.prepend_path('LD_LIBRARY_PATH', lib_dir)
def install(self, spec, prefix):
    """Configure TensorFlow via environment variables, build the pip
    package with bazel, and install it with setup.py.

    The TensorFlow ./configure script is driven entirely through TF_*
    environment variables; version-specific knobs are added below as the
    spec version requires them.
    """
    # --- translate the spack spec into configure env vars ---------------
    if '+gcp' in spec:
        env['TF_NEED_GCP'] = '1'
    else:
        env['TF_NEED_GCP'] = '0'
    env['PYTHON_BIN_PATH'] = str(spec['python'].prefix.bin) + '/python'
    env['SWIG_PATH'] = str(spec['swig'].prefix.bin)
    env['GCC_HOST_COMPILER_PATH'] = spack_cc
    if '+cuda' in spec:
        env['TF_NEED_CUDA'] = '1'
        env['TF_CUDA_VERSION'] = str(spec['cuda'].version)
        env['CUDA_TOOLKIT_PATH'] = str(spec['cuda'].prefix)
        # cudnn only wants the major version digit here
        env['TF_CUDNN_VERSION'] = str(spec['cudnn'].version)[0]
        env['CUDNN_INSTALL_PATH'] = str(spec['cudnn'].prefix)
        env['TF_CUDA_COMPUTE_CAPABILITIES'] = '3.5,5.2'
    else:
        env['TF_NEED_CUDA'] = '0'
        env['TF_CUDA_VERSION'] = ''
        env['CUDA_TOOLKIT_PATH'] = ''
        env['TF_CUDNN_VERSION'] = ''
        env['CUDNN_INSTALL_PATH'] = ''
    if self.spec.satisfies('@1.0.0-rc2:'):
        env['CC_OPT_FLAGS'] = '-march=x86-64 -mtune=generic'
        env['TF_NEED_JEMALLOC'] = '0'
        env['TF_NEED_HDFS'] = '0'
        env['TF_ENABLE_XLA'] = '0'
        env['PYTHON_LIB_PATH'] = self.module.site_packages_dir
        env['TF_NEED_OPENCL'] = '0'
    # additional config options starting with version 1.2
    if self.spec.satisfies('@1.2.0:'):
        env['TF_NEED_MKL'] = '0'
        env['TF_NEED_VERBS'] = '0'
    # additional config options starting with version 1.3
    if self.spec.satisfies('@1.3.0:'):
        env['TF_NEED_MPI'] = '0'
    # additional config options starting with version 1.5
    if self.spec.satisfies('@1.5.0:'):
        env['TF_NEED_S3'] = '0'
        env['TF_NEED_GDR'] = '0'
        env['TF_NEED_OPENCL_SYCL'] = '0'
        env['TF_SET_ANDROID_WORKSPACE'] = '0'
        # env variable is somehow ignored -> brute force
        filter_file(r'if workspace_has_any_android_rule\(\)', r'if True', 'configure.py')
    # additional config options starting with version 1.6
    if self.spec.satisfies('@1.6.0:'):
        env['TF_NEED_KAFKA'] = '0'
    # additional config options starting with version 1.8
    if self.spec.satisfies('@1.8.0:'):
        env['TF_DOWNLOAD_CLANG'] = '0'
        env['TF_NEED_AWS'] = '0'
    if self.spec.satisfies('@1.12.0:'):
        env['TF_NEED_IGNITE'] = '0'
        env['TF_NEED_ROCM'] = '0'
        # boringssl error again, build against openssl instead via TF_SYSTEM_LIBS
        # does not work for tf < 1.12.0
        # (https://github.com/tensorflow/tensorflow/issues/25283#issuecomment-460124556)
        # NOTE(review): reconstructed nesting -- openssl substitution only
        # applies to 1.12.0+ per the comment above; confirm against history.
        if self.spec.satisfies('+openssl'):
            env['TF_SYSTEM_LIBS'] = "boringssl"
    # set tmpdir to a non-NFS filesystem (because bazel uses ~/.cache/bazel)
    # TODO: This should be checked for non-nfsy filesystem, but the current
    #       best idea for it is to check
    #           subprocess.call(['stat', '--file-system', '--format=%T', tmp_path])
    #       to not be nfs. This is only valid for Linux and we'd like to
    #       stay at least also OSX compatible
    # Note: This particular path below /tmp/spack/tmp is required by the visionary container
    #       build flow:
    tmp_path = env.get('SPACK_TMPDIR', '/tmp/spack') + '/tf'
    mkdirp(tmp_path)
    env['TEST_TMPDIR'] = tmp_path
    env['HOME'] = tmp_path
    env["CC"] = env["SPACK_CC"]
    env["CXX"] = env["SPACK_CXX"]
    configure()
    # --- version dependent fixes applied after ./configure ---------------
    if self.spec.satisfies('@1.3.0:1.5.0'):
        # checksum for protobuf that bazel downloads (@github) changed, comment out to avoid error
        # better solution: replace wrong checksums in workspace.bzl
        # wrong one: 6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93,
        # online: e5fdeee6b28cf6c38d61243adff06628baa434a22b5ebb7432d2a7fbabbdb13d
        filter_file(r'sha256 = "6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93"',
                    r'#sha256 = "6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93"',
                    'tensorflow/workspace.bzl')
        # starting with tensorflow 1.3, tensorboard becomes a dependency
        # (...but is not really needed? Tensorboard should depend on tensorflow, not the other way!)
        # -> remove from list of required packages
        filter_file(r"'tensorflow-tensorboard",
                    r"#'tensorflow-tensorboard",
                    'tensorflow/tools/pip_package/setup.py')
    if self.spec.satisfies('@1.5.0:'):
        # google cloud support seems to be installed on default, leading to boringssl error
        # manually set the flag to false to avoid installing gcp support
        # (https://github.com/tensorflow/tensorflow/issues/20677#issuecomment-404634519)
        filter_file(r'--define with_gcp_support=true',
                    r'--define with_gcp_support=false',
                    '.tf_configure.bazelrc')
    if self.spec.satisfies('@1.6.0:'):
        # tensorboard name changed
        filter_file(r"'tensorboard >=",
                    r"#'tensorboard >=",
                    'tensorflow/tools/pip_package/setup.py')
    if self.spec.satisfies('@1.8.0:'):
        # 1.8.0 and 1.9.0 aborts with numpy import error during python_api generation
        # somehow the wrong PYTHONPATH is used...set --distinct_host_configuration=false as a workaround
        # (https://github.com/tensorflow/tensorflow/issues/22395#issuecomment-431229451)
        filter_file('build --action_env TF_NEED_OPENCL_SYCL="0"',
                    'build --action_env TF_NEED_OPENCL_SYCL="0"\n'
                    'build --distinct_host_configuration=false\n'
                    'build --action_env PYTHONPATH="{0}"'.format(env['PYTHONPATH']),
                    '.tf_configure.bazelrc')
    if self.spec.satisfies('@1.12.0:+openssl'):
        # add link to spack-installed openssl libs (needed when spack openssl replaces boringssl)
        filter_file('-lssl', '-lssl '+self.spec['openssl'].libs.search_flags, 'third_party/systemlibs/boringssl.BUILD')
        filter_file('-lcrypto', '-lcrypto '+self.spec['openssl'].libs.search_flags, 'third_party/systemlibs/boringssl.BUILD')
    if self.spec.satisfies('@1.13.1'):
        # tensorflow_estimator is an API for tensorflow
        # tensorflow-estimator imports tensorflow during build, so tensorflow has to be set up first
        filter_file(r"'tensorflow_estimator >=",
                    r"#'tensorflow_estimator >=",
                    'tensorflow/tools/pip_package/setup.py')
    if self.spec.satisfies('@2.0.0:'):
        # now it depends on the nightly versions...
        filter_file(r"'tf-estimator-nightly >=",
                    r"#'tf-estimator-nightly >=",
                    'tensorflow/tools/pip_package/setup.py')
        filter_file(r"REQUIRED_PACKAGES\[i\] = 'tb-nightly >=",
                    r"pass #REQUIRED_PACKAGES\[i\] = 'tb-nightly >=",
                    'tensorflow/tools/pip_package/setup.py')
        filter_file(r"'tb-nightly >=",
                    r"#'tb-nightly >=",
                    'tensorflow/tools/pip_package/setup.py')
    # --- run the bazel build and install the wheel sources ---------------
    if '+cuda' in spec:
        bazel('-c', 'opt', '--config=cuda', '//tensorflow/tools/pip_package:build_pip_package')
    else:
        bazel('-c', 'opt', '//tensorflow/tools/pip_package:build_pip_package')
    build_pip_package = Executable('bazel-bin/tensorflow/tools/pip_package/build_pip_package')
    build_pip_package(tmp_path)
    # using setup.py for installation
    # webpage suggests: sudo pip install /tmp/tensorflow_pkg/tensorflow-0.XYZ.whl
    mkdirp('_python_build')
    cd('_python_build')
    ln = which('ln')
    # symlink the bazel output plus the pip_package sources into one tree
    for fn in glob("../bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/*"):
        ln('-s', fn, '.')
    for fn in glob("../tensorflow/tools/pip_package/*"):
        ln('-s', fn, '.')
    setup_py('install', '--prefix={0}'.format(prefix))
|
[
"glob.glob"
] |
[((12544, 12647), 'glob.glob', 'glob', (['"""../bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/*"""'], {}), "(\n '../bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/*'\n )\n", (12548, 12647), False, 'from glob import glob\n'), ((12687, 12728), 'glob.glob', 'glob', (['"""../tensorflow/tools/pip_package/*"""'], {}), "('../tensorflow/tools/pip_package/*')\n", (12691, 12728), False, 'from glob import glob\n')]
|
#!/usr/bin/python
#
# Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Additional copyrights may follow
#
import boto3
import botocore
import sys
import re
import os
import json
import tarfile
import hashlib
from io import StringIO
import datetime
import unittest
import mock
import posix
def __unique_assign(releaseinfo, key, value):
    """Set releaseinfo[key] = value, raising if a conflicting value exists.

    Used while scanning a release file list: every file must imply the
    same project / basename / version, so a second, different value for
    the same key is an error.
    """
    if key not in releaseinfo:
        releaseinfo[key] = value
    elif releaseinfo[key] != value:
        raise Exception('Found files from two %ss: %s %s' %
                        (key, releaseinfo[key], value))
def __compute_hashes(filename):
    """Return a dict with the MD5, SHA1 and SHA256 hex digests of a file.

    The file is streamed in 64 KiB chunks so arbitrarily large release
    artifacts can be hashed without loading them into memory.
    """
    digests = {'md5': hashlib.md5(),
               'sha1': hashlib.sha1(),
               'sha256': hashlib.sha256()}
    with open(filename, 'rb') as stream:
        for chunk in iter(lambda: stream.read(64 * 1024), b''):
            for digest in digests.values():
                digest.update(chunk)
    return {name: digest.hexdigest() for name, digest in digests.items()}
def __query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    The "answer" return value is True for "yes" or False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    # prompt variant per allowed default; anything else is a caller bug
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if choice == '' and default is not None:
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def parse_versions(filelist):
    """Parse the project name, branch, file basename, and version name from a file list

    We're pretty conservative in this function, because it's an
    optimization over specifying a bunch of command line arguments
    explicitly.  Add projects / regexes as necessary...

    Returns a dict with keys 'project', 'basename', 'branch', 'version'
    and, if a readable tarball was in the list, 'build_unix_time'.
    Raises Exception when a file cannot be parsed or the files disagree.
    """
    releaseinfo = {}
    build_unix_time = 0
    for filename in filelist:
        if re.search(r'openmpi|OpenMPI', filename):
            # tarballs, source RPMs and OS X images, e.g. openmpi-1.4.0.tar.gz
            m = re.search(r'openmpi\-([0-9a-zA-Z\.]+)(?:\.tar|\-[0-9]+\.src\.rpm|\.dmg.gz)',
                          filename)
            if m is None:
                # Windows installers, e.g. OpenMPI_v1.4.0-1_win64.exe
                m = re.search(r'OpenMPI_v([0-9a-zA-Z\.]+)\-[0-9]+_win', filename)
            if m is None:
                raise Exception('Could not parse Open MPI filename: %s' % (filename))
            # yes, we mean open-mpi for the project.  We perhaps were
            # silly in naming the branch in S3.
            __unique_assign(releaseinfo, 'basename', 'openmpi')
            __unique_assign(releaseinfo, 'project', 'open-mpi')
            __unique_assign(releaseinfo, 'version', m.group(1))
        elif re.search('^hwloc-', filename):
            m = re.search(r'hwloc\-([0-9a-zA-Z\.]+)(?:\.tar|\-[0-9]+\.src\.rpm)',
                          filename)
            if m is None:
                # Windows builds, e.g. hwloc-win64-build-1.4.0.zip
                m = re.search(r'hwloc-win[0-9]+-build-([0-9a-zA-Z\.]+)\.zip', filename)
            if m is None:
                raise Exception('Could not parse hwloc filename: %s' % (filename))
            __unique_assign(releaseinfo, 'basename', 'hwloc')
            __unique_assign(releaseinfo, 'project', 'hwloc')
            __unique_assign(releaseinfo, 'version', m.group(1))
        else:
            raise Exception('Could not parse %s' % (filename))
        # branch name is 'v' + the leading major.minor of the version
        m = re.search(r'^[0-9]+\.[0-9]+', releaseinfo['version'])
        if m is None:
            raise Exception('Could not parse version %s' % (releaseinfo['version']))
        __unique_assign(releaseinfo, 'branch', 'v%s' % (m.group(0)))
        if build_unix_time == 0 and re.search(r'\.tar\.', filename):
            # rather than look at the ctime and mtime of the
            # tarball (which may change as tarballs are copied
            # around), look at the top level directory (first
            # entry in the tarball) for a mtime.
            tar = tarfile.open(filename)
            build_unix_time = tar.getmembers()[0].mtime
    if build_unix_time != 0:
        releaseinfo['build_unix_time'] = build_unix_time
    return releaseinfo
def upload_files(s3_client, s3_bucket, s3_key_prefix, release_info, files, prompt):
    """Upload a release's files plus a build-info JSON document to S3.

    s3_client     -- boto3 S3 client (or a compatible stub)
    s3_bucket     -- target bucket name
    s3_key_prefix -- key prefix under which project/branch folders live
    release_info  -- dict from parse_versions(): 'project', 'branch',
                     'version', 'basename', 'build_unix_time'
    files         -- local file names to upload
    prompt        -- 'ALWAYS_PROMPT', 'NO_OVERWRITE', 'NEVER_PROMPT' or
                     'ASSUME_NO'; controls confirmation / abort behavior

    Returns None; may return early without uploading depending on prompt.
    """
    # first, verify that the key_prefix exists.  We are chicken here
    # and won't create it.
    result = s3_client.list_objects_v2(Bucket = s3_bucket,
                                       Prefix = s3_key_prefix)
    if s3_bucket != 'open-mpi-scratch' and result['KeyCount'] == 0:
        # BUG FIX: previously referenced the undefined name full_key_prefix,
        # which raised NameError instead of the intended message.
        raise Exception('s3://%s/%s does not appear to be a valid prefix.' %
                        (s3_bucket, s3_key_prefix))
    # figure out if project and branch exist...
    project_key_path = '%s/%s' % (s3_key_prefix, release_info['project'])
    branch_key_path = '%s/%s' % (project_key_path, release_info['branch'])
    # print some release info
    print('Upload path: s3://%s/%s' % (s3_bucket, branch_key_path))
    print('Project: %s' % release_info['project'])
    print('Version: %s' % release_info['version'])
    print('Branch: %s' % release_info['branch'])
    print('Date: %s' % datetime.datetime.fromtimestamp(release_info['build_unix_time']))
    branch_result = s3_client.list_objects_v2(Bucket = s3_bucket,
                                              Prefix = branch_key_path)
    if branch_result['KeyCount'] == 0:
        project_result = s3_client.list_objects_v2(Bucket = s3_bucket,
                                                   Prefix = project_key_path)
        if project_result['KeyCount'] == 0:
            print(' * New project %s and branch %s' %
                  (release_info['project'], release_info['branch']))
        else:
            print(' * New branch %s' % (release_info['branch']))
    # and check for existing release
    build_filename = '%s/build-%s-%s.json' % (branch_key_path, release_info['basename'],
                                              release_info['version'])
    try:
        response = s3_client.get_object(Bucket = s3_bucket, Key = build_filename)
        buildinfo = json.load(response['Body'])
        buildinfo_found = True
    except botocore.exceptions.ClientError as e:
        code = e.response['Error']['Code']
        if code == 'NoSuchKey':
            buildinfo_found = False
        else:
            raise
        buildinfo = {}
        buildinfo['files'] = {}
    # check if we would overwrite a file and verify that would be ok...
    # NOTE(review): these membership tests compare basenames stored in the
    # build info against the raw entries of `files`; callers are expected
    # to pass bare file names (as the unit tests do) -- confirm before
    # passing paths with directory components.
    will_overwrite = False
    if buildinfo_found:
        print('Existing release found for %s %s' %
              (release_info['basename'], release_info['version']))
        print(' * Existing files that will not change:')
        for filename in buildinfo['files']:
            if filename not in files:
                print(' - %s' % filename)
        print(' * Existing files that will be overwritten:')
        for filename in buildinfo['files']:
            if filename in files:
                will_overwrite = True
                print(' - %s' % filename)
        print(' * New files:')
        for filename in files:
            filename = os.path.basename(filename)
            if filename not in buildinfo['files']:
                print(' - %s' % filename)
    else:
        print('New release for %s %s' %
              (release_info['basename'], release_info['version']))
        print(' * Files to upload:')
        for filename in files:
            filename = os.path.basename(filename)
            print(' - %s' % filename)
    print('')
    if prompt == 'ALWAYS_PROMPT':
        if not __query_yes_no('Continue?', 'no'):
            print('Aborting due to user selection')
            return
    elif prompt == 'NO_OVERWRITE':
        if will_overwrite:
            print('Aborting due to --yes and file overwrite')
            return
    elif prompt == 'NEVER_PROMPT':
        pass
    elif prompt == 'ASSUME_NO':
        print('Aborting due to ASSUME_NO')
        return
    else:
        # BUG FIX: %d on a string prompt raised TypeError; use %s.
        raise Exception('Unknown Prompt value %s' % prompt)
    # build a build-info structure for the release, possibly building
    # on the old one...
    buildinfo['branch'] = release_info['branch']
    buildinfo['valid'] = True
    buildinfo['revision'] = release_info['version']
    buildinfo['build_unix_time'] = release_info['build_unix_time']
    buildinfo['delete_on'] = 0
    for filename in files:
        info = os.stat(filename)
        hashes = __compute_hashes(filename)
        fileinfo = {}
        fileinfo['sha1'] = hashes['sha1']
        fileinfo['sha256'] = hashes['sha256']
        fileinfo['md5'] = hashes['md5']
        fileinfo['size'] = info.st_size
        buildinfo['files'][os.path.basename(filename)] = fileinfo
    for filename in files:
        target_name = '%s/%s' % (branch_key_path, os.path.basename(filename))
        s3_client.upload_file(filename, s3_bucket, target_name)
    buildinfo_str = json.dumps(buildinfo)
    s3_client.put_object(Bucket = s3_bucket, Key = build_filename,
                         Body = buildinfo_str)
######################################################################
#
# Unit Test Code
#
######################################################################
def _test_stat(filename):
    """Stand-in for os.stat(): a fixed stat result whose st_size is 987654."""
    # index 6 of the stat tuple is st_size; everything else is zeroed
    return posix.stat_result((0, 0, 0, 0, 0, 0, 987654, 0, 0, 0))
def _test_compute_hashes(filename):
    """Stand-in for __compute_hashes(): fixed digests, no file access."""
    return {'md5': "ABC", 'sha1': "ZYX"}
class _test_tarfile():
    """Stand-in for a tarfile.TarFile whose first member has mtime 12345."""
    def __init__(self):
        pass

    def getmembers(self):
        # BUG FIX: the original assigned mtime on the tarfile.TarInfo
        # *class* itself, clobbering its slot descriptor and breaking any
        # later tarfile.TarInfo() construction in the process.  Use a
        # proper instance instead.
        info = tarfile.TarInfo('member')
        info.mtime = 12345
        return [info]

    @classmethod
    def open(cls, filename):
        # signature mirrors tarfile.open(filename); filename is ignored
        return _test_tarfile()
class parse_versions_tests(unittest.TestCase):
    """Tests for parse_versions().

    tarfile.open is patched with _test_tarfile.open, so every tarball
    "read" reports an mtime of 12345 and no real files are needed.
    """

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_release(self):
        # plain Open MPI release: tarballs plus a source RPM
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.0.tar.bz2",
                    "openmpi-1.4.0-1.src.rpm"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")
        self.assertEqual(releaseinfo['build_unix_time'], 12345,
                         str(releaseinfo['build_unix_time']) + " != 12345")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_release_second_srpm(self):
        # a re-rolled SRPM (-2) must parse to the same version
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.0.tar.bz2",
                    "openmpi-1.4.0-2.src.rpm"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_binaries(self):
        # OS X image and Windows installer alongside the tarballs
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.0.tar.bz2",
                    "openmpi-1.4.0-1.src.rpm",
                    "openmpi-1.4.0.dmg.gz",
                    "OpenMPI_v1.4.0-1_win64.exe"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_prerelease(self):
        # rc suffixes stay part of the version but not of the branch
        filelist = ["openmpi-1.4.0rc1.tar.gz",
                    "openmpi-1.4.0rc1.tar.bz2",
                    "openmpi-1.4.0rc1-1.src.rpm"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "open-mpi",
                         releaseinfo['project'] + " != open-mpi")
        self.assertEqual(releaseinfo['basename'], "openmpi",
                         releaseinfo['basename'] + " != openmpi")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0rc1",
                         releaseinfo['version'] + " != 1.4.0rc1")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_ompi_mixed_versions(self):
        # files from two different versions must raise
        filelist = ["openmpi-1.4.0.tar.gz",
                    "openmpi-1.4.1.tar.bz2",
                    "openmpi-1.4.0-1.src.rpm"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_release(self):
        filelist = ["hwloc-1.4.0.tar.gz",
                    "hwloc-1.4.0.tar.bz2",
                    "hwloc-win32-build-1.4.0.zip",
                    "hwloc-win64-build-1.4.0.zip"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "hwloc",
                         releaseinfo['project'] + " != hwloc")
        self.assertEqual(releaseinfo['basename'], "hwloc",
                         releaseinfo['basename'] + " != hwloc")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0",
                         releaseinfo['version'] + " != 1.4.0")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_prerelease(self):
        filelist = ["hwloc-1.4.0rc1.tar.gz",
                    "hwloc-1.4.0rc1.tar.bz2",
                    "hwloc-win32-build-1.4.0rc1.zip",
                    "hwloc-win64-build-1.4.0rc1.zip"]
        releaseinfo = parse_versions(filelist)
        self.assertEqual(releaseinfo['project'], "hwloc",
                         releaseinfo['project'] + " != hwloc")
        self.assertEqual(releaseinfo['basename'], "hwloc",
                         releaseinfo['basename'] + " != hwloc")
        self.assertEqual(releaseinfo['branch'], "v1.4",
                         releaseinfo['branch'] + " != v1.4")
        self.assertEqual(releaseinfo['version'], "1.4.0rc1",
                         releaseinfo['version'] + " != 1.4.0rc1")

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_mixed_versions(self):
        # conflicting tarball versions must raise
        filelist = ["hwloc-1.4.0.tar.gz",
                    "hwloc-1.4.1.tar.bz2",
                    "hwloc-win32-build-1.4.0.zip",
                    "hwloc-win64-build-1.4.0.zip"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()

    @mock.patch('tarfile.open', _test_tarfile.open)
    def test_hwloc_mixed_versions2(self):
        # conflicting Windows build version must raise
        filelist = ["hwloc-1.4.0.tar.gz",
                    "hwloc-1.4.0.tar.bz2",
                    "hwloc-win32-build-1.4.1.zip",
                    "hwloc-win64-build-1.4.0.zip"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()

    # we didn't teach the parser about netloc (because it's dead), so
    # this should fail
    def test_netloc(self):
        filelist = ["netloc-1.4.0.tar.gz",
                    "netloc-1.4.0.tar.bz2"]
        try:
            releaseinfo = parse_versions(filelist)
        except Exception as e:
            pass
        else:
            self.fail()
class upload_files_tests(unittest.TestCase):
    """Tests for upload_files(), driven by a stub S3 client that records
    reads and writes instead of touching the network."""

    class test_s3_client():
        """Minimal stand-in for a boto3 S3 client.

        path     -- key prefix every write must fall under
        Existing -- when True, get_object() pretends a build-info
                    document for openmpi 100.0.0rho1 already exists
        """
        def __init__(self, path, Existing = False):
            self._readcount = 0
            self._file_write_list = []
            self._stream_write = ""
            self._path = path
            self._existing = Existing

        def get_object(self, Bucket, Key):
            self._readcount += 1
            result = {}
            if not self._existing or Key != self._path + 'build-openmpi-100.0.0rho1.json':
                # mimic boto3's NoSuchKey error shape
                response = {}
                response['Error'] = {}
                response['Error']['Code'] = 'NoSuchKey'
                raise botocore.exceptions.ClientError(response, 'get_object')
            # canned build-info document for the "existing release" case
            buildinfo = {}
            buildinfo['branch'] = 'v100.0'
            buildinfo['valid'] = True
            buildinfo['revision'] = '100.0.0rho1'
            buildinfo['build_unix_time'] = 314314
            buildinfo['delete_on'] = 0
            buildinfo['files'] = {}
            fileinfo = {}
            fileinfo['sha1'] = 'abc'
            fileinfo['md5'] = 'zyx'
            fileinfo['size'] = 1024
            buildinfo['files']['openmpi-100.0.0rho1.tar.bz2'] = fileinfo
            result['Body'] = StringIO(json.dumps(buildinfo))
            return result

        def list_objects_v2(self, Bucket, Prefix):
            # report a hit for any prefix of the configured path
            self._readcount += 1
            result = {}
            if self._path.startswith(Prefix):
                result['KeyCount'] = 1
            else:
                result['KeyCount'] = 0
            return result

        def upload_file(self, Filename, Bucket, Key):
            assert(Key.startswith(self._path))
            self._file_write_list.append(Key)

        def put_object(self, Bucket, Key, Body):
            assert(Key.startswith(self._path))
            self._file_write_list.append(Key)
            self._stream_write += Body

        def get_readcount(self):
            return self._readcount

        def get_write_list(self):
            return self._file_write_list

        def get_write_stream(self):
            return self._stream_write

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_new_buildinfo(self):
        # fresh release: both files plus the build-info json are written
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 12345
        files = ['openmpi-100.0.0rho1.tar.gz', 'openmpi-100.0.0rho1.tar.bz2']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = False)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NO_OVERWRITE')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 3,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
        buildinfo = json.loads(client.get_write_stream())
        self.assertEqual(len(buildinfo['files']), 2,
                         'Unexpected files length: %s' % str(buildinfo['files']))

    def test_existing_buildinfo_nocontinue(self):
        # ASSUME_NO must abort before any write happens
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'ASSUME_NO')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 0,
                         "Unexpected write list length: %s" % str(client.get_write_list()))

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_existing_buildinfo_nooverlap(self):
        # new file does not collide with the existing release contents
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NO_OVERWRITE')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 2,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
        buildinfo = json.loads(client.get_write_stream())
        self.assertEqual(len(buildinfo['files']), 2,
                         'Unexpected files length: %s' % str(buildinfo['files']))

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_existing_buildinfo_overlap_ok(self):
        # NEVER_PROMPT allows overwriting an existing file
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz', 'openmpi-100.0.0rho1.tar.bz2']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NEVER_PROMPT')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 3,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
        buildinfo = json.loads(client.get_write_stream())
        self.assertEqual(len(buildinfo['files']), 2,
                         'Unexpected files length: %s' % str(buildinfo['files']))

    @mock.patch('os.stat', _test_stat)
    @mock.patch('__main__.__compute_hashes', _test_compute_hashes)
    def test_existing_buildinfo_overlap_fail(self):
        # NO_OVERWRITE must abort when an overwrite would occur
        releaseinfo = {}
        releaseinfo['project'] = 'open-mpi'
        releaseinfo['branch'] = 'v100.0'
        releaseinfo['version'] = '100.0.0rho1'
        releaseinfo['basename'] = 'openmpi'
        releaseinfo['build_unix_time'] = 1
        files = ['openmpi-100.0.0rho1.tar.gz', 'openmpi-100.0.0rho1.tar.bz2']
        client = self.test_s3_client("scratch/open-mpi/v100.0/", Existing = True)
        upload_files(client, 'open-mpi-scratch', 'scratch',
                     releaseinfo, files, 'NO_OVERWRITE')
        self.assertEqual(client.get_readcount(), 3,
                         "readcount was %d, expected 3" % (client.get_readcount()))
        self.assertEqual(len(client.get_write_list()), 0,
                         "Unexpected write list length: %s" % str(client.get_write_list()))
# Run the unit tests when executed directly; the upload logic itself is
# exercised through the mocked S3 clients above.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"sys.stdout.write",
"botocore.exceptions.ClientError",
"hashlib.md5",
"json.load",
"hashlib.sha1",
"os.stat",
"os.path.basename",
"mock.patch",
"json.dumps",
"hashlib.sha256",
"datetime.datetime.fromtimestamp",
"tarfile.open",
"posix.stat_result",
"re.search"
] |
[((733, 746), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (744, 746), False, 'import hashlib\n'), ((758, 772), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (770, 772), False, 'import hashlib\n'), ((786, 802), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (800, 802), False, 'import hashlib\n'), ((9695, 9716), 'json.dumps', 'json.dumps', (['buildinfo'], {}), '(buildinfo)\n', (9705, 9716), False, 'import json\n'), ((10033, 10087), 'posix.stat_result', 'posix.stat_result', (['(0, 0, 0, 0, 0, 0, 987654, 0, 0, 0)'], {}), '((0, 0, 0, 0, 0, 0, 987654, 0, 0, 0))\n', (10050, 10087), False, 'import posix\n'), ((10529, 10575), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (10539, 10575), False, 'import mock\n'), ((11430, 11476), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (11440, 11476), False, 'import mock\n'), ((12203, 12249), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (12213, 12249), False, 'import mock\n'), ((13059, 13105), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (13069, 13105), False, 'import mock\n'), ((13838, 13884), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (13848, 13884), False, 'import mock\n'), ((14217, 14263), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (14227, 14263), False, 'import mock\n'), ((15020, 15066), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (15030, 15066), False, 'import mock\n'), ((15844, 15890), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', 
_test_tarfile.open)\n", (15854, 15890), False, 'import mock\n'), ((16275, 16321), 'mock.patch', 'mock.patch', (['"""tarfile.open"""', '_test_tarfile.open'], {}), "('tarfile.open', _test_tarfile.open)\n", (16285, 16321), False, 'import mock\n'), ((19179, 19212), 'mock.patch', 'mock.patch', (['"""os.stat"""', '_test_stat'], {}), "('os.stat', _test_stat)\n", (19189, 19212), False, 'import mock\n'), ((19218, 19279), 'mock.patch', 'mock.patch', (['"""__main__.__compute_hashes"""', '_test_compute_hashes'], {}), "('__main__.__compute_hashes', _test_compute_hashes)\n", (19228, 19279), False, 'import mock\n'), ((21157, 21190), 'mock.patch', 'mock.patch', (['"""os.stat"""', '_test_stat'], {}), "('os.stat', _test_stat)\n", (21167, 21190), False, 'import mock\n'), ((21196, 21257), 'mock.patch', 'mock.patch', (['"""__main__.__compute_hashes"""', '_test_compute_hashes'], {}), "('__main__.__compute_hashes', _test_compute_hashes)\n", (21206, 21257), False, 'import mock\n'), ((22287, 22320), 'mock.patch', 'mock.patch', (['"""os.stat"""', '_test_stat'], {}), "('os.stat', _test_stat)\n", (22297, 22320), False, 'import mock\n'), ((22326, 22387), 'mock.patch', 'mock.patch', (['"""__main__.__compute_hashes"""', '_test_compute_hashes'], {}), "('__main__.__compute_hashes', _test_compute_hashes)\n", (22336, 22387), False, 'import mock\n'), ((23448, 23481), 'mock.patch', 'mock.patch', (['"""os.stat"""', '_test_stat'], {}), "('os.stat', _test_stat)\n", (23458, 23481), False, 'import mock\n'), ((23487, 23548), 'mock.patch', 'mock.patch', (['"""__main__.__compute_hashes"""', '_test_compute_hashes'], {}), "('__main__.__compute_hashes', _test_compute_hashes)\n", (23497, 23548), False, 'import mock\n'), ((24444, 24459), 'unittest.main', 'unittest.main', ([], {}), '()\n', (24457, 24459), False, 'import unittest\n'), ((1937, 1972), 'sys.stdout.write', 'sys.stdout.write', (['(question + prompt)'], {}), '(question + prompt)\n', (1953, 1972), False, 'import sys\n'), ((2688, 2726), 're.search', 
're.search', (['"""openmpi|OpenMPI"""', 'filename'], {}), "('openmpi|OpenMPI', filename)\n", (2697, 2726), False, 'import re\n'), ((4069, 4122), 're.search', 're.search', (['"""^[0-9]+\\\\.[0-9]+"""', "releaseinfo['version']"], {}), "('^[0-9]+\\\\.[0-9]+', releaseinfo['version'])\n", (4078, 4122), False, 'import re\n'), ((6861, 6888), 'json.load', 'json.load', (["response['Body']"], {}), "(response['Body'])\n", (6870, 6888), False, 'import json\n'), ((9186, 9203), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (9193, 9203), False, 'import os\n'), ((2745, 2846), 're.search', 're.search', (['"""openmpi\\\\-([0-9a-zA-Z\\\\.]+)(?:\\\\.tar|\\\\-[0-9]+\\\\.src\\\\.rpm|\\\\.dmg.gz)"""', 'filename'], {}), "(\n 'openmpi\\\\-([0-9a-zA-Z\\\\.]+)(?:\\\\.tar|\\\\-[0-9]+\\\\.src\\\\.rpm|\\\\.dmg.gz)',\n filename)\n", (2754, 2846), False, 'import re\n'), ((3410, 3440), 're.search', 're.search', (['"""^hwloc-"""', 'filename'], {}), "('^hwloc-', filename)\n", (3419, 3440), False, 'import re\n'), ((4336, 4368), 're.search', 're.search', (['"""\\\\.tar\\\\."""', 'filename'], {}), "('\\\\.tar\\\\.', filename)\n", (4345, 4368), False, 'import re\n'), ((5919, 5983), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["release_info['build_unix_time']"], {}), "(release_info['build_unix_time'])\n", (5950, 5983), False, 'import datetime\n'), ((7901, 7927), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (7917, 7927), False, 'import os\n'), ((8231, 8257), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (8247, 8257), False, 'import os\n'), ((9465, 9491), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9481, 9491), False, 'import os\n'), ((2178, 2250), 'sys.stdout.write', 'sys.stdout.write', (['"""Please respond with \'yes\' or \'no\' (or \'y\' or \'n\').\n"""'], {}), '("Please respond with \'yes\' or \'no\' (or \'y\' or \'n\').\\n")\n', (2194, 2250), False, 'import sys\n'), 
((2903, 2965), 're.search', 're.search', (['"""OpenMPI_v([0-9a-zA-Z\\\\.]+)\\\\-[0-9]+_win"""', 'filename'], {}), "('OpenMPI_v([0-9a-zA-Z\\\\.]+)\\\\-[0-9]+_win', filename)\n", (2912, 2965), False, 'import re\n'), ((3458, 3543), 're.search', 're.search', (['"""hwloc\\\\-([0-9a-zA-Z\\\\.]+)(?:\\\\.tar|\\\\-[0-9]+\\\\.src\\\\.rpm)"""', 'filename'], {}), "('hwloc\\\\-([0-9a-zA-Z\\\\.]+)(?:\\\\.tar|\\\\-[0-9]+\\\\.src\\\\.rpm)', filename\n )\n", (3467, 3543), False, 'import re\n'), ((4407, 4429), 'tarfile.open', 'tarfile.open', (['filename'], {}), '(filename)\n', (4419, 4429), False, 'import tarfile\n'), ((9582, 9608), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9598, 9608), False, 'import os\n'), ((17702, 17757), 'botocore.exceptions.ClientError', 'botocore.exceptions.ClientError', (['response', '"""get_object"""'], {}), "(response, 'get_object')\n", (17733, 17757), False, 'import botocore\n'), ((18288, 18309), 'json.dumps', 'json.dumps', (['buildinfo'], {}), '(buildinfo)\n', (18298, 18309), False, 'import json\n'), ((3605, 3673), 're.search', 're.search', (['"""hwloc-win[0-9]+-build-([0-9a-zA-Z\\\\.]+)\\\\.zip"""', 'filename'], {}), "('hwloc-win[0-9]+-build-([0-9a-zA-Z\\\\.]+)\\\\.zip', filename)\n", (3614, 3673), False, 'import re\n')]
|
from ethereum import tester, vm
from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex
from ethereum.state_transition import apply_message
s = tester.state()
c = s.contract('eip_96_blockhash_getter.se.py')
blockhash_addr = b'\x00' * 19 + b'\x10'
system_addr = b'\xff' * 19 + b'\xfe'
s.state.set_code(blockhash_addr, s.state.get_code(c))
def mk_hash_setting_message(data):
return vm.Message(sender=system_addr, to=blockhash_addr, value=0, gas=1000000, data=data)
print("Setting block hashes")
for i in range(1, 1000):
s.state.block_number = i + 1
o = apply_message(s.state, mk_hash_setting_message(sha3(str(i))))
if i % 100 == 0:
print("Set %d" % i)
print("Testing reads")
s.state.block_number = 1000
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(999)) == sha3(str(999))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(998)) == sha3(str(998))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(744)) == sha3(str(744))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(743)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1000)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(1001)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(513)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(512)) == sha3(str(512))
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(511)) == b'\x00' * 32
assert s.send(tester.k0, blockhash_addr, 0, encode_int32(256)) == sha3(str(256))
print("Tests passed!")
print("EVM code: 0x%s" % encode_hex(s.state.get_code(blockhash_addr)))
|
[
"ethereum.utils.encode_int32",
"ethereum.vm.Message",
"ethereum.tester.state"
] |
[((156, 170), 'ethereum.tester.state', 'tester.state', ([], {}), '()\n', (168, 170), False, 'from ethereum import tester, vm\n'), ((397, 483), 'ethereum.vm.Message', 'vm.Message', ([], {'sender': 'system_addr', 'to': 'blockhash_addr', 'value': '(0)', 'gas': '(1000000)', 'data': 'data'}), '(sender=system_addr, to=blockhash_addr, value=0, gas=1000000,\n data=data)\n', (407, 483), False, 'from ethereum import tester, vm\n'), ((784, 801), 'ethereum.utils.encode_int32', 'encode_int32', (['(999)'], {}), '(999)\n', (796, 801), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((865, 882), 'ethereum.utils.encode_int32', 'encode_int32', (['(998)'], {}), '(998)\n', (877, 882), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((946, 963), 'ethereum.utils.encode_int32', 'encode_int32', (['(744)'], {}), '(744)\n', (958, 963), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((1027, 1044), 'ethereum.utils.encode_int32', 'encode_int32', (['(743)'], {}), '(743)\n', (1039, 1044), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((1106, 1124), 'ethereum.utils.encode_int32', 'encode_int32', (['(1000)'], {}), '(1000)\n', (1118, 1124), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((1186, 1204), 'ethereum.utils.encode_int32', 'encode_int32', (['(1001)'], {}), '(1001)\n', (1198, 1204), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((1266, 1283), 'ethereum.utils.encode_int32', 'encode_int32', (['(513)'], {}), '(513)\n', (1278, 1283), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((1345, 1362), 'ethereum.utils.encode_int32', 'encode_int32', (['(512)'], {}), '(512)\n', (1357, 1362), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((1426, 1443), 'ethereum.utils.encode_int32', 'encode_int32', (['(511)'], {}), '(511)\n', (1438, 
1443), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n'), ((1505, 1522), 'ethereum.utils.encode_int32', 'encode_int32', (['(256)'], {}), '(256)\n', (1517, 1522), False, 'from ethereum.utils import sha3, encode_int32, safe_ord, encode_hex\n')]
|
from django_docutils.lib.utils import chop_after_docinfo, chop_after_title
def test_chop_after_title():
content = """=============================================
Learn JavaScript for free: The best resources
=============================================
first section
-------------
some content
""".strip()
result = chop_after_title(content)
expected = """
first section
-------------
some content""".strip()
assert result == expected
def test_chop_after_docinfo():
before = """
===========
Content ok!
===========
:programming_languages: javascript
:topics: webpack
:Created: 2017-07-30
:Author: tony
more text
first section
-------------
some content
""".strip()
after = """
more text
first section
-------------
some content
""".strip()
assert chop_after_docinfo(before) == after
# test docinfo handles spaces in values
assert (
chop_after_docinfo(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
:Title: Overridden Title
:Subtitle: Overridden Subtitle
Content
-------
hi
""".strip()
)
== """
Content
-------
hi""".strip()
)
|
[
"django_docutils.lib.utils.chop_after_docinfo",
"django_docutils.lib.utils.chop_after_title"
] |
[((330, 355), 'django_docutils.lib.utils.chop_after_title', 'chop_after_title', (['content'], {}), '(content)\n', (346, 355), False, 'from django_docutils.lib.utils import chop_after_docinfo, chop_after_title\n'), ((794, 820), 'django_docutils.lib.utils.chop_after_docinfo', 'chop_after_docinfo', (['before'], {}), '(before)\n', (812, 820), False, 'from django_docutils.lib.utils import chop_after_docinfo, chop_after_title\n')]
|
# to run this test, from directory above:
# setenv PYTHONPATH /path/to/pyradiomics/radiomics
# nosetests --nocapture -v tests/test_docstrings.py
import logging
from nose_parameterized import parameterized
import six
from radiomics import getFeatureClasses
from testUtils import custom_name_func
featureClasses = getFeatureClasses()
def setup_module(module):
# runs before anything in this file
print("") # this is to get a newline after the dots
return
class TestDocStrings:
def setup(self):
# setup before each test method
print("") # this is to get a newline after the dots
@classmethod
def setup_class(self):
# called before any methods in this class
print("") # this is to get a newline after the dots
@classmethod
def teardown_class(self):
# run after any methods in this class
print("") # this is to get a newline after the dots
def generate_scenarios():
global featureClasses
for featureClassName, featureClass in six.iteritems(featureClasses):
logging.info('generate_scenarios %s', featureClassName)
doc = featureClass.__doc__
assert(doc is not None)
featureNames = featureClass.getFeatureNames()
for f in featureNames:
yield (featureClassName, f)
@parameterized.expand(generate_scenarios(), testcase_func_name=custom_name_func)
def test_class(self, featureClassName, featureName):
global featureClasses
logging.info('%s', featureName)
features = featureClasses[featureClassName]
doc = getattr(features, "get%sFeatureValue" % featureName).__doc__
logging.info('%s', doc)
assert(doc is not None)
|
[
"logging.info",
"six.iteritems",
"radiomics.getFeatureClasses"
] |
[((316, 335), 'radiomics.getFeatureClasses', 'getFeatureClasses', ([], {}), '()\n', (333, 335), False, 'from radiomics import getFeatureClasses\n'), ((1027, 1056), 'six.iteritems', 'six.iteritems', (['featureClasses'], {}), '(featureClasses)\n', (1040, 1056), False, 'import six\n'), ((1490, 1521), 'logging.info', 'logging.info', (['"""%s"""', 'featureName'], {}), "('%s', featureName)\n", (1502, 1521), False, 'import logging\n'), ((1651, 1674), 'logging.info', 'logging.info', (['"""%s"""', 'doc'], {}), "('%s', doc)\n", (1663, 1674), False, 'import logging\n'), ((1066, 1121), 'logging.info', 'logging.info', (['"""generate_scenarios %s"""', 'featureClassName'], {}), "('generate_scenarios %s', featureClassName)\n", (1078, 1121), False, 'import logging\n')]
|
"""Device RabbitMQ messages module."""
import json
import logging
import time
import pika
from fm_server.settings import get_config
LOGGER = logging.getLogger("fm.device.rabbitmq")
def get_connection(config=None):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
LOGGER.info("Connecting to RabbitMQ")
if not config:
config = get_config()
user = config.RABBITMQ_USER
password = config.RABBITMQ_PASSWORD
virtual_host = config.RABBITMQ_VHOST
host = config.RABBITMQ_HOST
port = config.RABBITMQ_PORT
creds = pika.PlainCredentials(user, password)
params = pika.ConnectionParameters(
host=host, port=port, virtual_host=virtual_host, credentials=creds
)
return pika.BlockingConnection(parameters=params)
def send_create_message(destination="all"):
"""Send a create message to 'destination' devices."""
config = get_config()
connection = get_connection(config=config)
channel = connection.channel()
exchange_name = config.RABBITMQ_MESSAGES_EXCHANGE_NAME
exchange_type = config.RABBITMQ_MESSAGES_EXCHANGE_TYPE
channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type)
routing_key = destination + ".create"
message = {"command": "create"}
channel.basic_publish(
exchange=exchange_name,
routing_key=routing_key,
body=json.dumps(message, ensure_ascii=True),
)
LOGGER.debug(f"Sent message with key:{routing_key} to {exchange_name} exchange")
connection.close()
def get_device_status(device_id):
"""Get the device status from the heartbeat service for a given device_id."""
config = get_config()
connection = get_connection(config=config)
channel = connection.channel()
exchange_name = config.RABBITMQ_MESSAGES_EXCHANGE_NAME
exchange_type = config.RABBITMQ_MESSAGES_EXCHANGE_TYPE
channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type)
method_frame = channel.queue_declare(queue="", exclusive=True, auto_delete=True)
reply_queue = method_frame.method.queue
properties = pika.BasicProperties(
content_type="application/json", reply_to=reply_queue
)
routing_key = "_internal"
message = {"command": "device_status", "id": device_id}
channel.basic_publish(
exchange=exchange_name,
routing_key=routing_key,
body=json.dumps(message, ensure_ascii=True),
properties=properties,
)
LOGGER.info(f"Sent request for {device_id } status to {exchange_name} exchange")
attempts = 0
while attempts < 5:
# pylint: disable=unused-variable
method_frame, header_frame, body = channel.basic_get(reply_queue)
if method_frame:
connection.close()
state = str(body, "utf-8")
LOGGER.info(f"Returned status is {state}")
return state
LOGGER.debug("No return message received yet")
time.sleep(1)
attempts += 1
LOGGER.warning("No return message received for device status message request")
connection.close()
return "disconnected"
|
[
"pika.PlainCredentials",
"pika.ConnectionParameters",
"logging.getLogger",
"time.sleep",
"json.dumps",
"pika.BasicProperties",
"fm_server.settings.get_config",
"pika.BlockingConnection"
] |
[((153, 192), 'logging.getLogger', 'logging.getLogger', (['"""fm.device.rabbitmq"""'], {}), "('fm.device.rabbitmq')\n", (170, 192), False, 'import logging\n'), ((750, 787), 'pika.PlainCredentials', 'pika.PlainCredentials', (['user', 'password'], {}), '(user, password)\n', (771, 787), False, 'import pika\n'), ((802, 899), 'pika.ConnectionParameters', 'pika.ConnectionParameters', ([], {'host': 'host', 'port': 'port', 'virtual_host': 'virtual_host', 'credentials': 'creds'}), '(host=host, port=port, virtual_host=virtual_host,\n credentials=creds)\n', (827, 899), False, 'import pika\n'), ((924, 966), 'pika.BlockingConnection', 'pika.BlockingConnection', ([], {'parameters': 'params'}), '(parameters=params)\n', (947, 966), False, 'import pika\n'), ((1091, 1103), 'fm_server.settings.get_config', 'get_config', ([], {}), '()\n', (1101, 1103), False, 'from fm_server.settings import get_config\n'), ((1883, 1895), 'fm_server.settings.get_config', 'get_config', ([], {}), '()\n', (1893, 1895), False, 'from fm_server.settings import get_config\n'), ((2338, 2413), 'pika.BasicProperties', 'pika.BasicProperties', ([], {'content_type': '"""application/json"""', 'reply_to': 'reply_queue'}), "(content_type='application/json', reply_to=reply_queue)\n", (2358, 2413), False, 'import pika\n'), ((538, 550), 'fm_server.settings.get_config', 'get_config', ([], {}), '()\n', (548, 550), False, 'from fm_server.settings import get_config\n'), ((3210, 3223), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3220, 3223), False, 'import time\n'), ((1588, 1626), 'json.dumps', 'json.dumps', (['message'], {'ensure_ascii': '(True)'}), '(message, ensure_ascii=True)\n', (1598, 1626), False, 'import json\n'), ((2633, 2671), 'json.dumps', 'json.dumps', (['message'], {'ensure_ascii': '(True)'}), '(message, ensure_ascii=True)\n', (2643, 2671), False, 'import json\n')]
|
import pytest
import re
from pytest_mock import mocker
import flask
import flask.sessions
from flask_dynamodb_sessions import Session
def test_session_boto_settings(mocker):
client_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
app = flask.Flask(__name__)
app.config.update(
SESSION_DYNAMODB_REGION='bogus-region',
SESSION_DYNAMODB_ENDPOINT='http://bogus:1234'
)
def create_test_app(**kwargs):
app = flask.Flask(__name__)
app.config.update(**kwargs)
Session(app)
@app.route('/test_route')
def test_route():
flask.session['x'] = 'foo'
return flask.make_response('', 200)
return app
def test_save_uses_header(mocker):
boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
boto_mock_instance = boto_mock()
app = create_test_app(
SESSION_DYNAMODB_USE_HEADER=True
)
mocker.spy(boto_mock, 'update_item')
response = app.test_client().get('/test_route')
# Find the session ID that was passed to update_item()
session_id = None
match = re.search("Key={'id': {'S': '(.+?)'}}", str(boto_mock_instance.update_item.call_args))
if match:
session_id = match.group(1)
assert 'X-SessionId' in response.headers
assert response.headers['X-SessionId'] == session_id
assert 'Set-Cookie' not in response.headers
def test_read_uses_header(mocker):
expected_session_id = 'foobar'
boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
boto_mock_instance = boto_mock()
boto_mock_instance.get_item.return_value = {'Item': {'data': ''}}
app = create_test_app(
SESSION_DYNAMODB_USE_HEADER=True
)
mocker.spy(boto_mock, 'get_item')
response = app.test_client().get('/test_route', headers={'X-SessionId': expected_session_id})
# Find the session ID that was passed to get_item()
actual_session_id = None
match = re.search("Key={'id': {'S': '(.+?)'}}", str(boto_mock_instance.get_item.call_args))
if match:
actual_session_id = match.group(1)
assert actual_session_id == expected_session_id
def test_consistent_read_default_false(mocker):
boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
boto_mock_instance = boto_mock()
boto_mock_instance.get_item.return_value = {'Item': {'data': ''}}
app = create_test_app(
SESSION_DYNAMODB_USE_HEADER=True
)
mocker.spy(boto_mock, 'get_item')
response = app.test_client().get('/test_route', headers={'X-SessionId': 'foo'})
# Validate ConsistentRead setting
assert 'ConsistentRead=False' in str(boto_mock_instance.get_item.call_args)
def test_consistent_read_true(mocker):
boto_mock = mocker.patch('flask_dynamodb_sessions.boto3.client')
boto_mock_instance = boto_mock()
boto_mock_instance.get_item.return_value = {'Item': {'data': ''}}
app = create_test_app(
SESSION_DYNAMODB_USE_HEADER=True,
SESSION_DYNAMODB_CONSISTENT_READ=True
)
mocker.spy(boto_mock, 'get_item')
response = app.test_client().get('/test_route', headers={'X-SessionId': 'foo'})
# Validate ConsistentRead setting
assert 'ConsistentRead=True' in str(boto_mock_instance.get_item.call_args)
|
[
"flask.Flask",
"pytest_mock.mocker.spy",
"pytest_mock.mocker.patch",
"flask.make_response",
"flask_dynamodb_sessions.Session"
] |
[((195, 247), 'pytest_mock.mocker.patch', 'mocker.patch', (['"""flask_dynamodb_sessions.boto3.client"""'], {}), "('flask_dynamodb_sessions.boto3.client')\n", (207, 247), False, 'from pytest_mock import mocker\n'), ((259, 280), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (270, 280), False, 'import flask\n'), ((456, 477), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (467, 477), False, 'import flask\n'), ((514, 526), 'flask_dynamodb_sessions.Session', 'Session', (['app'], {}), '(app)\n', (521, 526), False, 'from flask_dynamodb_sessions import Session\n'), ((728, 780), 'pytest_mock.mocker.patch', 'mocker.patch', (['"""flask_dynamodb_sessions.boto3.client"""'], {}), "('flask_dynamodb_sessions.boto3.client')\n", (740, 780), False, 'from pytest_mock import mocker\n'), ((897, 933), 'pytest_mock.mocker.spy', 'mocker.spy', (['boto_mock', '"""update_item"""'], {}), "(boto_mock, 'update_item')\n", (907, 933), False, 'from pytest_mock import mocker\n'), ((1457, 1509), 'pytest_mock.mocker.patch', 'mocker.patch', (['"""flask_dynamodb_sessions.boto3.client"""'], {}), "('flask_dynamodb_sessions.boto3.client')\n", (1469, 1509), False, 'from pytest_mock import mocker\n'), ((1696, 1729), 'pytest_mock.mocker.spy', 'mocker.spy', (['boto_mock', '"""get_item"""'], {}), "(boto_mock, 'get_item')\n", (1706, 1729), False, 'from pytest_mock import mocker\n'), ((2187, 2239), 'pytest_mock.mocker.patch', 'mocker.patch', (['"""flask_dynamodb_sessions.boto3.client"""'], {}), "('flask_dynamodb_sessions.boto3.client')\n", (2199, 2239), False, 'from pytest_mock import mocker\n'), ((2426, 2459), 'pytest_mock.mocker.spy', 'mocker.spy', (['boto_mock', '"""get_item"""'], {}), "(boto_mock, 'get_item')\n", (2436, 2459), False, 'from pytest_mock import mocker\n'), ((2721, 2773), 'pytest_mock.mocker.patch', 'mocker.patch', (['"""flask_dynamodb_sessions.boto3.client"""'], {}), "('flask_dynamodb_sessions.boto3.client')\n", (2733, 2773), False, 'from pytest_mock import 
mocker\n'), ((3007, 3040), 'pytest_mock.mocker.spy', 'mocker.spy', (['boto_mock', '"""get_item"""'], {}), "(boto_mock, 'get_item')\n", (3017, 3040), False, 'from pytest_mock import mocker\n'), ((630, 658), 'flask.make_response', 'flask.make_response', (['""""""', '(200)'], {}), "('', 200)\n", (649, 658), False, 'import flask\n')]
|
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('..'))
from uplink.add_entry import add_entry
class TestAddEntry(unittest.TestCase):
def test_one_one(self):
self.assertEqual(1 + 1, 2)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.path.abspath"
] |
[((57, 78), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (72, 78), False, 'import os\n'), ((255, 270), 'unittest.main', 'unittest.main', ([], {}), '()\n', (268, 270), False, 'import unittest\n')]
|
"""
PyCLES
Desc: This is an implementation of the Common Language Effect Size (CLES) in Python
Author: <NAME>
Date: 04/05/20
"""
import numpy as np
from scipy.stats import norm
def nonparametric_cles(a, b, half_credit=True) -> float:
"""Nonparametric solver for the common language effect size. This solves
for the probability that a random draw from `a` will be greater than a random
draw from `b` using a brute force approach.
If half_credit=True then equal values between vectors will be granted half points.
e.g.
nonparametric_cles([0, 1], [0, 0], True) >> 0.75
nonparametric_cles([0, 1], [0, 0], False) >> 0.5
nonparametric_cles([1, 1], [0, 0]) >> 1.0
nonparametric_cles([0, 0], [1, 1]) >> 0.0
"""
m = np.subtract.outer(a, b)
m = np.sign(m)
if half_credit:
m = np.where(m == 0, 0.5, m)
m = np.where(m == -1, 0, m)
return np.mean(m)
def parametric_cles(a, b):
"""Parametric solver for the common language effect size. This function
assumes that your data is normally distributed. It returns the probability
that a random draw from `a` will be greater than a random draw from `b` using
the normal cumulative distribution function."""
ma, mb = np.mean(a), np.mean(b)
sd = np.sqrt(ma**2 + mb**2)
return norm.cdf(x=0, loc=mb-ma, scale=sd)
|
[
"numpy.subtract.outer",
"scipy.stats.norm.cdf",
"numpy.where",
"numpy.mean",
"numpy.sign",
"numpy.sqrt"
] |
[((789, 812), 'numpy.subtract.outer', 'np.subtract.outer', (['a', 'b'], {}), '(a, b)\n', (806, 812), True, 'import numpy as np\n'), ((821, 831), 'numpy.sign', 'np.sign', (['m'], {}), '(m)\n', (828, 831), True, 'import numpy as np\n'), ((902, 925), 'numpy.where', 'np.where', (['(m == -1)', '(0)', 'm'], {}), '(m == -1, 0, m)\n', (910, 925), True, 'import numpy as np\n'), ((942, 952), 'numpy.mean', 'np.mean', (['m'], {}), '(m)\n', (949, 952), True, 'import numpy as np\n'), ((1318, 1344), 'numpy.sqrt', 'np.sqrt', (['(ma ** 2 + mb ** 2)'], {}), '(ma ** 2 + mb ** 2)\n', (1325, 1344), True, 'import numpy as np\n'), ((1353, 1389), 'scipy.stats.norm.cdf', 'norm.cdf', ([], {'x': '(0)', 'loc': '(mb - ma)', 'scale': 'sd'}), '(x=0, loc=mb - ma, scale=sd)\n', (1361, 1389), False, 'from scipy.stats import norm\n'), ((869, 893), 'numpy.where', 'np.where', (['(m == 0)', '(0.5)', 'm'], {}), '(m == 0, 0.5, m)\n', (877, 893), True, 'import numpy as np\n'), ((1286, 1296), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (1293, 1296), True, 'import numpy as np\n'), ((1298, 1308), 'numpy.mean', 'np.mean', (['b'], {}), '(b)\n', (1305, 1308), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
try:
from debian.changelog import Changelog
except ImportError:
class Changelog(object):
def __init__(self, _):
pass
def get_version(self):
return '0.0.0'
from os import environ
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
here = abspath(dirname(__file__))
changelog = join(here, 'debian/changelog')
requirements = open(join(here, 'requires.txt')).readlines()
dev_requirements = open(join(here, 'dev_requires.txt')).readlines()
additional = {}
# debhelper setup FAKEROOTKEY variable
if 'FAKEROOTKEY' not in environ:
additional['entry_points'] = {'console_scripts': [
'homebank-web = homebank.cli:manage'
]}
requirements.extend(dev_requirements)
setup(
name='homebank-wui',
version=str(Changelog(open(changelog)).get_version()),
description='Web User Interface for Homebank',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/rembish/homebank-wui',
packages=find_packages(),
include_package_data=True,
data_files=[
('/usr/share/homebank/', ['data/sample.xhb']),
],
zip_safe=False,
install_requires=requirements,
**additional)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((384, 414), 'os.path.join', 'join', (['here', '"""debian/changelog"""'], {}), "(here, 'debian/changelog')\n", (388, 414), False, 'from os.path import abspath, dirname, join\n'), ((353, 370), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (360, 370), False, 'from os.path import abspath, dirname, join\n'), ((1039, 1054), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1052, 1054), False, 'from setuptools import setup, find_packages\n'), ((435, 461), 'os.path.join', 'join', (['here', '"""requires.txt"""'], {}), "(here, 'requires.txt')\n", (439, 461), False, 'from os.path import abspath, dirname, join\n'), ((499, 529), 'os.path.join', 'join', (['here', '"""dev_requires.txt"""'], {}), "(here, 'dev_requires.txt')\n", (503, 529), False, 'from os.path import abspath, dirname, join\n')]
|
from pydevd_constants import * #@UnusedWildImport
from pydevd_file_utils import GetFilenameAndBase
from _pydev_imps import _pydev_thread
threadingCurrentThread = threading.currentThread
DEBUG = False
#=======================================================================================================================
# CustomFramesContainer
#=======================================================================================================================
class CustomFramesContainer:
pass
def CustomFramesContainerInit(): #Note: no staticmethod on jython 2.1 (so, use free-function)
CustomFramesContainer.custom_frames_lock = _pydev_thread.allocate_lock()
# custom_frames can only be accessed if properly locked with custom_frames_lock!
# Key is a string identifying the frame (as well as the thread it belongs to).
# Value is a CustomFrame.
#
CustomFramesContainer.custom_frames = {}
# Only to be used in this module
CustomFramesContainer._next_frame_id = 0
# This is the event we must set to release an internal process events. It's later set by the actual debugger
# when we do create the debugger.
CustomFramesContainer._py_db_command_thread_event = Null()
#Initialize it the first time (it may be reinitialized later on when dealing with a fork).
CustomFramesContainerInit()
#=======================================================================================================================
# CustomFrame
#=======================================================================================================================
class CustomFrame:
def __init__(self, name, frame, thread_id):
# 0 = string with the representation of that frame
self.name = name
# 1 = the frame to show
self.frame = frame
# 2 = an integer identifying the last time the frame was changed.
self.mod_time = 0
# 3 = the thread id of the given frame
self.thread_id = thread_id
def addCustomFrame(frame, name, thread_id):
CustomFramesContainer.custom_frames_lock.acquire()
try:
curr_thread_id = GetThreadId(threadingCurrentThread())
next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1
# Note: the frame id kept contains an id and thread information on the thread where the frame was added
# so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).
frame_id = '__frame__:%s|%s' % (next_id, curr_thread_id)
if DEBUG:
sys.stderr.write('addCustomFrame: %s (%s) %s %s\n' % (
frame_id, GetFilenameAndBase(frame)[1], frame.f_lineno, frame.f_code.co_name))
CustomFramesContainer.custom_frames[frame_id] = CustomFrame(name, frame, thread_id)
CustomFramesContainer._py_db_command_thread_event.set()
return frame_id
finally:
CustomFramesContainer.custom_frames_lock.release()
def updateCustomFrame(frame_id, frame, thread_id, name=None):
CustomFramesContainer.custom_frames_lock.acquire()
try:
if DEBUG:
sys.stderr.write('updateCustomFrame: %s\n' % frame_id)
try:
old = CustomFramesContainer.custom_frames[frame_id]
if name is not None:
old.name = name
old.mod_time += 1
old.thread_id = thread_id
except:
sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_id,))
import traceback;traceback.print_exc()
CustomFramesContainer._py_db_command_thread_event.set()
finally:
CustomFramesContainer.custom_frames_lock.release()
def getCustomFrame(thread_id, frame_id):
'''
:param thread_id: This should actually be the frame_id which is returned by addCustomFrame.
:param frame_id: This is the actual id() of the frame
'''
CustomFramesContainer.custom_frames_lock.acquire()
try:
frame_id = int(frame_id)
f = CustomFramesContainer.custom_frames[thread_id].frame
while f is not None:
if id(f) == frame_id:
return f
f = f.f_back
finally:
f = None
CustomFramesContainer.custom_frames_lock.release()
def removeCustomFrame(frame_id):
CustomFramesContainer.custom_frames_lock.acquire()
try:
if DEBUG:
sys.stderr.write('removeCustomFrame: %s\n' % frame_id)
DictPop(CustomFramesContainer.custom_frames, frame_id, None)
CustomFramesContainer._py_db_command_thread_event.set()
finally:
CustomFramesContainer.custom_frames_lock.release()
|
[
"traceback.print_exc",
"_pydev_imps._pydev_thread.allocate_lock",
"pydevd_file_utils.GetFilenameAndBase"
] |
[((659, 688), '_pydev_imps._pydev_thread.allocate_lock', '_pydev_thread.allocate_lock', ([], {}), '()\n', (686, 688), False, 'from _pydev_imps import _pydev_thread\n'), ((3629, 3650), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3648, 3650), False, 'import traceback\n'), ((2756, 2781), 'pydevd_file_utils.GetFilenameAndBase', 'GetFilenameAndBase', (['frame'], {}), '(frame)\n', (2774, 2781), False, 'from pydevd_file_utils import GetFilenameAndBase\n')]
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
CNI implementation
Demultiplexes on the CNI_COMMAND and runs the necessary operation
"""
import ctypes
import errno
import inspect
import json
import os
import sys
import logging
from pyroute2 import NetlinkError, IPRoute
from interface import Interface as CniInterface
from interface import CniNamespace as CniNamespace
from cni import Error as Error
CNI_ERROR_GET_PARENT_INTF = 401
CNI_ERROR_ADD_VLAN_INTF = 402
CNI_ERROR_DEL_VLAN_INTF = 403
CNI_ERROR_ADD_MACVLAN = 404
CNI_ERROR_DEL_MACVLAN = 405
# logger for the file
logger = None
class CniMacVlan(CniInterface, object):
'''
Class to manage macvlan interfaces for containers.
This is typically used in nested-k8s scenario where containers are spawned
inside the container. The VMI for container is modeled as sub-interface in
this case.
The class creates a vlan-interface corresponding to the vlan in
sub-interface and then creates a macvlan interface over it.
'''
def __init__(self, cni, mac, host_ifname, tag):
self.pid = os.getpid()
self.container_mac = mac
self.host_ifname = host_ifname
self.vlan_tag = tag
self.vlan_ifname = CniMacVlan._make_vlan_intf_name(tag)
CniInterface.__init__(self, cni)
return
@staticmethod
def _make_vlan_intf_name(tag):
return 'cn-' + str(tag)
def delete_interface(self):
'''
Delete the interface.
Deletes both VLAN Tag interface and MACVlan interface
'''
# Find the VLAN interface interface from the MACVlan interface
link = self.get_link()
if link is None:
return
vlan_idx = None
for i in link[0]['attrs']:
if (i[0] == 'IFLA_LINK'):
vlan_idx = i[1]
break
if vlan_idx is None:
raise Error(CNI_ERROR_DEL_VLAN_INTF,
'Error finding vlan interface. Interface inside ' +
' container ' + self.cni.container_ifname)
# Delete the VLAN Tag interface.
# It will delete the interface inside container also
try:
iproute = IPRoute()
iproute.link('del', index=vlan_idx)
except NetlinkError as e:
raise Error(CNI_ERROR_DEL_VLAN_INTF,
'Error deleting VLAN interface. Parent interface ' +
self.host_ifname + ' vlan-tag ' + self.vlan_tag +
' vlan-ifindex ' + str(vlan_idx) +
' code ' + str(e.code) + ' message ' + e.message)
return
def _locate_parent_interface(self, iproute):
# Ensure the host parent-interface is preset in host network-namespace
host_if = iproute.link_lookup(ifname=self.host_ifname)
if len(host_if) == 0:
raise Error(CNI_ERROR_GET_PARENT_INTF,
'Error creating parent interface ' +
self.host_ifname + '. Interface not found')
return host_if[0]
def _locate_vlan_interface(self, iproute, parent_ifindex):
# Ensure vlan-interface is created in the host network-namespace
vlan_if = iproute.link_lookup(ifname=self.vlan_ifname)
if len(vlan_if) is not 0:
# vlan-interface already present
return vlan_if[0]
try:
# Create vlan-interface
iproute.link('add', ifname=self.vlan_ifname, kind='vlan',
vlan_id=self.vlan_tag, link=parent_ifindex)
except NetlinkError as e:
if e.code != errno.EEXIST:
raise Error(CNI_ERROR_ADD_VLAN_INTF,
'Error creating vlan interface. ' +
' Parent interface ' + self.host_ifname +
' vlan id ' + str(self.vlan_tag) +
' vlan ifname ' + self.vlan_ifname +
' code ' + str(e.code) +
' message ' + e.message)
vlan_if = iproute.link_lookup(ifname=self.vlan_ifname)
return vlan_if[0]
# Ensure the temporary interface is created and moved to
# container network-namespace
def _locate_peer_vlan_interface(self, iproute, cn_iproute, vlan_ifindex,
cn_ifname):
# Check if interface already present in container network-namespace
cn_intf = cn_iproute.link_lookup(ifname=cn_ifname)
if len(cn_intf) is not 0:
return cn_intf[0]
# Interface not present inside container.
# Check if it was already created in host network-namespace
cn_intf = iproute.link_lookup(ifname=cn_ifname)
if len(cn_intf) == 0:
# Interface not present in host network-namespace also
# Create interface in host-os first
try:
iproute.link('add', ifname=cn_ifname, kind='macvlan',
link=vlan_ifindex, macvlan_mode='vepa')
except NetlinkError as e:
if e.code != errno.EEXIST:
raise Error(CNI_ERROR_ADD_MACVLAN,
'Error creating macvlan interface ' +
cn_ifname +
' vlan iterface ' + self.vlan_ifname +
' code ' + str(e.code) +
' message ' + e.message)
cn_intf = iproute.link_lookup(ifname=cn_ifname)
# Move the temporary interface to container network-namespace
with CniNamespace(self.cni.container_netns):
iproute.link('set', index=cn_intf[0], net_ns_pid=self.pid)
return cn_intf[0]
def _move_link(self, cn_iproute, cn_intf):
with CniNamespace(self.cni.container_netns):
cn_iproute.link('set', index=cn_intf,
ifname=self.cni.container_ifname)
return
def create_interface(self):
'''
Create MACVlan interface
Creates VLAN interface first based on VLAN tag for sub-interface
then create macvlan interface above the vlan interface
'''
# First check if interface already present inside container
if self.get_link() is not None:
return
if self.vlan_tag is None:
raise Error(CNI_ERROR_ADD_VLAN_INTF,
'Missing vlan-tag for macvlan interface' )
if self.host_ifname is None:
raise Error(CNI_ERROR_ADD_VLAN_INTF,
'Missing parent-interface for macvlan interface')
# Open IPRoute socket in both host and container network namespaces
iproute = IPRoute()
cn_iproute = None
with CniNamespace(self.cni.container_netns):
cn_iproute = IPRoute()
# Locate the parent interface in host-os network-namespace
host_ifindex = self._locate_parent_interface(iproute)
# Locate vlan interface in host-os network-namespace
vlan_ifindex = self._locate_vlan_interface(iproute, host_ifindex)
# Creating interface inside container involves following steps,
# 1. Create a macvlan interface in host network-namespace with a
# temporary name
# 2. Move temporary interface inside container
# 3. Rename temporary interface to configured name inside container
# We must also ensure that we recover from any of the failed state
# in earlier invocation
# Ensure temporary interface present inside container
cn_ifname = self.vlan_ifname + '-cn'
cn_ifindex = self._locate_peer_vlan_interface(iproute, cn_iproute,
vlan_ifindex, cn_ifname)
# Move temporary interface to container-ifname
self._move_link(cn_iproute, cn_ifindex)
return
def configure_interface(self, ip4, plen, gw):
# Set link-up for interface on host-os
iproute = IPRoute()
idx = iproute.link_lookup(ifname=self.vlan_ifname)[0]
iproute.link('set', index=idx, state='up')
super(CniMacVlan, self).configure_interface(ip4, plen, gw)
|
[
"cni.Error",
"os.getpid",
"interface.CniNamespace",
"interface.Interface.__init__",
"pyroute2.IPRoute"
] |
[((1153, 1164), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1162, 1164), False, 'import os\n'), ((1337, 1369), 'interface.Interface.__init__', 'CniInterface.__init__', (['self', 'cni'], {}), '(self, cni)\n', (1358, 1369), True, 'from interface import Interface as CniInterface\n'), ((6830, 6839), 'pyroute2.IPRoute', 'IPRoute', ([], {}), '()\n', (6837, 6839), False, 'from pyroute2 import NetlinkError, IPRoute\n'), ((8129, 8138), 'pyroute2.IPRoute', 'IPRoute', ([], {}), '()\n', (8136, 8138), False, 'from pyroute2 import NetlinkError, IPRoute\n'), ((1966, 2100), 'cni.Error', 'Error', (['CNI_ERROR_DEL_VLAN_INTF', "('Error finding vlan interface. Interface inside ' + ' container ' + self.\n cni.container_ifname)"], {}), "(CNI_ERROR_DEL_VLAN_INTF, \n 'Error finding vlan interface. Interface inside ' + ' container ' +\n self.cni.container_ifname)\n", (1971, 2100), True, 'from cni import Error as Error\n'), ((2278, 2287), 'pyroute2.IPRoute', 'IPRoute', ([], {}), '()\n', (2285, 2287), False, 'from pyroute2 import NetlinkError, IPRoute\n'), ((2958, 3076), 'cni.Error', 'Error', (['CNI_ERROR_GET_PARENT_INTF', "('Error creating parent interface ' + self.host_ifname +\n '. Interface not found')"], {}), "(CNI_ERROR_GET_PARENT_INTF, 'Error creating parent interface ' + self.\n host_ifname + '. 
Interface not found')\n", (2963, 3076), True, 'from cni import Error as Error\n'), ((5703, 5741), 'interface.CniNamespace', 'CniNamespace', (['self.cni.container_netns'], {}), '(self.cni.container_netns)\n', (5715, 5741), True, 'from interface import CniNamespace as CniNamespace\n'), ((5903, 5941), 'interface.CniNamespace', 'CniNamespace', (['self.cni.container_netns'], {}), '(self.cni.container_netns)\n', (5915, 5941), True, 'from interface import CniNamespace as CniNamespace\n'), ((6476, 6548), 'cni.Error', 'Error', (['CNI_ERROR_ADD_VLAN_INTF', '"""Missing vlan-tag for macvlan interface"""'], {}), "(CNI_ERROR_ADD_VLAN_INTF, 'Missing vlan-tag for macvlan interface')\n", (6481, 6548), True, 'from cni import Error as Error\n'), ((6630, 6715), 'cni.Error', 'Error', (['CNI_ERROR_ADD_VLAN_INTF', '"""Missing parent-interface for macvlan interface"""'], {}), "(CNI_ERROR_ADD_VLAN_INTF, 'Missing parent-interface for macvlan interface'\n )\n", (6635, 6715), True, 'from cni import Error as Error\n'), ((6879, 6917), 'interface.CniNamespace', 'CniNamespace', (['self.cni.container_netns'], {}), '(self.cni.container_netns)\n', (6891, 6917), True, 'from interface import CniNamespace as CniNamespace\n'), ((6944, 6953), 'pyroute2.IPRoute', 'IPRoute', ([], {}), '()\n', (6951, 6953), False, 'from pyroute2 import NetlinkError, IPRoute\n')]
|
# Author: Yubo "Paul" Yang
# Email: <EMAIL>
# Kyrt is a versatile fabric exclusive to the planet Florina of Sark.
# The fluorescent and mutable kyrt is ideal for artsy decorations.
# OK, this is a library of reasonable defaults for matplotlib figures.
# May this library restore elegance to your plots.
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
# ======================== library of defaults =========================
# expose some default colors for convenience
from matplotlib.cm import get_cmap
cmap = get_cmap('viridis')
colors = cmap.colors # 256 default colors
dark8 = [ # Colors from www.ColorBrewer.org by Cynthia A. Brewer, Geography, Pennsylvania State University.
'#1b9e77',
'#d95f02',
'#7570b3',
'#e7298a',
'#66a61e',
'#e6ab02',
'#a6761d',
'#666666'
]
errorbar_style = {
'cyq': {
'linestyle': 'none', # do 1 thing
'markersize': 3.5, # readable
'markeredgecolor': 'black', # accentuate
'markeredgewidth': 0.3,
'capsize': 4,
'elinewidth': 0.5
}
}
# ======================== level 0: basic color =========================
def get_cmap(name='viridis'):
""" return color map by name
Args:
name (str, optional): name of color map, default 'viridis'
Return:
matplotlib.colors.ListedColormap: requested colormap
"""
from matplotlib import cm
cmap = cm.get_cmap(name)
return cmap
def get_norm(vmin, vmax):
""" return norm function for scalar in range (vmin, vmax)
Args:
vmin (float): value minimum
vmax (float): value maximum
Return:
matplotlib.colors.Normalize: color normalization function
"""
norm = plt.Normalize(vmin, vmax)
return norm
def scalar_colormap(vmin, vmax, name='viridis'):
""" return a function that maps a number to a color
Args:
vmin (float): minimum scalar value
vmax (float): maximum scalar value
name (str, optional): color map name, default is 'viridis'
Return:
function: float -> (float,)*4 RGBA color space
"""
cmap = get_cmap(name)
norm = get_norm(vmin, vmax)
def v2c(v): # function mapping value to color
return cmap(norm(v))
return v2c
def scalar_colorbar(vmin, vmax, name='viridis', **kwargs):
""" return a colorbar for scalar_color_map()
Args:
vmin (float): minimum scalar value
vmax (float): maximum scalar value
name (str, optional): color map name, default is 'viridis'
Return:
matplotlib.colorbar.Colorbar: colorbar
"""
cmap = get_cmap(name)
norm = get_norm(vmin, vmax)
# issue 3644
sm = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
sm.set_array([])
cbar = plt.colorbar(sm, **kwargs)
return cbar
# ======================== level 0: basic ax edits =========================
def figaxad(labelsize=12):
""" construct a absolute/difference (ad) figure
top 3/4 of the plot will be comparison at an absolute scale
bottom 1/4 of the plot will be comparison at a relative scale
Args:
labelsize (int, optional): tick label size
Return:
(fig, axa, axd): figure and axes for absolute and difference plots
"""
from matplotlib.gridspec import GridSpec
gs = GridSpec(4, 4)
fig = plt.figure()
axa = fig.add_subplot(gs[0:3, :])
axd = fig.add_subplot(gs[3, :], sharex=axa)
plt.setp(axa.get_xticklabels(), visible=False)
axa.tick_params(axis='y', labelsize=labelsize)
axd.tick_params(labelsize=labelsize)
fig.subplots_adjust(hspace=0)
return fig, axa, axd
def set_xy_format(ax, xfmt='%3.2f', yfmt='%3.2f'):
""" change x,y tick formats e.g. number of digits
Args:
ax (plt.Axes): matplotlib axes
xfmt (int,optional): xtick format, default is '%3.2f'
yfmt (int,optional): ytick format, default is '%3.2f'
"""
ax.get_xaxis().set_major_formatter(FormatStrFormatter(xfmt))
ax.get_yaxis().set_major_formatter(FormatStrFormatter(yfmt))
def set_tick_font(ax, xsize=14, ysize=14,
xweight='bold', yweight='bold', **kwargs):
""" change x,y tick fonts
Args:
ax (plt.Axes): matplotlib axes
xsize (int,optional): xtick fontsize, default is 14
ysize (int,optional): ytick fontsize, default is 14
xweight (str,optional): xtick fontweight, default is 'bold'
yweight (str,optional): ytick fontweight, default is 'bold'
kwargs (dict): other tick-related properties
"""
plt.setp(ax.get_xticklabels(), fontsize=xsize,
fontweight=xweight, **kwargs)
plt.setp(ax.get_yticklabels(), fontsize=ysize,
fontweight=yweight, **kwargs)
def set_label_font(ax, xsize=14, ysize=14,
xweight='bold', yweight='bold', **kwargs):
""" change x,y label fonts
Args:
ax (plt.Axes): matplotlib axes
xsize (int,optional): xlabel fontsize, default is 14
ysize (int,optional): ylabel fontsize, default is 14
xweight (str,optional): xlabel fontweight, default is 'bold'
yweight (str,optional): ylabel fontweight, default is 'bold'
kwargs (dict): other label-related properties
"""
plt.setp(ax.xaxis.label, fontsize=xsize,
fontweight=xweight, **kwargs)
plt.setp(ax.yaxis.label, fontsize=ysize,
fontweight=yweight, **kwargs)
def xtop(ax):
""" move xaxis label and ticks to the top
Args:
ax (plt.Axes): matplotlib axes
"""
xaxis = ax.get_xaxis()
xaxis.tick_top()
xaxis.set_label_position('top')
def yright(ax):
""" move yaxis label and ticks to the right
Args:
ax (plt.Axes): matplotlib axes
"""
yaxis = ax.get_yaxis()
yaxis.tick_right()
yaxis.set_label_position('right')
# ======================= level 1: advanced ax edits ========================
def cox(ax, x, xtlabels):
"""Add co-xticklabels at top of the plot, e.g., with a different unit
Args:
ax (plt.Axes): matplotlib axes
x (list): xtick locations
xtlabels (list): xtick labels
"""
ax1 = ax.twiny()
ax1.set_xlim(ax.get_xlim())
ax.set_xticks(x)
ax1.set_xticks(x)
ax1.set_xticklabels(xtlabels)
xtop(ax1)
return ax1
def coy(ax, y, ytlabels):
"""Add co-yticklabels on the right of the plot, e.g., with a different unit
Args:
ax (plt.Axes): matplotlib axes
y (list): ytick locations
ytlabels (list): ytick labels
"""
ax1 = ax.twinx()
ax1.set_ylim(ax.get_ylim())
ax.set_yticks(y)
ax1.set_yticks(y)
ax1.set_yticklabels(ytlabels)
yright(ax1)
return ax1
def align_ylim(ax1, ax2):
ylim1 = ax1.get_ylim()
ylim2 = ax2.get_ylim()
ymin = min(ylim1[0], ylim2[0])
ymax = max(ylim1[1], ylim2[1])
ylim = (ymin, ymax)
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
# ====================== level 0: basic legend edits =======================
def set_legend_marker_size(leg, ms=10):
handl = leg.legendHandles
msl = [ms]*len(handl) # override marker sizes here
for hand, ms in zip(handl, msl):
hand._legmarker.set_markersize(ms)
def create_legend(ax, styles, labels, **kwargs):
""" create custom legend
learned from "Composing Custom Legends"
Args:
ax (plt.Axes): matplotlib axes
Return:
plt.legend.Legend: legend artist
"""
from matplotlib.lines import Line2D
custom_lines = [Line2D([], [], **style) for style in styles]
leg = ax.legend(custom_lines, labels, **kwargs)
return leg
# ====================== level 0: global edits =======================
def set_style(style='ticks', context='talk', **kwargs):
import seaborn as sns
if (context=='talk') and ('font_scale' not in kwargs):
kwargs['font_scale'] = 0.7
sns.set_style(style)
sns.set_context(context, **kwargs)
# ====================== level 0: basic Line2D edits =======================
def get_style(line):
""" get plot styles from Line2D object
mostly copied from "Line2D.update_from"
Args:
line (Line2D): source of style
Return:
dict: line styles readily usable for another plot
"""
styles = {
'linestyle': line.get_linestyle(),
'linewidth': line.get_linewidth(),
'color': line.get_color(),
'markersize': line.get_markersize(),
'linestyle': line.get_linestyle(),
'marker': line.get_marker()
}
return styles
# ====================== level 0: basic Line2D =======================
def errorshade(ax, x, ym, ye, **kwargs):
line = ax.plot(x, ym, **kwargs)
alpha = 0.4
myc = line[0].get_color()
eline = ax.fill_between(x, ym-ye, ym+ye, color=myc, alpha=alpha)
return line, eline
# ===================== level 1: fit line ======================
def show_fit(ax, line, model, sel=None, nx=64, popt=None,
xmin=None, xmax=None, circle=True, circle_style=None,
cross=False, cross_style=None, **kwargs):
""" fit a segment of (x, y) data and show fit
get x, y data from line; use sel to make selection
Args:
ax (Axes): matplotlib axes
line (Line2D): line with data
model (callable): model function
sel (np.array, optional): boolean selector array
nx (int, optional): grid size, default 64
xmin (float, optional): grid min
xmax (float, optional): grid max
circle (bool, optional): circle selected points, default True
cross (bool, optional): cross out deselected points, default False
Return:
(np.array, np.array, list): (popt, perr, lines)
"""
import numpy as np
from scipy.optimize import curve_fit
# get and select data to fit
myx = line.get_xdata()
myy = line.get_ydata()
# show selected data
if sel is None:
sel = np.ones(len(myx), dtype=bool)
myx1 = myx[sel]
myy1 = myy[sel]
myx11 = myx[~sel]
myy11 = myy[~sel]
if xmin is None:
xmin = myx1.min()
if xmax is None:
xmax = myx1.max()
lines = []
if circle:
styles = get_style(line)
styles['linestyle'] = ''
styles['marker'] = 'o'
styles['fillstyle'] = 'none'
if circle_style is not None:
styles.update(circle_style)
line1 = ax.plot(myx[sel], myy[sel], **styles)
lines.append(line1[0])
if cross:
styles = get_style(line)
styles['linestyle'] = ''
styles['marker'] = 'x'
if cross_style is not None:
styles.update(cross_style)
line11 = ax.plot(myx11, myy11, **styles)
lines.append(line11[0])
if popt is None: # perform fit
popt, pcov = curve_fit(model, myx1, myy1)
perr = np.sqrt(np.diag(pcov))
else:
perr = None
# show fit
finex = np.linspace(xmin, xmax, nx)
line2 = ax.plot(finex, model(finex, *popt),
c=line.get_color(), **kwargs)
lines.append(line2[0])
return popt, perr, lines
def smooth_bspline(myx, myy, nxmult=10, **spl_kws):
import numpy as np
from scipy.interpolate import splrep, splev
nx = len(myx)*nxmult
idx = np.argsort(myx)
tck = splrep(myx[idx], myy[idx], **spl_kws)
finex = np.linspace(myx.min(), myx.max(), nx)
finey = splev(finex, tck)
return finex, finey
def show_spline(ax, line, spl_kws=dict(), sel=None, **kwargs):
""" show a smooth spline through given line x y
Args:
ax (plt.Axes): matplotlib axes
line (Line1D): matplotlib line object
spl_kws (dict, optional): keyword arguments to splrep, default is empty
nx (int, optional): number of points to allocate to 1D grid
Return:
Line1D: interpolating line
"""
import numpy as np
myx = line.get_xdata()
myy = line.get_ydata()
if sel is None:
sel = np.ones(len(myx), dtype=bool)
myx = myx[sel]
myy = myy[sel]
finex, finey = smooth_bspline(myx, myy, **spl_kws)
color = line.get_color()
line1 = ax.plot(finex, finey, c=color, **kwargs)
return line1
def krig(finex, x0, y0, length_scale, noise_level):
from sklearn.gaussian_process.gpr import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, RBF
from sklearn.gaussian_process.kernels import WhiteKernel
kernel = DotProduct() + RBF(length_scale=length_scale)
kernel += WhiteKernel(noise_level=noise_level)
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(x0[:, None], y0)
ym, ye = gpr.predict(finex[:, None], return_std=True)
return ym, ye
def gpr_errorshade(ax, x, ym, ye,
length_scale, noise_level, fb_kwargs=None,
**kwargs):
"""WARNING: length_scale and noise_level are VERY DIFFICULT to tune """
# make errorbar plot and extract color
if ('ls' not in kwargs) and ('linestyle' not in kwargs):
kwargs['ls'] = ''
line = ax.errorbar(x, ym, ye, **kwargs)
myc = line[0].get_color()
# smoothly fit data
import numpy as np
dx = abs(x[1]-x[0])
xmin = x.min(); xmax = x.max()
finex = np.arange(xmin, xmax, dx/10.)
ylm, yle = krig(finex, x, ym-ye,
length_scale=length_scale, noise_level=noise_level)
yhm, yhe = krig(finex, x, ym+ye,
length_scale=length_scale, noise_level=noise_level)
# plot fit
if fb_kwargs is None:
fb_kwargs = {'color': myc, 'alpha': 0.4}
eline = ax.fill_between(finex, ylm-yle, yhm+yhe, **fb_kwargs)
return line[0], eline
# ===================== level 2: insets ======================
def inset_zoom(fig, ax_box, xlim, ylim, draw_func, xy_label=False):
""" show an inset that zooms into a given part of the figure
Args:
fig (plt.Figure): figure
ax_box (tuple): inset location and size (x0, y0, dx, dy) in figure ratio
xlim (tuple): (xmin, xmax)
ylim (tuple): (ymin, ymax)
draw_func (callable): draw_func(ax) should recreate the figure
xy_label (bool, optional): label inset axes, default is False
Return:
plt.Axes: inset axes
Example:
>>> ax1 = inset_zoom(fig, [0.15, 0.15, 0.3, 0.3], [0.1, 0.5], [-0.02, 0.01],
>>> lambda ax: ax.plot(x, y))
>>> ax.indicate_inset_zoom(axins)
"""
ax1 = fig.add_axes(ax_box)
ax1.set_xlim(*xlim)
ax1.set_ylim(*ylim)
draw_func(ax1)
if not xy_label:
ax1.set_xticks([])
ax1.set_yticks([])
return ax1
# ======================== composition =========================
def pretty_up(ax):
set_tick_font(ax)
set_label_font(ax)
|
[
"matplotlib.cm.get_cmap",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.diag",
"matplotlib.pyplot.Normalize",
"matplotlib.lines.Line2D",
"sklearn.gaussian_process.kernels.DotProduct",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.ticker.FormatStrFormatter",
"numpy.linspace",
"scipy.interpolate.splrep",
"seaborn.set_context",
"seaborn.set_style",
"sklearn.gaussian_process.kernels.RBF",
"scipy.optimize.curve_fit",
"sklearn.gaussian_process.kernels.WhiteKernel",
"scipy.interpolate.splev",
"matplotlib.gridspec.GridSpec",
"sklearn.gaussian_process.gpr.GaussianProcessRegressor"
] |
[((573, 592), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (581, 592), False, 'from matplotlib.cm import get_cmap\n'), ((1409, 1426), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['name'], {}), '(name)\n', (1420, 1426), False, 'from matplotlib import cm\n'), ((1688, 1713), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (1701, 1713), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2072), 'matplotlib.cm.get_cmap', 'get_cmap', (['name'], {}), '(name)\n', (2066, 2072), False, 'from matplotlib.cm import get_cmap\n'), ((2516, 2530), 'matplotlib.cm.get_cmap', 'get_cmap', (['name'], {}), '(name)\n', (2524, 2530), False, 'from matplotlib.cm import get_cmap\n'), ((2583, 2626), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (2604, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2681), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {}), '(sm, **kwargs)\n', (2667, 2681), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3186), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(4)'], {}), '(4, 4)\n', (3180, 3186), False, 'from matplotlib.gridspec import GridSpec\n'), ((3195, 3207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3205, 3207), True, 'import matplotlib.pyplot as plt\n'), ((4963, 5033), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.xaxis.label'], {'fontsize': 'xsize', 'fontweight': 'xweight'}), '(ax.xaxis.label, fontsize=xsize, fontweight=xweight, **kwargs)\n', (4971, 5033), True, 'import matplotlib.pyplot as plt\n'), ((5040, 5110), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.yaxis.label'], {'fontsize': 'ysize', 'fontweight': 'yweight'}), '(ax.yaxis.label, fontsize=ysize, fontweight=yweight, **kwargs)\n', (5048, 5110), True, 'import matplotlib.pyplot as plt\n'), ((7406, 7426), 'seaborn.set_style', 'sns.set_style', (['style'], {}), '(style)\n', (7419, 7426), True, 
'import seaborn as sns\n'), ((7429, 7463), 'seaborn.set_context', 'sns.set_context', (['context'], {}), '(context, **kwargs)\n', (7444, 7463), True, 'import seaborn as sns\n'), ((10171, 10198), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (10182, 10198), True, 'import numpy as np\n'), ((10482, 10497), 'numpy.argsort', 'np.argsort', (['myx'], {}), '(myx)\n', (10492, 10497), True, 'import numpy as np\n'), ((10506, 10543), 'scipy.interpolate.splrep', 'splrep', (['myx[idx]', 'myy[idx]'], {}), '(myx[idx], myy[idx], **spl_kws)\n', (10512, 10543), False, 'from scipy.interpolate import splrep, splev\n'), ((10602, 10619), 'scipy.interpolate.splev', 'splev', (['finex', 'tck'], {}), '(finex, tck)\n', (10607, 10619), False, 'from scipy.interpolate import splrep, splev\n'), ((11650, 11686), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {'noise_level': 'noise_level'}), '(noise_level=noise_level)\n', (11661, 11686), False, 'from sklearn.gaussian_process.kernels import WhiteKernel\n'), ((11695, 11734), 'sklearn.gaussian_process.gpr.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'kernel'}), '(kernel=kernel)\n', (11719, 11734), False, 'from sklearn.gaussian_process.gpr import GaussianProcessRegressor\n'), ((12301, 12333), 'numpy.arange', 'np.arange', (['xmin', 'xmax', '(dx / 10.0)'], {}), '(xmin, xmax, dx / 10.0)\n', (12310, 12333), True, 'import numpy as np\n'), ((3789, 3813), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['xfmt'], {}), '(xfmt)\n', (3807, 3813), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((3852, 3876), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['yfmt'], {}), '(yfmt)\n', (3870, 3876), False, 'from matplotlib.ticker import FormatStrFormatter\n'), ((7056, 7079), 'matplotlib.lines.Line2D', 'Line2D', (['[]', '[]'], {}), '([], [], **style)\n', (7062, 7079), False, 'from matplotlib.lines import Line2D\n'), ((10061, 10089), 
'scipy.optimize.curve_fit', 'curve_fit', (['model', 'myx1', 'myy1'], {}), '(model, myx1, myy1)\n', (10070, 10089), False, 'from scipy.optimize import curve_fit\n'), ((11592, 11604), 'sklearn.gaussian_process.kernels.DotProduct', 'DotProduct', ([], {}), '()\n', (11602, 11604), False, 'from sklearn.gaussian_process.kernels import DotProduct, RBF\n'), ((11607, 11637), 'sklearn.gaussian_process.kernels.RBF', 'RBF', ([], {'length_scale': 'length_scale'}), '(length_scale=length_scale)\n', (11610, 11637), False, 'from sklearn.gaussian_process.kernels import DotProduct, RBF\n'), ((10109, 10122), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (10116, 10122), True, 'import numpy as np\n')]
|
from django.utils.translation import ugettext_lazy as _
from mayan.apps.task_manager.classes import CeleryQueue
from mayan.apps.task_manager.workers import worker_d
queue_tools = CeleryQueue(label=_('Tools'), name='tools', worker=worker_d)
|
[
"django.utils.translation.ugettext_lazy"
] |
[((204, 214), 'django.utils.translation.ugettext_lazy', '_', (['"""Tools"""'], {}), "('Tools')\n", (205, 214), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
# this project is licensed under the WTFPLv2, see COPYING.txt for details
"""Helpers for lexer use
In EYE, builtin lexers from QScintilla are used. See :any:`PyQt5.Qsci.QsciLexer`.
"""
import mimetypes
from PyQt5.QtGui import QColor, QFont
from PyQt5.Qsci import (
QsciLexerBash, QsciLexerBatch, QsciLexerCPP, QsciLexerCSharp, QsciLexerJava, QsciLexerJavaScript,
QsciLexerCSS, QsciLexerD, QsciLexerFortran, QsciLexerHTML, QsciLexerXML, QsciLexerLua,
QsciLexerMakefile, QsciLexerPascal, QsciLexerPerl, QsciLexerPO, QsciLexerPostScript,
QsciLexerPOV, QsciLexerProperties, QsciLexerPython, QsciLexerRuby, QsciLexerSQL, QsciLexerTCL,
QsciLexerTeX, QsciLexerYAML, QsciLexerDiff,
)
__all__ = ('extensionToLexer', 'mimeToLexer', 'applyStyles', 'stylesFromLexer')
def stylesFromLexer(lexer):
"""Return the style names used by a QsciLexer object
Lexers provide a number of styles names, like "Comment", "Operator", "Identifier", etc.
"""
styles = {}
for i in range(1 << lexer.styleBitsNeeded()):
name = lexer.description(i)
if not name:
break
styles[name] = i
return styles
def applyStyles(lexer, spec):
styles = stylesFromLexer(lexer)
for name, values in spec:
style = styles.get(name, -1)
if style >= 0:
lexer.setColor(QColor(values[0]))
if len(values) > 1:
lexer.setPaper(QColor(values[1]))
if len(values) > 2:
lexer.setFont(QFont(values[2]))
_extensionLexer = {
'sh': QsciLexerBash,
'bash': QsciLexerBash,
'zsh': QsciLexerBash,
'bat': QsciLexerBatch,
'cmd': QsciLexerBatch,
'c': QsciLexerCPP,
'cc': QsciLexerCPP,
'cpp': QsciLexerCPP,
'cxx': QsciLexerCPP,
'h': QsciLexerCPP,
'hh': QsciLexerCPP,
'hpp': QsciLexerCPP,
'hxx': QsciLexerCPP,
'cs': QsciLexerCSharp,
'java': QsciLexerJava,
'js': QsciLexerJavaScript,
'json': QsciLexerJavaScript,
'css': QsciLexerCSS,
'd': QsciLexerD,
'patch': QsciLexerDiff,
'f': QsciLexerFortran,
'html': QsciLexerHTML,
'htm': QsciLexerHTML,
'xml': QsciLexerXML,
'lua': QsciLexerLua,
'Makefile': QsciLexerMakefile,
'pas': QsciLexerPascal,
'pl': QsciLexerPerl,
'pm': QsciLexerPerl,
'po': QsciLexerPO,
'pot': QsciLexerPO,
'ps': QsciLexerPostScript,
'pov': QsciLexerPOV,
'inc': QsciLexerPOV,
'properties': QsciLexerProperties,
'ini': QsciLexerProperties,
'py': QsciLexerPython,
'rb': QsciLexerRuby,
'sql': QsciLexerSQL,
'tcl': QsciLexerTCL,
'tex': QsciLexerTeX,
'yaml': QsciLexerYAML,
'yml': QsciLexerYAML,
}
def extensionToLexer(ext):
"""Return a QsciLexer corresponding to extension
If no appropriate lexer is found for `ext`, `None` is returned.
"""
if ext and ext.startswith('.'):
ext = ext[1:]
return _extensionLexer.get(ext)
def mimeToLexer(mime):
"""Return a QsciLexer corresponding to mimetype
If no appropriate lexer is found for `mime`, `None` is returned.
"""
return extensionToLexer(mimetypes.guess_extension(mime))
|
[
"mimetypes.guess_extension",
"PyQt5.QtGui.QFont",
"PyQt5.QtGui.QColor"
] |
[((2829, 2860), 'mimetypes.guess_extension', 'mimetypes.guess_extension', (['mime'], {}), '(mime)\n', (2854, 2860), False, 'import mimetypes\n'), ((1252, 1269), 'PyQt5.QtGui.QColor', 'QColor', (['values[0]'], {}), '(values[0])\n', (1258, 1269), False, 'from PyQt5.QtGui import QColor, QFont\n'), ((1313, 1330), 'PyQt5.QtGui.QColor', 'QColor', (['values[1]'], {}), '(values[1])\n', (1319, 1330), False, 'from PyQt5.QtGui import QColor, QFont\n'), ((1373, 1389), 'PyQt5.QtGui.QFont', 'QFont', (['values[2]'], {}), '(values[2])\n', (1378, 1389), False, 'from PyQt5.QtGui import QColor, QFont\n')]
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.
# Copyright (c) 2019 YunYang1994 <<EMAIL>>
# License: MIT (https://opensource.org/licenses/MIT)
# This file has been modified by Graphcore Ltd.
import argparse
import json
import math
import os
import shutil
import time
import numpy as np
import core.utils as utils
import cv2
import log
import tensorflow as tf
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from ipu_utils import stages_constructor
from log import logger
from tensorflow.python import ipu
from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops
class YoloTest(object):
def __init__(self, opts):
self.input_size = opts["test"]["input_size"]
self.classes = utils.read_class_names(opts["yolo"]["classes"])
self.num_classes = len(self.classes)
self.score_threshold = opts["test"]["score_threshold"]
self.iou_threshold = opts["test"]["iou_threshold"]
self.moving_avg_decay = opts["yolo"]["moving_avg_decay"]
self.annotation_path = opts["test"]["annot_path"]
self.weight_file = opts["test"]["weight_file"]
self.write_image = opts["test"]["write_image"]
self.write_image_path = opts["test"]["write_image_path"]
self.show_label = opts["test"]["show_label"]
self.batch_size = opts["test"]["batch_size"]
self.precision = tf.float16 if opts["yolo"]["precision"] == "fp16" else tf.float32
self.use_moving_avg = opts["yolo"]["use_moving_avg"]
self.repeat_count = opts["test"]["repeat_count"]
self.use_infeed_queue = opts["test"]["use_infeed_queue"]
self.predicted_file_path = opts["test"]["predicted_file_path"]
self.ground_truth_file_path = opts["test"]["ground_truth_file_path"]
self.meta_dict = {}
self.testset = Dataset("test", opts)
# Configure arguments for targeting the IPU
config = ipu.config.IPUConfig()
config.auto_select_ipus = 1
config.configure_ipu_system()
model = YOLOV3(False, opts)
# construct model
# we will put whole network on one ipu
layers = []
# build layer functions for backbone and upsample
layers.extend(model.build_backbone())
# last layer of darknet53 is classification layer, so it have 52 conv layers
assert len(layers) == 52
layers.extend(model.build_upsample())
# there is 25 conv layers if we count upsmaple as a conv layer
assert len(layers) == 52+25
# decoding layer and loss layer is always put on last IPU
layers.append(model.decode_boxes)
# reuse stages_constructor so we don't need to pass params by hand
network_func = stages_constructor(
[layers],
["input_data", "nums"],
["pred_sbbox", "pred_mbbox", "pred_lbbox", "nums"])[0]
input_shape = (self.batch_size, self.input_size, self.input_size, 3)
self.lines, self.image_dict = self.load_data()
if self.use_infeed_queue:
# The dataset for feeding the graphs
def data_gen():
return self.data_generator()
with tf.device("cpu"):
ds = tf.data.Dataset.from_generator(data_gen,
output_types=(tf.float16, tf.int32),
output_shapes=(input_shape, (self.batch_size,))
)
ds = ds.repeat()
ds = ds.prefetch(self.repeat_count*10)
# The host side queues
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(ds)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
def model_func(input_data, nums):
pred_sbbox, pred_mbbox, pred_lbbox, nums = network_func(input_data, nums)
outfeed = outfeed_queue.enqueue(
{"pred_sbbox": pred_sbbox, "pred_mbbox": pred_mbbox, "pred_lbbox": pred_lbbox, "nums": nums})
return outfeed
def my_net():
r = loops.repeat(self.repeat_count,
model_func, [], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
self.run_loop = ipu.ipu_compiler.compile(
my_net, inputs=[])
# The outfeed dequeue has to happen after the outfeed enqueue
self.dequeue_outfeed = outfeed_queue.dequeue()
self.sess = tf.Session(config=tf.ConfigProto())
self.sess.run(infeed_queue.initializer)
else:
# if using feed dict, it will be simpler
# the cost is throughput
with tf.device("cpu"):
with tf.name_scope("input"):
# three channel images
self.input_data = tf.placeholder(
shape=input_shape, dtype=self.precision, name="input_data")
self.nums = tf.placeholder(
shape=(self.batch_size), dtype=tf.int32, name="nums")
with ipu.scopes.ipu_scope("/device:IPU:0"):
self.output = ipu.ipu_compiler.compile(
network_func, [self.input_data, self.nums])
self.sess = tf.Session(
config=tf.ConfigProto())
if self.use_moving_avg:
with tf.name_scope("ema"):
ema_obj = tf.train.ExponentialMovingAverage(
self.moving_avg_decay)
self.saver = tf.train.Saver(ema_obj.variables_to_restore())
else:
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weight_file)
def load_data(self):
with open(self.annotation_path, "r") as annotation_file:
# load_all images
lines = []
for line in annotation_file:
lines.append(line)
image_dict = self.testset.load_images(dump=False)
return lines, image_dict
    def data_generator(self):
        """Generate input image and write groundtruth info

        Yields (image_datas, nums) batches of exactly self.batch_size, where
        image_datas is a float16 array of resized images and nums holds the
        indices of the annotation lines in the batch.
        Side effects: recreates self.write_image_path, writes ground-truth
        boxes to self.ground_truth_file, and stores per-image metadata
        [org_h, org_w, image_name, line] in self.meta_dict.
        After the data is exhausted, the last batch is re-yielded forever
        (padding for the infeed-queue repeat count).
        """
        if os.path.exists(self.write_image_path):
            shutil.rmtree(self.write_image_path)
        os.mkdir(self.write_image_path)
        self.ground_truth_file = open(self.ground_truth_file_path, "w")
        image_datas = []
        nums = []
        for num, line in enumerate(self.lines):
            annotation = line.strip().split()
            image_path = annotation[0]
            image_name = image_path.split("/")[-1]
            image = self.image_dict[line.strip()]
            # Each remaining token is a comma-separated box; columns 0-3 are the
            # box coordinates and column 4 is the class index (see slicing below).
            bbox_data_gt = np.array(
                [list(map(int, box.split(","))) for box in annotation[1:]])
            if len(bbox_data_gt) == 0:
                bboxes_gt = []
                classes_gt = []
            else:
                bboxes_gt, classes_gt = bbox_data_gt[:,
                                                     :4], bbox_data_gt[:, 4]
            num_bbox_gt = len(bboxes_gt)
            # output ground-truth
            self.ground_truth_file.write(str(num)+":\n")
            for i in range(num_bbox_gt):
                class_name = self.classes[classes_gt[i]]
                xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
                bbox_mess = ",".join(
                    [class_name, xmin, ymin, xmax, ymax]) + "\n"
                self.ground_truth_file.write(bbox_mess)
            image_copy = np.copy(image)
            org_h, org_w, _ = image.shape
            image_data = utils.resize_image(
                image_copy, [self.input_size, self.input_size])
            # we don't want to pass metadata through pipeline
            # so we'll keep it with a dictionary
            self.meta_dict[num] = [org_h, org_w, image_name, line]
            image_datas.append(image_data)
            nums.append(num)
            if len(nums) < self.batch_size:
                if num < len(self.lines) - 1:
                    continue
                else:
                    # if there's not enough data to fill the last batch
                    # we repeat the last image to yield a full sized batch
                    for _ in range(len(image_datas), self.batch_size):
                        image_datas.append(image_datas[-1])
                        nums.append(nums[-1])
            image_datas = np.array(image_datas).astype(np.float16)
            yield (image_datas, nums)
            if num < len(self.lines) - 1:
                image_datas = []
                nums = []
        while True:
            # if using infeed_queue, it will need more batches
            # to pad the data and meet the required repeat_count
            # so we will use the last batch for padding
            yield (image_datas, nums)
    def parse_result(self, pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums):
        """Parse and write predicted result

        For each batch entry, reshapes the small/medium/large-scale predictions
        into rows of (5 + num_classes), converts boxes from network-input
        coordinates back to original-image coordinates, applies NMS, optionally
        writes an annotated image, and appends "class,score,xmin,ymin,xmax,ymax"
        lines to self.predict_result_file. Stops early when nums stops
        increasing (those are padding entries added to fill the batch).
        """
        for i in range(len(nums)):
            # if nums value is repeated
            # that means nums[i] is a repeated value for matching required batch size
            # so we can stop the iteration
            if i > 0 and nums[i] <= nums[i-1]:
                break
            num = nums[i]
            pred_sbbox = pred_sbbox_list[i]
            pred_mbbox = pred_mbbox_list[i]
            pred_lbbox = pred_lbbox_list[i]
            org_h, org_w, image_name, line = self.meta_dict[num]
            image_path = line.strip().split()[0]
            image = self.image_dict[line.strip()]
            # Flatten all three detection scales into one (N, 5 + num_classes) array.
            pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                        np.reshape(
                                            pred_mbbox, (-1, 5 + self.num_classes)),
                                        np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
            # convert boxes from input_image coordinate to original image coordinate
            bboxes = utils.postprocess_boxes(
                pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
            bboxes_pr = utils.nms(bboxes, self.iou_threshold)
            if self.write_image:
                image = utils.draw_bbox(
                    image, bboxes_pr, self.classes, show_label=self.show_label)
                cv2.imwrite(self.write_image_path+image_name, image)
            self.predict_result_file.write(str(num)+":\n")
            for bbox in bboxes_pr:
                # bbox layout: [xmin, ymin, xmax, ymax, score, class_index]
                coor = np.array(bbox[:4], dtype=np.int32)
                score = bbox[4]
                class_ind = int(bbox[5])
                class_name = self.classes[class_ind]
                score = "%.4f" % score
                xmin, ymin, xmax, ymax = list(map(str, coor))
                bbox_mess = ",".join(
                    [class_name, score, xmin, ymin, xmax, ymax]) + "\n"
                self.predict_result_file.write(bbox_mess)
    def evaluate(self):
        """Run inference over the whole test set and write prediction files.

        Two execution paths:
          * infeed-queue mode: each run consumes batch_size * repeat_count
            samples; results are fetched from the outfeed and parsed per batch.
          * feed-dict mode: one batch per session.run call (simpler, slower).
        Closes the ground-truth file, the prediction file, and the session.
        """
        self.predict_result_file = open(self.predicted_file_path, "w")
        if self.use_infeed_queue:
            # using infeed queue to improve throughput
            # we can use an additional thread to run dequeue_outfeed for decrease latency and further improve throughput
            total_samples = len(self.lines)
            interaction_samples = self.batch_size*self.repeat_count
            total_interactions = total_samples/interaction_samples
            total_interactions = math.ceil(total_interactions)
            for interaction_index in range(total_interactions):
                run_start = time.time()
                self.sess.run(self.run_loop)
                result = self.sess.run(
                    self.dequeue_outfeed)
                run_duration = time.time()-run_start
                pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = result[
                    "pred_sbbox"], result["pred_mbbox"], result["pred_lbbox"], result["nums"]
                for i in range(len(nums)):
                    # len(nums) == repeat_count
                    # there's repeat count number of batches for each run
                    if i > 0 and nums[i][0] <= nums[i-1][0]:
                        # ignore repeated data
                        # these are only for meeting data size required when using ipu.loops.repeat
                        break
                    self.parse_result(pred_sbbox_list[i], pred_mbbox_list[i], pred_lbbox_list[i], nums[i])
                logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}, repeat count: {}".format(
                    (interaction_index+1)*interaction_samples, len(self.lines),
                    run_duration,
                    interaction_samples/run_duration,
                    self.batch_size,
                    self.repeat_count))
        else:
            # if not use infeed_queue, it will return for every batch
            data_gen = self.data_generator()
            interaction_samples = self.batch_size
            total_interactions = math.ceil(len(self.lines)/interaction_samples)
            for interaction_index in range(total_interactions):
                image_datas, nums = next(data_gen)
                run_start = time.time()
                pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums = self.sess.run(
                    self.output,
                    feed_dict={
                        self.input_data: image_datas,
                        self.nums: nums
                    }
                )
                run_duration = time.time()-run_start
                self.parse_result(pred_sbbox_list, pred_mbbox_list, pred_lbbox_list, nums)
                logger.info("progress:{}/{} ,latency: {}, through put: {}, batch size: {}".format(
                    (interaction_index+1)*interaction_samples,
                    len(self.lines),
                    run_duration,
                    interaction_samples/run_duration,
                    self.batch_size))
        self.ground_truth_file.close()
        self.predict_result_file.close()
        self.sess.close()
# Script entry point: parse CLI options, load the JSON config, and run evaluation.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="evaluation in TensorFlow", add_help=False)
    parser.add_argument("--config", type=str, default="config/config_800.json",
                        help="json config file for yolov3.")
    parser.add_argument("--test_path", type=str, default="./data/dataset/voc_test.txt",
                        help="data path for test")
    arguments = parser.parse_args()
    with open(arguments.config) as f:
        opts = json.load(f)
    # Override the config's annotation path with the CLI-supplied test set.
    opts['test']['annot_path'] = arguments.test_path
    YoloTest(opts).evaluate()
|
[
"os.mkdir",
"argparse.ArgumentParser",
"tensorflow.python.ipu.config.IPUConfig",
"core.utils.nms",
"tensorflow.ConfigProto",
"shutil.rmtree",
"core.utils.read_class_names",
"core.utils.postprocess_boxes",
"tensorflow.train.ExponentialMovingAverage",
"numpy.copy",
"cv2.imwrite",
"os.path.exists",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.placeholder",
"numpy.reshape",
"tensorflow.name_scope",
"core.utils.resize_image",
"tensorflow.train.Saver",
"math.ceil",
"ipu_utils.stages_constructor",
"tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue",
"tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue",
"json.load",
"tensorflow.python.ipu.scopes.ipu_scope",
"core.yolov3.YOLOV3",
"tensorflow.device",
"tensorflow.python.ipu.loops.repeat",
"time.time",
"core.dataset.Dataset",
"tensorflow.data.Dataset.from_generator",
"numpy.array",
"core.utils.draw_bbox"
] |
[((14187, 14266), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""evaluation in TensorFlow"""', 'add_help': '(False)'}), "(description='evaluation in TensorFlow', add_help=False)\n", (14210, 14266), False, 'import argparse\n'), ((781, 828), 'core.utils.read_class_names', 'utils.read_class_names', (["opts['yolo']['classes']"], {}), "(opts['yolo']['classes'])\n", (803, 828), True, 'import core.utils as utils\n'), ((1873, 1894), 'core.dataset.Dataset', 'Dataset', (['"""test"""', 'opts'], {}), "('test', opts)\n", (1880, 1894), False, 'from core.dataset import Dataset\n'), ((1965, 1987), 'tensorflow.python.ipu.config.IPUConfig', 'ipu.config.IPUConfig', ([], {}), '()\n', (1985, 1987), False, 'from tensorflow.python import ipu\n'), ((2079, 2098), 'core.yolov3.YOLOV3', 'YOLOV3', (['(False)', 'opts'], {}), '(False, opts)\n', (2085, 2098), False, 'from core.yolov3 import YOLOV3\n'), ((6215, 6252), 'os.path.exists', 'os.path.exists', (['self.write_image_path'], {}), '(self.write_image_path)\n', (6229, 6252), False, 'import os\n'), ((6311, 6342), 'os.mkdir', 'os.mkdir', (['self.write_image_path'], {}), '(self.write_image_path)\n', (6319, 6342), False, 'import os\n'), ((14646, 14658), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14655, 14658), False, 'import json\n'), ((2774, 2882), 'ipu_utils.stages_constructor', 'stages_constructor', (['[layers]', "['input_data', 'nums']", "['pred_sbbox', 'pred_mbbox', 'pred_lbbox', 'nums']"], {}), "([layers], ['input_data', 'nums'], ['pred_sbbox',\n 'pred_mbbox', 'pred_lbbox', 'nums'])\n", (2792, 2882), False, 'from ipu_utils import stages_constructor\n'), ((3689, 3724), 'tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue', 'ipu_infeed_queue.IPUInfeedQueue', (['ds'], {}), '(ds)\n', (3720, 3724), False, 'from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops\n'), ((3753, 3788), 'tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue', 'ipu_outfeed_queue.IPUOutfeedQueue', ([], {}), 
'()\n', (3786, 3788), False, 'from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops\n'), ((5718, 5734), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5732, 5734), True, 'import tensorflow as tf\n'), ((6266, 6302), 'shutil.rmtree', 'shutil.rmtree', (['self.write_image_path'], {}), '(self.write_image_path)\n', (6279, 6302), False, 'import shutil\n'), ((7546, 7560), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (7553, 7560), True, 'import numpy as np\n'), ((7629, 7695), 'core.utils.resize_image', 'utils.resize_image', (['image_copy', '[self.input_size, self.input_size]'], {}), '(image_copy, [self.input_size, self.input_size])\n', (7647, 7695), True, 'import core.utils as utils\n'), ((10047, 10141), 'core.utils.postprocess_boxes', 'utils.postprocess_boxes', (['pred_bbox', '(org_h, org_w)', 'self.input_size', 'self.score_threshold'], {}), '(pred_bbox, (org_h, org_w), self.input_size, self.\n score_threshold)\n', (10070, 10141), True, 'import core.utils as utils\n'), ((10178, 10215), 'core.utils.nms', 'utils.nms', (['bboxes', 'self.iou_threshold'], {}), '(bboxes, self.iou_threshold)\n', (10187, 10215), True, 'import core.utils as utils\n'), ((11506, 11535), 'math.ceil', 'math.ceil', (['total_interactions'], {}), '(total_interactions)\n', (11515, 11535), False, 'import math\n'), ((3224, 3240), 'tensorflow.device', 'tf.device', (['"""cpu"""'], {}), "('cpu')\n", (3233, 3240), True, 'import tensorflow as tf\n'), ((3263, 3394), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['data_gen'], {'output_types': '(tf.float16, tf.int32)', 'output_shapes': '(input_shape, (self.batch_size,))'}), '(data_gen, output_types=(tf.float16, tf.int32\n ), output_shapes=(input_shape, (self.batch_size,)))\n', (3293, 3394), True, 'import tensorflow as tf\n'), ((4167, 4228), 'tensorflow.python.ipu.loops.repeat', 'loops.repeat', (['self.repeat_count', 'model_func', '[]', 'infeed_queue'], {}), '(self.repeat_count, 
model_func, [], infeed_queue)\n', (4179, 4228), False, 'from tensorflow.python.ipu import ipu_infeed_queue, ipu_outfeed_queue, loops\n'), ((4305, 4342), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (4325, 4342), False, 'from tensorflow.python import ipu\n'), ((4376, 4419), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['my_net'], {'inputs': '[]'}), '(my_net, inputs=[])\n', (4400, 4419), False, 'from tensorflow.python import ipu\n'), ((4807, 4823), 'tensorflow.device', 'tf.device', (['"""cpu"""'], {}), "('cpu')\n", (4816, 4823), True, 'import tensorflow as tf\n'), ((5195, 5232), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (5215, 5232), False, 'from tensorflow.python import ipu\n'), ((5264, 5332), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['network_func', '[self.input_data, self.nums]'], {}), '(network_func, [self.input_data, self.nums])\n', (5288, 5332), False, 'from tensorflow.python import ipu\n'), ((5481, 5501), 'tensorflow.name_scope', 'tf.name_scope', (['"""ema"""'], {}), "('ema')\n", (5494, 5501), True, 'import tensorflow as tf\n'), ((5529, 5585), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['self.moving_avg_decay'], {}), '(self.moving_avg_decay)\n', (5562, 5585), True, 'import tensorflow as tf\n'), ((10274, 10349), 'core.utils.draw_bbox', 'utils.draw_bbox', (['image', 'bboxes_pr', 'self.classes'], {'show_label': 'self.show_label'}), '(image, bboxes_pr, self.classes, show_label=self.show_label)\n', (10289, 10349), True, 'import core.utils as utils\n'), ((10387, 10441), 'cv2.imwrite', 'cv2.imwrite', (['(self.write_image_path + image_name)', 'image'], {}), '(self.write_image_path + image_name, image)\n', (10398, 10441), False, 'import cv2\n'), ((10558, 10592), 'numpy.array', 'np.array', (['bbox[:4]'], {'dtype': 
'np.int32'}), '(bbox[:4], dtype=np.int32)\n', (10566, 10592), True, 'import numpy as np\n'), ((11628, 11639), 'time.time', 'time.time', ([], {}), '()\n', (11637, 11639), False, 'import time\n'), ((13270, 13281), 'time.time', 'time.time', ([], {}), '()\n', (13279, 13281), False, 'import time\n'), ((4616, 4632), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4630, 4632), True, 'import tensorflow as tf\n'), ((4846, 4868), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (4859, 4868), True, 'import tensorflow as tf\n'), ((4951, 5025), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': 'input_shape', 'dtype': 'self.precision', 'name': '"""input_data"""'}), "(shape=input_shape, dtype=self.precision, name='input_data')\n", (4965, 5025), True, 'import tensorflow as tf\n'), ((5083, 5149), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': 'self.batch_size', 'dtype': 'tf.int32', 'name': '"""nums"""'}), "(shape=self.batch_size, dtype=tf.int32, name='nums')\n", (5097, 5149), True, 'import tensorflow as tf\n'), ((5414, 5430), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5428, 5430), True, 'import tensorflow as tf\n'), ((8454, 8475), 'numpy.array', 'np.array', (['image_datas'], {}), '(image_datas)\n', (8462, 8475), True, 'import numpy as np\n'), ((9651, 9701), 'numpy.reshape', 'np.reshape', (['pred_sbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_sbbox, (-1, 5 + self.num_classes))\n', (9661, 9701), True, 'import numpy as np\n'), ((9743, 9793), 'numpy.reshape', 'np.reshape', (['pred_mbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_mbbox, (-1, 5 + self.num_classes))\n', (9753, 9793), True, 'import numpy as np\n'), ((9880, 9930), 'numpy.reshape', 'np.reshape', (['pred_lbbox', '(-1, 5 + self.num_classes)'], {}), '(pred_lbbox, (-1, 5 + self.num_classes))\n', (9890, 9930), True, 'import numpy as np\n'), ((11798, 11809), 'time.time', 'time.time', ([], {}), '()\n', (11807, 11809), False, 'import 
time\n'), ((13601, 13612), 'time.time', 'time.time', ([], {}), '()\n', (13610, 13612), False, 'import time\n')]
|
"""
MIT License
Copyright (c) 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pathlib import Path
from typing import Dict
from autohyper import optimize, LowRankMetrics, HyperParameters
from torchvision import datasets, transforms
from torch.optim import Adam
from gutils import init_logger
import torchvision.models as models
import numpy as np
import torch
def main():
    """Tune lr/weight_decay with autohyper on CIFAR-10 + ResNet-18, then retrain."""
    # indicate which hyper-parameters to optimize
    dataset = torch.utils.data.DataLoader(
        datasets.CIFAR10('.', download=True, transform=transforms.ToTensor()),
        batch_size=128)
    def epoch_trainer(hyper_parameters: Dict[str, float],
                      epochs) -> LowRankMetrics:
        """Train a fresh ResNet-18 for the given epochs; return low-rank metrics."""
        # update model/optimizer parameters based on values in @argument:
        # hyper_parameters
        print('Run epochs:', hyper_parameters)
        model = models.resnet18()
        model.train()
        model = model.cuda()
        metrics = LowRankMetrics(list(model.parameters()))
        optimizer = Adam(model.parameters(),
                         lr=hyper_parameters['lr'],
                         weight_decay=hyper_parameters['weight_decay'],)
        criterion = torch.nn.CrossEntropyLoss().cuda()
        accs = list()
        for epoch in epochs:
            for inputs, targets in dataset:
                inputs = inputs.cuda()
                targets = targets.cuda()
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                accs.append(accuracy(outputs, targets)[0].item())
            # run epoch training...
            # at every epoch, evaluate low_rank metrics
            # NOTE(review): the label says "Loss" but accs holds top-1 accuracies.
            print(f"Epoch {epoch} | Loss {np.mean(accs)}")
            metrics.evaluate()
        return metrics
    hyper_parameters = HyperParameters(lr=True, weight_decay=True)
    final_hp = optimize(epoch_trainer=epoch_trainer,
                        hyper_parameters=hyper_parameters)
    final_hyper_parameters_dict = final_hp.final()
    # do your final training with the optimized hyper-parameters
    epoch_trainer(final_hyper_parameters_dict, epochs=range(250))
def accuracy(outputs, targets, topk=(1,)):
    """Return top-k accuracies (as percentages) for each k in *topk*.

    outputs are per-class scores of shape (batch, classes); targets are the
    true class indices. Each entry of the returned list is a 1-element tensor.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = targets.size(0)
        # Indices of the k_max highest-scoring classes, transposed to (k, batch).
        _, predictions = outputs.topk(k_max, 1, True, True)
        predictions = predictions.t()
        hits = predictions.eq(targets.contiguous().view(1, -1).expand_as(predictions))
        # A sample counts for top-k if the target appears in its first k rows.
        return [
            hits[:k].contiguous().view(-1).float().sum(0, keepdim=True).mul_(100.0 / n)
            for k in topk
        ]
if __name__ == "__main__":
    # Set up file logging under ./logs before running the optimization.
    logger = init_logger(Path('logs'))
    main()
|
[
"autohyper.HyperParameters",
"torchvision.models.resnet18",
"autohyper.optimize",
"torch.nn.CrossEntropyLoss",
"pathlib.Path",
"numpy.mean",
"torch.no_grad",
"torchvision.transforms.ToTensor"
] |
[((2854, 2897), 'autohyper.HyperParameters', 'HyperParameters', ([], {'lr': '(True)', 'weight_decay': '(True)'}), '(lr=True, weight_decay=True)\n', (2869, 2897), False, 'from autohyper import optimize, LowRankMetrics, HyperParameters\n'), ((2913, 2985), 'autohyper.optimize', 'optimize', ([], {'epoch_trainer': 'epoch_trainer', 'hyper_parameters': 'hyper_parameters'}), '(epoch_trainer=epoch_trainer, hyper_parameters=hyper_parameters)\n', (2921, 2985), False, 'from autohyper import optimize, LowRankMetrics, HyperParameters\n'), ((1837, 1854), 'torchvision.models.resnet18', 'models.resnet18', ([], {}), '()\n', (1852, 1854), True, 'import torchvision.models as models\n'), ((3243, 3258), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3256, 3258), False, 'import torch\n'), ((3748, 3760), 'pathlib.Path', 'Path', (['"""logs"""'], {}), "('logs')\n", (3752, 3760), False, 'from pathlib import Path\n'), ((1513, 1534), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1532, 1534), False, 'from torchvision import datasets, transforms\n'), ((2155, 2182), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2180, 2182), False, 'import torch\n'), ((2759, 2772), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (2766, 2772), True, 'import numpy as np\n')]
|
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from mojang import MojangAPI as Mojang
from pyosu import OsuApi
import discord
import pyosu
from custom_funcs import embed_create, is_uuid4
def sync_minecraft(ctx, account):
    """Build a Discord embed describing a Minecraft account.

    *account* may be a username or a UUID (detected via is_uuid4). Makes
    blocking Mojang API calls, so the cog runs this in an executor.
    Returns an error embed when the account is missing or the lookup fails.
    """
    try:
        if is_uuid4(account):
            uuid = account
        else:
            uuid = Mojang.get_uuid(account)
        profile = Mojang.get_profile(uuid)
        if not profile:
            return embed_create(ctx, title="Error!", description="Account not found!", color=0xeb4034)
        name_history = Mojang.get_name_history(uuid)
    except Exception:
        # Any Mojang API failure (network, rate limit, bad response).
        return embed_create(ctx, title="Error!", description="Can't lookup account! (API down?)", color=0xeb4034)
    past_names = [data['name'] for data in name_history if data['name'] != profile.name]
    embed = embed_create(ctx, title="Minecraft account info:")
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/632730054396215299/825080584451391529/grass.png")
    embed.add_field(name="Current Username:", value=discord.utils.escape_markdown(profile.name), inline=False)
    embed.add_field(name="Profile UUID:", value=profile.id, inline=False)
    embed.add_field(name="Past Usernames:",
                    value=(discord.utils.escape_markdown(", ".join(past_names)) if past_names else "No past usernames"),
                    inline=False)
    embed.add_field(name="Skin:",
                    value=f"[Download Skin ({'Steve Type' if not profile.skin_model == 'slim' else 'Alex Type'})]({profile.skin_url})" if profile.skin_url else "No skin",
                    inline=False)
    embed.add_field(name="Is legacy account?:", value="Yes" if profile.is_legacy_profile else "No", inline=False)
    # Dream's UUID
    if profile.id == 'ec70bcaf702f4bb8b48d276fa52a780c':
        embed.set_thumbnail(
            url="https://media.discordapp.net/attachments/632730054396215299/827393984875855982/ForsenCD-emote.jpg")
    return embed
def mode_convert(mode):
    """Map a user-supplied osu! mode string to a (mode_id, mode_name) pair.

    Unknown, empty, or None input falls back to standard: (0, "").
    Matching is case-insensitive.
    """
    if not mode:
        return 0, ""
    alias_table = {
        ("s", "standard", "osu", "osu!", "std", "0"): (0, ""),
        ("taiko", "t", "osu!taiko", "1"): (1, "taiko"),
        ("c", "catch", "ctb", "osu!catch", "2"): (2, "catch"),
        ("m", "mania", "osu!mania", "3"): (3, "mania"),
    }
    normalized = mode.lower()
    for aliases, result in alias_table.items():
        if normalized in aliases:
            return result
    return 0, ""
class GameCog(commands.Cog, name="Game Info"):
    """Commands that look up game-account info (Minecraft, osu!)."""
    def __init__(self, bot):
        # Reusable osu! API client; key is read from the bot's secrets mapping.
        self.osu_api = OsuApi(bot.secrets["OSU_API_KEY"])
        self.bot = bot
        print("GameCog init")
    @commands.cooldown(1, 5, BucketType.user)
    @commands.command(aliases=["mc"])
    async def minecraft(self, ctx, account):
        """Gets info of minecraft accounts using current username or their UUID"""
        async with ctx.channel.typing():
            # sync_minecraft blocks on Mojang API calls, so run it off-loop.
            embed = await self.bot.loop.run_in_executor(None, sync_minecraft, ctx, account)
        await ctx.send(embed=embed)
    @commands.cooldown(1, 5, BucketType.user)
    @commands.command(aliases=["osu!"])
    async def osu(self, ctx, account, gamemode=None):
        """Gets info of osu! accounts! You can also specify a gamemode (standard/taiko/catch/mania)."""
        async with ctx.channel.typing():
            mode_int, mode_name = mode_convert(gamemode)
            osu_obj = await self.osu_api.get_user(user=account, mode=mode_int)
            if not isinstance(osu_obj, pyosu.models.User):
                embed = embed_create(ctx, title="Account not found!",
                                     description="If you are trying to get user info, use their username or user id.",
                                     color=0xeb4034)
            elif isinstance(osu_obj, pyosu.models.User):
                embed = embed_create(ctx, title=f"information for osu!{mode_name} account:")
                embed.set_thumbnail(
                    url="https://cdn.discordapp.com/attachments/632730054396215299/825081328146841600/osu.png")
                embed.add_field(name="General Info:",
                                value=f"Username: {osu_obj.username}\nUser ID: {osu_obj.user_id}\nLevel: {int(osu_obj.level)}\nCountry: {osu_obj.country}",
                                inline=False)
                embed.add_field(name="Ranking Info:",
                                value=f"PP Score: {int(osu_obj.pp_raw)}\nRanked Score: {int(osu_obj.ranked_score)}\nTotal Score: {int(osu_obj.total_score)}\nPP Rank: {osu_obj.pp_rank}th\nCountry PP Rank: {osu_obj.pp_country_rank}th",
                                inline=False)
                embed.add_field(name="Play Info:",
                                value=f"Accuracy: {int(osu_obj.accuracy)}%\n{osu_obj.playcount} (good) beatmaps played!\nAmount of SSH ranks: {osu_obj.count_rank_ssh}\nAmount of SS ranks: {osu_obj.count_rank_ss}\nAmount of SH ranks: {osu_obj.count_rank_sh}\nAmount of S ranks: {osu_obj.count_rank_s}\nAmount of A ranks: {osu_obj.count_rank_a}")
        await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: attach the GameCog to *bot*."""
    bot.add_cog(GameCog(bot))
|
[
"discord.utils.escape_markdown",
"discord.ext.commands.command",
"mojang.MojangAPI.get_profile",
"mojang.MojangAPI.get_uuid",
"discord.ext.commands.cooldown",
"custom_funcs.is_uuid4",
"pyosu.OsuApi",
"custom_funcs.embed_create",
"mojang.MojangAPI.get_name_history"
] |
[((877, 927), 'custom_funcs.embed_create', 'embed_create', (['ctx'], {'title': '"""Minecraft account info:"""'}), "(ctx, title='Minecraft account info:')\n", (889, 927), False, 'from custom_funcs import embed_create, is_uuid4\n'), ((2698, 2738), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'BucketType.user'], {}), '(1, 5, BucketType.user)\n', (2715, 2738), False, 'from discord.ext import commands\n'), ((2745, 2777), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['mc']"}), "(aliases=['mc'])\n", (2761, 2777), False, 'from discord.ext import commands\n'), ((3088, 3128), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'BucketType.user'], {}), '(1, 5, BucketType.user)\n', (3105, 3128), False, 'from discord.ext import commands\n'), ((3135, 3169), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['osu!']"}), "(aliases=['osu!'])\n", (3151, 3169), False, 'from discord.ext import commands\n'), ((296, 313), 'custom_funcs.is_uuid4', 'is_uuid4', (['account'], {}), '(account)\n', (304, 313), False, 'from custom_funcs import embed_create, is_uuid4\n'), ((424, 448), 'mojang.MojangAPI.get_profile', 'Mojang.get_profile', (['uuid'], {}), '(uuid)\n', (442, 448), True, 'from mojang import MojangAPI as Mojang\n'), ((602, 631), 'mojang.MojangAPI.get_name_history', 'Mojang.get_name_history', (['uuid'], {}), '(uuid)\n', (625, 631), True, 'from mojang import MojangAPI as Mojang\n'), ((2600, 2634), 'pyosu.OsuApi', 'OsuApi', (["bot.secrets['OSU_API_KEY']"], {}), "(bot.secrets['OSU_API_KEY'])\n", (2606, 2634), False, 'from pyosu import OsuApi\n'), ((378, 402), 'mojang.MojangAPI.get_uuid', 'Mojang.get_uuid', (['account'], {}), '(account)\n', (393, 402), True, 'from mojang import MojangAPI as Mojang\n'), ((494, 582), 'custom_funcs.embed_create', 'embed_create', (['ctx'], {'title': '"""Error!"""', 'description': '"""Account not found!"""', 'color': '(15417396)'}), "(ctx, title='Error!', 
description='Account not found!', color=\n 15417396)\n", (506, 582), False, 'from custom_funcs import embed_create, is_uuid4\n'), ((671, 774), 'custom_funcs.embed_create', 'embed_create', (['ctx'], {'title': '"""Error!"""', 'description': '"""Can\'t lookup account! (API down?)"""', 'color': '(15417396)'}), '(ctx, title=\'Error!\', description=\n "Can\'t lookup account! (API down?)", color=15417396)\n', (683, 774), False, 'from custom_funcs import embed_create, is_uuid4\n'), ((1100, 1143), 'discord.utils.escape_markdown', 'discord.utils.escape_markdown', (['profile.name'], {}), '(profile.name)\n', (1129, 1143), False, 'import discord\n'), ((3554, 3706), 'custom_funcs.embed_create', 'embed_create', (['ctx'], {'title': '"""Account not found!"""', 'description': '"""If you are trying to get user info, use their username or user id."""', 'color': '(15417396)'}), "(ctx, title='Account not found!', description=\n 'If you are trying to get user info, use their username or user id.',\n color=15417396)\n", (3566, 3706), False, 'from custom_funcs import embed_create, is_uuid4\n'), ((3857, 3925), 'custom_funcs.embed_create', 'embed_create', (['ctx'], {'title': 'f"""information for osu!{mode_name} account:"""'}), "(ctx, title=f'information for osu!{mode_name} account:')\n", (3869, 3925), False, 'from custom_funcs import embed_create, is_uuid4\n')]
|
import math
# Number of grid squares along each axis
GRID_WIDTH = 10
GRID_HEIGHT = 10
# Absolute size of a grid square (pixels)
GRIDSIZE = 20
# Size of window
SCREEN_WIDTH = GRID_WIDTH * GRIDSIZE
SCREEN_HEIGHT = GRID_HEIGHT * GRIDSIZE
# Unit direction vectors as (dx, dy); y grows downward, so UP is -1.
UP = (0, -1)
DOWN = (0, 1)
LEFT = (-1, 0)
RIGHT = (1, 0)
def same_sign(a, b):
    """True when *a* and *b* lie on the same side of zero.

    Zero is grouped with the negatives, exactly like the (a > 0) == (b > 0)
    comparison it replaces.
    """
    a_positive = a > 0
    b_positive = b > 0
    return a_positive == b_positive
# Important: clockwise order — the get_local_* helpers rotate by stepping
# through this list with index arithmetic modulo 4.
directions = [UP, RIGHT, DOWN, LEFT]
# Rotation helpers built on the clockwise `directions` list.
def get_local_right(direction):
    """Return the direction 90 degrees clockwise of *direction*."""
    return directions[(directions.index(direction) + 1) % 4]
def get_local_down(direction):
    """Return the direction opposite (180 degrees from) *direction*."""
    return directions[(directions.index(direction) + 2) % 4]
def get_local_left(direction):
    """Return the direction 90 degrees counter-clockwise of *direction*."""
    return directions[(directions.index(direction) + 3) % 4]
# Return a + b with a and b being tuples
def add_tuple(a, b):
    """Return the element-wise sum of two 2-component tuples.

    Bug fix: the previous implementation assigned into an existing tuple
    (``res[0] = ...``), which raises TypeError because tuples are immutable.
    Build the result tuple directly instead.
    """
    return (a[0] + b[0], a[1] + b[1])
# Pythagorean distance between two points
def distance(a, b):
    """Return the Euclidean distance between 2-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return math.sqrt(dx * dx + dy * dy)
|
[
"math.sqrt"
] |
[((997, 1047), 'math.sqrt', 'math.sqrt', (['((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)'], {}), '((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n', (1006, 1047), False, 'import math\n')]
|
# Generated by Django 3.1.5 on 2021-04-27 15:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefine Video.views as an IntegerField defaulting to 0 ('Views count')."""
    # Must apply after the previous video-app migration.
    dependencies = [
        ('video', '0002_auto_20210427_1508'),
    ]
    operations = [
        migrations.AlterField(
            model_name='video',
            name='views',
            field=models.IntegerField(default=0, verbose_name='Views count'),
        ),
    ]
|
[
"django.db.models.IntegerField"
] |
[((332, 390), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""Views count"""'}), "(default=0, verbose_name='Views count')\n", (351, 390), False, 'from django.db import migrations, models\n')]
|
import asyncio
from datetime import datetime
from itertools import combinations
import json
import glob
import os
import time
from typing import Optional, Tuple
import re
import sys
from aiohttp.client import ClientSession, TCPConnector
import redis
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from configs.base.consts import CONFIG_DIR, ASYNC_SLEEP
from core import logger
from core import async_queue, filtration, async_write
from core.async_requests import AsyncHttpRequests
from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb
from parser_scripts import google, googlenews
from sys import platform
# uvloop is POSIX-only; on Windows keep the default asyncio event loop policy.
if platform != 'win32':
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# Module-level logger (rabbit_logger — presumably publishes records to
# RabbitMQ; see core.logger for the actual transport).
basiclogger = logger.rabbit_logger(__name__)
# todo or_combine_sites param -> lob searches -> measure perf impact
class SearchService(object):
"""
The Search Service is a generic class that is used by any process in order to accept search inputs (e.g. search
terms, keywords, sites, etc.), perform searches, and pass the results to another service. The Search Service
implements a search query generator + async http requests + webpage Data Parser (google or googlenews) + async
write to write outputs to a file.
"""
SEARCH_SERVICE_CONFIG_PATH = os.path.normpath(os.path.join(CONFIG_DIR, 'service', 'search.json')) # Proxy info, search sites, etc.
BUSINESS_CONFIGS_DIR = os.path.normpath(os.path.join(CONFIG_DIR, 'business_drivers', 'search')) # NegMedia/SJA/LOB/etc
CLIENT_CONFIGS_DIR = os.path.normpath(os.path.join(CONFIG_DIR, 'client'))
BUSINESS_CONFIGS_PATHS = glob.glob(os.path.join(BUSINESS_CONFIGS_DIR, '*.json'))
CLIENT_CONFIGS_PATHS = glob.glob(os.path.join(CLIENT_CONFIGS_DIR, '*', '*.json'))
    def __init__(self, input_queue: asyncio.Queue, publish_queue: asyncio.Queue):
        """
        Args:
            input_queue: queue of inbound search-request messages this service consumes.
            publish_queue: queue onto which extracted results are published downstream.
        """
        self.service_configs = load_config(self.SEARCH_SERVICE_CONFIG_PATH)
        self.business_configs = make_config(self.BUSINESS_CONFIGS_PATHS)
        self.client_configs = make_config(self.CLIENT_CONFIGS_PATHS)
        # todo pass in only the required params
        self.async_http_requests = AsyncHttpRequests(**self.service_configs)
        self.input_queue = input_queue
        self.publish_queue = publish_queue
        # Timestamp of this config load; presumably used to trigger periodic
        # refreshes elsewhere — confirm against callers.
        self.config_refresh = time.time()
        # NOTE(review): local-debug override kept for reference:
        # self.service_configs['redis_db_params']['host'] = 'localhost'
        self.search_history = redis.Redis(**self.service_configs['redis_db_params'])
    def start(self):
        """
        Starts the Search Service by creating a worker daemons for each number of TCP connections for the desired
        concurrency. After a worker completes a url request, it sends the response to a webpage parser, which extracts
        the urls. Finally, the extracted urls are individually placed on messages for another service.
        Issues are saved in an audit file for review

        Returns:
            workers, queues, session
        """
        # Intermediate queue with a bound to limit messages waiting in the service.
        query_queue = async_queue.get_queue(maxsize=200)
        parse_queue = async_queue.get_queue()
        write_queue = async_queue.get_queue()
        # async with ClientSession(connector=TCPConnector(limit=self.async_http_requests.connections, ssl=False)) as \
        #         session:
        session = ClientSession(connector=TCPConnector(limit=self.async_http_requests.connections, ssl=False))
        workers = []
        # NOTE(review): original indentation was lost — reconstruction assumes one
        # producer + one HTTP worker per connection, with single parse/write
        # consumers; confirm against the original file.
        for _ in range(self.async_http_requests.connections):
            task = asyncio.create_task(self.put_onto_query_queue(query_queue)
                                        )
            workers.append(task)
            task = asyncio.create_task(async_queue.worker(query_queue, parse_queue,
                                                           self.async_http_requests.handle_requests,
                                                           session=session,
                                                           )
                                        )
            workers.append(task)
        task = asyncio.create_task(parse_consumer(next_queue=parse_queue, write_queue=write_queue,
                                                   ))
        workers.append(task)
        task = asyncio.create_task(
            self.write_consumer(audit_path='audit.txt', write_queue=write_queue, publish_queue=self.publish_queue)
        )
        workers.append(task)
        queues = [self.input_queue, self.publish_queue, query_queue, parse_queue, write_queue]
        return workers, queues, session
    async def put_onto_query_queue(self, query_queue: asyncio.Queue):
        """
        Takes a message off of the input queue. Extracts the query and the service/business/client configuration and
        places it on the query queue.

        Args:
            query_queue: bounded queue of ready-to-fetch query dicts.

        Returns:
            never (runs until cancelled)
        """
        while True:
            await asyncio.sleep(ASYNC_SLEEP)
            while self.input_queue.qsize():
                try:
                    # Back off while the downstream queue is full.
                    if query_queue.full():
                        await asyncio.sleep(ASYNC_SLEEP)
                        continue
                    # if self.input_queue.qsize():
                    message: dict = await self.input_queue.get()
                    if isinstance(message, bytes):
                        message = json.loads(message)
                    basiclogger.info(message)
                    entity, business_configuration, client_search_configs = self.make_configuration(message)
                    query_params = self._assign_query_gen_inputs(entity, business_configuration, client_search_configs)
                    # Arbitrary pass-through fields are removed here and re-attached
                    # to each generated query below.
                    arb_fields, message = pop_arb_field_if_exists(message)
                    parse = google.parse if 'google' == business_configuration['service'] else googlenews.parse
                    # Construct queries from message fields and configs.
                    for query in self._query_gen(business_configuration, **query_params):
                        query['parse_func'] = parse
                        query['client'] = message['client']
                        query['business_function'] = message['business_function']
                        # add any arb fields.
                        query = set_arb(msg=query, arb=arb_fields)
                        # Skip entity/language/function combos searched within the
                        # configured refresh period (tracked in Redis).
                        if filtration.filter_entities_redis(entity, query['language'], message['business_function'],
                                                            self.business_configs['search'][message['business_function']]['refresh_period'],
                                                            self.search_history):
                            await query_queue.put(query)
                        else:
                            basiclogger.info(f"entity combo searched recently: {entity}|"
                                             f"{query['language']}|{message['business_function']}")
                except Exception as exc:
                    basiclogger.error(exc.__repr__())
                self.input_queue.task_done()
                await asyncio.sleep(ASYNC_SLEEP)
    async def write_consumer(self, audit_path: str,
                             write_queue: asyncio.Queue,
                             publish_queue: Optional[asyncio.Queue]):
        """
        used by the Search Service only right now.

        .. todo:: this could be refactored by async_queue.worker

        Args:
            audit_path: path to audit file
            publish_queue: queue result messages are published onto (may be None)
            write_queue: asyncio.Queue where each queue item is a list of asyncio.Futures for parsing the responses

        Returns:
            never (runs until cancelled)
        """
        while True:
            await asyncio.sleep(ASYNC_SLEEP)
            futures = []
            # Drain everything currently on the queue without blocking.
            for ctr in range(write_queue.qsize()):
                futures.append(write_queue.get_nowait())
                write_queue.task_done()
            while futures:
                for _ in asyncio.as_completed(futures):
                    parsed_output = await _
                    # await async_write.write_data(output_path, f"{'|'.join(parsed_output['parsed_output'])}\n", 'a',
                    #                              encoding=None)
                    # Normalized (alphanumeric, upper-case) entity for the Redis key.
                    regexp_entity = re.sub('\W+', '', parsed_output['entity']).upper()
                    # Pop any arb field so it doesn't get saved in the search history.
                    arb_field, parsed_output = pop_arb_field_if_exists(parsed_output)
                    self.search_history.set(f"{regexp_entity}|{parsed_output['language']}|{parsed_output['business_function']}", json.dumps(parsed_output))
                    if publish_queue and parsed_output:
                        try:
                            # If no articles were found with the search, don't publish a message.
                            if 'urls' in parsed_output and 'cache_urls' in parsed_output:
                                for url, cache_url in zip(parsed_output['urls'], parsed_output['cache_urls']):
                                    # Check if url or cache_url is garbage.
                                    keep_url = filtration.filter_garbage_url_search_result(url,
                                                                                           self.business_configs['search'][
                                                                                               parsed_output[
                                                                                                   'business_function']][
                                                                                               'url_filter_exclusion_patterns'])
                                    keep_cache_url = True  # default if no cache_url present
                                    if cache_url:
                                        keep_cache_url = filtration.filter_garbage_url_search_result(
                                            cache_url, self.business_configs['search'][parsed_output['business_function']]['url_filter_exclusion_patterns']
                                        )
                                    if keep_url and keep_cache_url:
                                        msg = {'url': url,
                                               'cache_url': cache_url,
                                               'client': parsed_output['client'],
                                               'business_function': parsed_output['business_function'],
                                               'date': parsed_output['date'],
                                               'language': parsed_output['language'],
                                               'entity': parsed_output['entity']
                                               }
                                        if arb_field:
                                            msg = set_arb(msg=msg, arb=arb_field)
                                        await publish_queue.put(msg)
                            else:
                                basiclogger.debug(f"No results found for: {parsed_output}")
                        except Exception as exc:
                            basiclogger.error(exc.__repr__())
                            basiclogger.info(f'error_parsed_output: {parsed_output}')
                    # Failed requests are appended to the audit file for review.
                    if 'error' in parsed_output and parsed_output['error']:
                        await async_write.write_data(audit_path, f"{parsed_output['error']}\n", 'a',
                                                     encoding=None)
                # Keep waiting on any futures that have not completed yet.
                if futures:
                    futures = [i for i in futures if not i.done()]
    def _query_gen(self, business_configuration, **kwargs) -> dict:
        """
        Builds a query for Google search. Wraps the entity and the keyword in double quotes. Calls other helper
        functions for query creation.

        - Uses the language class attribute to incorporate language specific component to url.
        - The html encoded entities are created using the entities instance attribute called with
          the `_encode_make_phrase` method.
        - Combines all the keywords with quotes, spaces, and OR if applicable according to or_combine_kw and
          calling _encode_keyword_combos method
        - Creates `site:website` if websites provided, and combines with OR if applicable by calling
          _encode_site_search method
        - Creates date to-from string if date range provided by calling `_encode_date_ranges` method
        - Creates keywords_excluded string if provided by calling _encode_keywords_excluded method.

        Args:
            business_configuration: business-level config (service name, proxy account, ...)
            **kwargs: query parameters as built by ``_assign_query_gen_inputs``

        Yields:
            dicts of (entity, url, cache_url, retries, response_encoding, language, proxy_account)

        Examples:
            Input:
                service = 'google'
                entities = ["Joe Black"]
                keywords = {'es': ["crime", "launder"]}
                or_combine_kw = True
                date_range = ['w']
                keywords_exclusion = ['yahoo.com']
                websites = ['bloomberg.com']
                language = 'es'
            Output (encoded):
                'https://www.google.com/search?safe=strict&q=%22Joe+Black%22+AND+%28%22crime%22+OR+%22launder%22%29+site
                %3Abloomberg.com%20-yahoo.com&hl=es-419&gl=US&ceid=US:es-419+&tbs=qdr%3Aw'
        """
        entities = kwargs['entity']
        # quote = '"'
        quote = '%22'
        # space = '%20'
        space = '+'
        for language in kwargs['language']:
            start_url, host_lang = self._get_start_url_host_lang(language, business_configuration)
            encoded_excluded_kw = self._encode_keywords_excluded(kwargs['keywords_excluded'][language])
            if not kwargs['or_combine_kw']:
                for entity in entities:
                    for keyword in self._encode_keyword_combos(kwargs['keywords'][language]):
                        # todo could also replace US with MX, CL or other spanish speaking country.
                        #  Need to assess results though
                        for web in self._encode_site_search(kwargs['websites'], kwargs['or_combine_websites']):
                            for dates in self._encode_date_ranges([kwargs['date_range']]):
                                # Extra separator only needed when a site or date fragment follows.
                                space2 = '' if not web and not dates else '+'
                                if len(keyword[1].split(' ')) == 1:
                                    # search for "ENTITY" keyword_one_word
                                    yield {'entity': entity,
                                           'url': f"{start_url}{self._encode_make_phrase(entity, space)}{quote}"
                                                  f"{keyword[1].replace(' ', '+')}{quote}{space}{web}{space}"
                                                  f"{encoded_excluded_kw}{host_lang}{space2}{dates}".strip('+'),
                                           'cache_url': '',
                                           'retries': 0,
                                           'response_encoding': None,
                                           'language': language,
                                           'proxy_account': business_configuration['proxy_account']
                                           }
                                else:
                                    # search for "ENTITY" "keyword with multiple words"
                                    yield {'entity': entity,
                                           'url': f"{start_url}{self._encode_make_phrase(entity, space)}"
                                                  f"{keyword[0].replace(' ', '+')}{space}{web}{encoded_excluded_kw}"
                                                  f"{host_lang}{space2}{dates}".strip('+'),
                                           'cache_url': '',
                                           'retries': 0,
                                           'response_encoding': None,
                                           'language': language,
                                           'proxy_account': business_configuration['proxy_account']
                                           }
            else:
                # TODO refactor this
                open_parens = '%28'
                close_parens = '%29'
                # Redundant with the lookup at the top of the loop, kept as-is.
                start_url, host_lang = self._get_start_url_host_lang(language, business_configuration)
                # Single '("kw1" OR "kw2" ...)' group instead of one query per keyword.
                OR_keywords = '("' + '" OR "'.join(map(str, kwargs['keywords'][language])) + '")'
                # Apply html encodings.
                OR_keywords = OR_keywords.replace(' ', '+')\
                    .replace('(', open_parens)\
                    .replace(')', close_parens)\
                    .replace('"', quote)
                for entity in entities:
                    for web in self._encode_site_search(kwargs['websites'], kwargs['or_combine_websites']):
                        for dates in self._encode_date_ranges([kwargs['date_range']]):
                            space2 = '' if not web and not dates else '+'
                            yield {'entity': entity,
                                   'url': f"{start_url}{self._encode_make_phrase(entity, space)}AND{space}"
                                          f"{OR_keywords}{space}{web}{encoded_excluded_kw}{host_lang}{space2}"
                                          f"{dates}".strip('+'),
                                   'cache_url': '',
                                   'retries': 0,
                                   'response_encoding': None,
                                   'language': language,
                                   'proxy_account': business_configuration['proxy_account']
                                   }
    def update_configs(self):
        """
        Reload all service/business/client configuration from disk, at most
        once per hour since the previous load.
        """
        # todo this should be outside the class or in a base class
        # todo replace this with push notification
        now = time.time()
        # Read in configs every hour.
        if (now - self.config_refresh) / 3600 > 1:
            # Re-glob in case new config files were added.
            # todo potential breakage if there are same outermost keys in the config files
            self.BUSINESS_CONFIGS_PATHS = glob.glob(os.path.join(self.BUSINESS_CONFIGS_DIR, '*.json'))
            self.CLIENT_CONFIGS_PATHS = glob.glob(os.path.join(self.CLIENT_CONFIGS_DIR, '*', '*.json'))
            self.service_configs = load_config(self.SEARCH_SERVICE_CONFIG_PATH)
            self.business_configs = make_config(self.BUSINESS_CONFIGS_PATHS)
            self.client_configs = make_config(self.CLIENT_CONFIGS_PATHS)
            self.async_http_requests = AsyncHttpRequests(**self.service_configs)
            self.config_refresh = time.time()
            basiclogger.info('search service configs updated')
def check_configs(self, business_function, client):
if business_function not in self.business_configs['search'] or \
client not in self.client_configs[business_function]:
self.update_configs()
if business_function not in self.business_configs['search']:
# todo for now default to negative media
business_function = 'media'
if client not in self.client_configs[business_function]:
client = 'default'
return business_function, client
def make_configuration(self, message):
"""
Extract configuration for a specific search
Args:
message (dict): JSON response message containing instructions on how to do a search
Returns:
"""
# todo this should be outside the class?
entity = message['entity']
client = message['client'] if 'client' in message else ''
business_function = message['business_function']
# check that client/business function exist in the configs. If not update. If still not, then provide a default
business_function, client = self.check_configs(business_function, client)
business_configuration = self.business_configs['search'][business_function]
client_search_configs = self.client_configs[business_function][client]
return entity, business_configuration, client_search_configs
def _assign_query_gen_inputs(self, entity, business_configuration, client_search_configs):
# entities: list = entities if entities else [''] # _query_gen Need >=1 entity if looping over other params
keywords: dict = client_search_configs['keywords'] if 'keywords' in client_search_configs else \
business_configuration['keywords']
language: list = client_search_configs['language'] if 'language' in client_search_configs else \
list(keywords.keys())
websites: list = client_search_configs['websites'] if 'websites' in client_search_configs else \
business_configuration['websites']
or_combine_websites: bool = client_search_configs['or_combine_websites'] if 'or_combine_websites' in client_search_configs else \
business_configuration['or_combine_websites']
or_combine_kw: bool = client_search_configs['or_combine_kw'] if 'or_combine_kw' in client_search_configs else \
business_configuration['or_combine_kw']
keywords_excluded: list = client_search_configs[
'keywords_excluded'] if 'keywords_excluded' in client_search_configs else \
business_configuration['keywords_excluded']
# todo add date_range if passed as an arb message parameter
date_range: list = client_search_configs['date_range'] if 'date_range' in client_search_configs else \
business_configuration['date_range']
query_params = {'entity': [entity],
'keywords': keywords,
'language': language,
'websites': websites,
'or_combine_kw': or_combine_kw,
'or_combine_websites': or_combine_websites,
'keywords_excluded': keywords_excluded,
'date_range': date_range}
self._type_checking(**query_params)
return query_params
def _get_start_url_host_lang(self, language: str, business_configuration) -> Tuple[str, str]:
"""
Read service configs to obtain the start_url and host_lang parameters
Args:
language (str): language for the search
Returns:
start_url (str): start_url for the TLD
host_lang (str): language and geolocation-specific encoding applied to the url
"""
options = self.service_configs['service_option'][business_configuration['service']][language]
start_url = options['start_url']
host_lang = options['host_lang']
return start_url, host_lang
@staticmethod
def _encode_make_phrase(words: str, space: str):
"""
Helper function. Makes a HTML encoded string using words and spaces using html encodings to replaces spaces and
quotes with their % counterparts (ex. Space ('') = '+' and so on). Space variable can be anything to add
after the double quotes following the word.
Args:
words (str): Word to make phrase with
space (str): space
Returns:
empty string if word is None else word wrapped in quotes with space.
"""
if not words:
return ''
else:
return '%22{0}%22{1}'.format(words.replace(' ', '+').replace('&', '%26'), space)
@staticmethod
def _encode_site_search(websites: list, or_combine_websites: bool):
"""
Adds site:http://www.website.com to Google search. If website == 'None', then default case is to NOT include
site:example.com. This is useful if a search would like to search specific sites as well as not.
Ex. `site:xyz.com` for one search and not including this string in another search.
Returns:
string 'site:website.com' if websites else empty string
"""
colon = '%3A'
# default case, no websites
if websites is None or len(websites) == 0:
yield '' # '%20'
if not or_combine_websites:
for website in websites:
if website and not (website == 'None'):
yield 'site' + colon + website
else:
yield '' # '%20'
else:
if len(websites) == 1:
yield 'site' + colon + websites[0]
else:
yield 'site' + colon + '%20OR%20site%3A'.join(map(str, websites))
@staticmethod
def _encode_keywords_excluded(keywords_excluded):
"""
Generate string of filter keywords. This is to remove those keywords from search results.
This is akin to typing: -excluded_kw1 -excluded_kw2 etc. In the search bar.
Returns:
string
Examples:
Input:
keywords_excluded = ['a', 'b', 'c']
Output:
'%20-a%20-b%20-c'
"""
# default case, no filter keywords
if keywords_excluded is None or len(keywords_excluded) == 0:
return '' # '%20'
if keywords_excluded is not None:
return ''.join('%20-' + i for i in keywords_excluded)
# and for any other weird reason
# return '' # '%20'
@staticmethod
def _encode_date_ranges(date_range: list):
"""
Filter search results by date. Takes date_range list and parses the info inside and creates the string to be
added to the query.
Possible options for the element inside the date_range list are:
- "anytime" or "a"
- "hour" or "h"(past hour)
- "day" or "d" (past day)
- "week" or "w" (past week)
- "month" or "m" (past month)
- "year" or "y" (past year)
- mm/dd/yyyy,mm/dd/yyyy (between two dates with earliest first)
* The code handles the case where the two dates are out of order.
* The delimeter between the two dates can be one of ‘,- ‘ (comma, hyphen or space)
Returns:
string with the HTML encoded date
Examples:
Input:
date1, date2 are format mm/dd/yyyy,mm/dd/yyyy
Output:
'&tbs=cdr%3A1%2Ccd_min%3A' + date1 + '%2Ccd_max%3A' + date2
"""
# if (date_range is None) or (len(date_range) == 0):
# yield '' # '%20'
# %3A is a colon :
pre_string = '&tbs=qdr%3A'
for date_entry in date_range:
if (date_entry is None) or (not date_entry) or (isinstance(date_entry, list) and len(date_entry) == 0):
yield ''
elif date_entry == 'anytime' or date_entry == 'a':
yield '&tbas=0'
elif date_entry == 'hour' or date_entry == 'h':
yield pre_string + 'h'
elif date_entry == 'day' or date_entry == 'd':
yield pre_string + 'd'
elif date_entry == 'week' or date_entry == 'w':
yield pre_string + 'w'
elif date_entry == 'month' or date_entry == 'm':
yield pre_string + 'm'
elif date_entry == 'year' or date_entry == 'y':
yield pre_string + 'y'
else:
try:
# if any, and find separator
if ',' in date_entry:
date1, date2 = date_entry.split(',')
elif ' ' in date_entry:
date1, date2 = date_entry.split(' ')
else:
date1, date2 = date_entry.split('-')
date1_check = datetime.strptime(date1, '%m/%d/%Y')
date2_check = datetime.strptime(date2, '%m/%d/%Y')
# check that dates are in order of min date , max date
if date2_check < date1_check:
date1, date2 = date2, date1
date1.replace('/', '%2F')
date2.replace('/', '%2F')
yield '&tbs=cdr%3A1%2Ccd_min%3A' + date1 + '%2Ccd_max%3A' + date2
except Exception as exc:
basiclogger.info(exc)
# if all else fails, yield ''
yield '' # '%20'
@staticmethod
def _encode_keyword_combos(keywords: list, degree: int = 1):
"""
Generates tuples for combinations of keywords. See here for more information on how the degree works:
https://en.wikipedia.org/wiki/Binomial_coefficient Most times we want to use degree=1. However, for research
purposes we may want to test out different keyword combinations.
Args:
keywords (list): list of keywords. If provided from config file, provide self.keywords[language] to get
the list
degree (int): Degree of combinations (the k in n_choose_k from Probability & Statistics
Returns:
Generator tuples of form (comb(keywords,1), keyword)
Examples:
k = ['a', 'b', 'c', 'd']
Input:
combos(k,2)
Output:
("a"%20"b", 'a b')
("a"%20"c", 'a c')
("a"%20"d", 'a d')
("b"%20"c", 'b c')
("b"%20"d", 'b d')
("c"%20"d", 'c d')
"""
quote = '%22'
if len(keywords) == 0:
yield '%20', '%20'
for x in list(combinations(keywords, degree)):
if not x:
# for empty string
yield '%20', '%20'
# yield ('"' + '"%20"'.join(x) + '"', ' '.join(x))
yield quote + '%22%20%22'.join(x) + quote, ' '.join(x)
@staticmethod
def _type_checking(**kwargs):
"""
Rudimentary type checking of input variables. This is run at the end of the __init__ instance of a class.
Returns:
None, if checked inputs pass given criteria
"""
if (not isinstance(kwargs['entity'], list) or len(kwargs['entity']) == 0) and (
not isinstance(kwargs['keywords'], dict)):
raise ValueError('entities and keywords must both be lists with len >0')
if not (isinstance(kwargs['keywords_excluded'], dict) or kwargs['keywords_excluded'] is None):
raise TypeError('filter keywords must be of type == dict with the key a supported language string')
if len(set(kwargs['keywords'])) < len(kwargs['keywords']):
raise ValueError('Don\'t use duplicate keywords.')
if kwargs['keywords_excluded'] is not None:
for filtered in kwargs['keywords_excluded']:
if len(filtered.replace(' ', '')) == 0:
raise ValueError('Don\'t use empty quotes or space in filter. '
'Make sure there is no end line of file.')
|
[
"core.async_write.write_data",
"aiohttp.client.TCPConnector",
"json.dumps",
"asyncio.as_completed",
"os.path.join",
"core.async_queue.get_queue",
"redis.Redis",
"core.utils.set_arb",
"os.path.abspath",
"json.loads",
"core.async_queue.worker",
"uvloop.EventLoopPolicy",
"re.sub",
"core.utils.parse_consumer",
"asyncio.sleep",
"itertools.combinations",
"core.logger.rabbit_logger",
"datetime.datetime.strptime",
"core.async_requests.AsyncHttpRequests",
"core.utils.load_config",
"core.utils.make_config",
"time.time",
"core.utils.pop_arb_field_if_exists",
"core.filtration.filter_entities_redis",
"core.filtration.filter_garbage_url_search_result"
] |
[((794, 824), 'core.logger.rabbit_logger', 'logger.rabbit_logger', (['__name__'], {}), '(__name__)\n', (814, 824), False, 'from core import logger\n'), ((752, 776), 'uvloop.EventLoopPolicy', 'uvloop.EventLoopPolicy', ([], {}), '()\n', (774, 776), False, 'import uvloop\n'), ((1374, 1424), 'os.path.join', 'os.path.join', (['CONFIG_DIR', '"""service"""', '"""search.json"""'], {}), "(CONFIG_DIR, 'service', 'search.json')\n", (1386, 1424), False, 'import os\n'), ((1504, 1558), 'os.path.join', 'os.path.join', (['CONFIG_DIR', '"""business_drivers"""', '"""search"""'], {}), "(CONFIG_DIR, 'business_drivers', 'search')\n", (1516, 1558), False, 'import os\n'), ((1626, 1660), 'os.path.join', 'os.path.join', (['CONFIG_DIR', '"""client"""'], {}), "(CONFIG_DIR, 'client')\n", (1638, 1660), False, 'import os\n'), ((1702, 1746), 'os.path.join', 'os.path.join', (['BUSINESS_CONFIGS_DIR', '"""*.json"""'], {}), "(BUSINESS_CONFIGS_DIR, '*.json')\n", (1714, 1746), False, 'import os\n'), ((1785, 1832), 'os.path.join', 'os.path.join', (['CLIENT_CONFIGS_DIR', '"""*"""', '"""*.json"""'], {}), "(CLIENT_CONFIGS_DIR, '*', '*.json')\n", (1797, 1832), False, 'import os\n'), ((2039, 2083), 'core.utils.load_config', 'load_config', (['self.SEARCH_SERVICE_CONFIG_PATH'], {}), '(self.SEARCH_SERVICE_CONFIG_PATH)\n', (2050, 2083), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((2116, 2156), 'core.utils.make_config', 'make_config', (['self.BUSINESS_CONFIGS_PATHS'], {}), '(self.BUSINESS_CONFIGS_PATHS)\n', (2127, 2156), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((2187, 2225), 'core.utils.make_config', 'make_config', (['self.CLIENT_CONFIGS_PATHS'], {}), '(self.CLIENT_CONFIGS_PATHS)\n', (2198, 2225), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((2309, 2350), 'core.async_requests.AsyncHttpRequests', 
'AsyncHttpRequests', ([], {}), '(**self.service_configs)\n', (2326, 2350), False, 'from core.async_requests import AsyncHttpRequests\n'), ((2463, 2474), 'time.time', 'time.time', ([], {}), '()\n', (2472, 2474), False, 'import time\n'), ((2607, 2661), 'redis.Redis', 'redis.Redis', ([], {}), "(**self.service_configs['redis_db_params'])\n", (2618, 2661), False, 'import redis\n'), ((3268, 3302), 'core.async_queue.get_queue', 'async_queue.get_queue', ([], {'maxsize': '(200)'}), '(maxsize=200)\n', (3289, 3302), False, 'from core import async_queue, filtration, async_write\n'), ((3325, 3348), 'core.async_queue.get_queue', 'async_queue.get_queue', ([], {}), '()\n', (3346, 3348), False, 'from core import async_queue, filtration, async_write\n'), ((3371, 3394), 'core.async_queue.get_queue', 'async_queue.get_queue', ([], {}), '()\n', (3392, 3394), False, 'from core import async_queue, filtration, async_write\n'), ((19040, 19051), 'time.time', 'time.time', ([], {}), '()\n', (19049, 19051), False, 'import time\n'), ((284, 309), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (299, 309), False, 'import os\n'), ((4316, 4379), 'core.utils.parse_consumer', 'parse_consumer', ([], {'next_queue': 'parse_queue', 'write_queue': 'write_queue'}), '(next_queue=parse_queue, write_queue=write_queue)\n', (4330, 4379), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((19519, 19563), 'core.utils.load_config', 'load_config', (['self.SEARCH_SERVICE_CONFIG_PATH'], {}), '(self.SEARCH_SERVICE_CONFIG_PATH)\n', (19530, 19563), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((19600, 19640), 'core.utils.make_config', 'make_config', (['self.BUSINESS_CONFIGS_PATHS'], {}), '(self.BUSINESS_CONFIGS_PATHS)\n', (19611, 19640), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((19675, 19713), 
'core.utils.make_config', 'make_config', (['self.CLIENT_CONFIGS_PATHS'], {}), '(self.CLIENT_CONFIGS_PATHS)\n', (19686, 19713), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((19754, 19795), 'core.async_requests.AsyncHttpRequests', 'AsyncHttpRequests', ([], {}), '(**self.service_configs)\n', (19771, 19795), False, 'from core.async_requests import AsyncHttpRequests\n'), ((19831, 19842), 'time.time', 'time.time', ([], {}), '()\n', (19840, 19842), False, 'import time\n'), ((30752, 30782), 'itertools.combinations', 'combinations', (['keywords', 'degree'], {}), '(keywords, degree)\n', (30764, 30782), False, 'from itertools import combinations\n'), ((3584, 3651), 'aiohttp.client.TCPConnector', 'TCPConnector', ([], {'limit': 'self.async_http_requests.connections', 'ssl': '(False)'}), '(limit=self.async_http_requests.connections, ssl=False)\n', (3596, 3651), False, 'from aiohttp.client import ClientSession, TCPConnector\n'), ((3927, 4035), 'core.async_queue.worker', 'async_queue.worker', (['query_queue', 'parse_queue', 'self.async_http_requests.handle_requests'], {'session': 'session'}), '(query_queue, parse_queue, self.async_http_requests.\n handle_requests, session=session)\n', (3945, 4035), False, 'from core import async_queue, filtration, async_write\n'), ((5139, 5165), 'asyncio.sleep', 'asyncio.sleep', (['ASYNC_SLEEP'], {}), '(ASYNC_SLEEP)\n', (5152, 5165), False, 'import asyncio\n'), ((7294, 7320), 'asyncio.sleep', 'asyncio.sleep', (['ASYNC_SLEEP'], {}), '(ASYNC_SLEEP)\n', (7307, 7320), False, 'import asyncio\n'), ((7902, 7928), 'asyncio.sleep', 'asyncio.sleep', (['ASYNC_SLEEP'], {}), '(ASYNC_SLEEP)\n', (7915, 7928), False, 'import asyncio\n'), ((8154, 8183), 'asyncio.as_completed', 'asyncio.as_completed', (['futures'], {}), '(futures)\n', (8174, 8183), False, 'import asyncio\n'), ((19328, 19377), 'os.path.join', 'os.path.join', (['self.BUSINESS_CONFIGS_DIR', '"""*.json"""'], {}), 
"(self.BUSINESS_CONFIGS_DIR, '*.json')\n", (19340, 19377), False, 'import os\n'), ((19429, 19481), 'os.path.join', 'os.path.join', (['self.CLIENT_CONFIGS_DIR', '"""*"""', '"""*.json"""'], {}), "(self.CLIENT_CONFIGS_DIR, '*', '*.json')\n", (19441, 19481), False, 'import os\n'), ((5903, 5935), 'core.utils.pop_arb_field_if_exists', 'pop_arb_field_if_exists', (['message'], {}), '(message)\n', (5926, 5935), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((8640, 8678), 'core.utils.pop_arb_field_if_exists', 'pop_arb_field_if_exists', (['parsed_output'], {}), '(parsed_output)\n', (8663, 8678), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((5565, 5584), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (5575, 5584), False, 'import json\n'), ((6482, 6516), 'core.utils.set_arb', 'set_arb', ([], {'msg': 'query', 'arb': 'arb_fields'}), '(msg=query, arb=arb_fields)\n', (6489, 6516), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((6545, 6746), 'core.filtration.filter_entities_redis', 'filtration.filter_entities_redis', (['entity', "query['language']", "message['business_function']", "self.business_configs['search'][message['business_function']]['refresh_period']", 'self.search_history'], {}), "(entity, query['language'], message[\n 'business_function'], self.business_configs['search'][message[\n 'business_function']]['refresh_period'], self.search_history)\n", (6577, 6746), False, 'from core import async_queue, filtration, async_write\n'), ((8809, 8834), 'json.dumps', 'json.dumps', (['parsed_output'], {}), '(parsed_output)\n', (8819, 8834), False, 'import json\n'), ((5304, 5330), 'asyncio.sleep', 'asyncio.sleep', (['ASYNC_SLEEP'], {}), '(ASYNC_SLEEP)\n', (5317, 5330), False, 'import asyncio\n'), ((8451, 8494), 're.sub', 're.sub', (['"""\\\\W+"""', '""""""', 
"parsed_output['entity']"], {}), "('\\\\W+', '', parsed_output['entity'])\n", (8457, 8494), False, 'import re\n'), ((11635, 11724), 'core.async_write.write_data', 'async_write.write_data', (['audit_path', 'f"""{parsed_output[\'error\']}\n"""', '"""a"""'], {'encoding': 'None'}), '(audit_path, f"{parsed_output[\'error\']}\\n", \'a\',\n encoding=None)\n', (11657, 11724), False, 'from core import async_queue, filtration, async_write\n'), ((9344, 9504), 'core.filtration.filter_garbage_url_search_result', 'filtration.filter_garbage_url_search_result', (['url', "self.business_configs['search'][parsed_output['business_function']][\n 'url_filter_exclusion_patterns']"], {}), "(url, self.business_configs[\n 'search'][parsed_output['business_function']][\n 'url_filter_exclusion_patterns'])\n", (9387, 9504), False, 'from core import async_queue, filtration, async_write\n'), ((10078, 10244), 'core.filtration.filter_garbage_url_search_result', 'filtration.filter_garbage_url_search_result', (['cache_url', "self.business_configs['search'][parsed_output['business_function']][\n 'url_filter_exclusion_patterns']"], {}), "(cache_url, self.\n business_configs['search'][parsed_output['business_function']][\n 'url_filter_exclusion_patterns'])\n", (10121, 10244), False, 'from core import async_queue, filtration, async_write\n'), ((11104, 11135), 'core.utils.set_arb', 'set_arb', ([], {'msg': 'msg', 'arb': 'arb_field'}), '(msg=msg, arb=arb_field)\n', (11111, 11135), False, 'from core.utils import load_config, parse_consumer, make_config, pop_arb_field_if_exists, set_arb\n'), ((28931, 28967), 'datetime.datetime.strptime', 'datetime.strptime', (['date1', '"""%m/%d/%Y"""'], {}), "(date1, '%m/%d/%Y')\n", (28948, 28967), False, 'from datetime import datetime\n'), ((29002, 29038), 'datetime.datetime.strptime', 'datetime.strptime', (['date2', '"""%m/%d/%Y"""'], {}), "(date2, '%m/%d/%Y')\n", (29019, 29038), False, 'from datetime import datetime\n')]
|
# Simple tb logger
import torch
from exp import ex
'''
geometry_normalizer = {
'cartesian': 4, # [0,1]x[0,1]x[0,1]x[0,1]
'angular': 98.696, # [-pi,pi]x[-.5pi,.5pi]x[0,2pi]x[0,pi]
'spherical': 61.348, # [-1,1]x[-1,1]x[-1,1]x[0,2pi]x[0,pi]
'quaternion': 17 # [0,1]x[-1,1]x[-1,1]x[0,2]x[0,2]
}
'''
def write_logs(logger, timestamp, lr, stat, meta, mode="train"):
    """Write scalar/text statistics to a tensorboard-style logger.

    Args:
        logger: object exposing ``add_scalar(tag, value, step)`` and
            ``add_text(tag, text, step)`` (e.g. a SummaryWriter).
        timestamp: global step recorded with every entry.
        lr: learning rate; logged only when mode == "train".
        stat: mapping of metric name -> value. Numbers, 0-dim tensors and
            strings are logged; other values are silently skipped.
        meta: unused; kept for interface compatibility with callers.
        mode: section prefix, e.g. "train" or "eval".
    """
    prefix = mode.capitalize()
    if mode == "train":
        logger.add_scalar('Train/lr', lr, timestamp)
    for k, v in stat.items():
        tag = f'{prefix}/{k}'
        # Exclude bool explicitly: isinstance(True, int) is True, but the
        # original type()-based check never treated bools as scalars.
        if isinstance(v, (int, float)) and not isinstance(v, bool):
            # Consistency fix: plain numbers were previously logged only in
            # non-train modes and silently dropped during training.
            logger.add_scalar(tag, v, timestamp)
        elif isinstance(v, torch.Tensor) and v.dim() == 0:
            logger.add_scalar(tag, v.item(), timestamp)
        elif isinstance(v, str):
            logger.add_text(tag, v, timestamp)
    # logger.add_image('Eval/image', img, timestamp)
# Volume of each geometry's parameter domain, used to rescale grounding
# errors onto a comparable range (values taken from the design note above):
#   cartesian:  [0,1]x[0,1]x[0,1]x[0,1]                 -> 4
#   angular:    [-pi,pi]x[-.5pi,.5pi]x[0,2pi]x[0,pi]    -> 98.696
#   spherical:  [-1,1]x[-1,1]x[-1,1]x[0,2pi]x[0,pi]     -> 61.348
#   quaternion: [0,1]x[-1,1]x[-1,1]x[0,2]x[0,2]         -> 17
geometry_normalizer = {
    'cartesian': 4,
    'angular': 98.696,
    'spherical': 61.348,
    'quaternion': 17,
}


@ex.capture()
def adjust_grounding_error(error, geometry):
    """Rescale a grounding error by the volume of *geometry*'s domain.

    Args:
        error: raw grounding error (number or tensor).
        geometry: one of 'cartesian', 'angular', 'spherical', 'quaternion'
            (injected by sacred's ``ex.capture``).

    Returns:
        error multiplied by the geometry's normalization constant.

    Note:
        Bug fix: ``geometry_normalizer`` previously existed only inside a
        module docstring, so calling this function raised NameError.
    """
    return error * geometry_normalizer[geometry]
|
[
"exp.ex.capture"
] |
[((1216, 1228), 'exp.ex.capture', 'ex.capture', ([], {}), '()\n', (1226, 1228), False, 'from exp import ex\n')]
|
from time import time
def count_ways(target=200):
    """Count the ways to make *target* pence from UK coins.

    Coins considered: 1, 2, 5, 10, 20, 50, 100 and 200 pence — the 1p coin
    is the implicit "filler" in the original nested-loop version, since any
    remainder after the larger coins is made up of pennies.

    Uses the classic unbounded-knapsack DP: O(target * #coins) instead of
    the original seven nested loops, and parameterizes the hard-coded 200.

    Args:
        target: total value in pence (default 200).

    Returns:
        int: number of distinct coin combinations summing to target.
    """
    # ways[v] = number of ways to form value v; start with 1p coins only
    # (exactly one way for every value).
    ways = [1] * (target + 1)
    for coin in (2, 5, 10, 20, 50, 100, 200):
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]


def main():
    """Print the number of ways to make 200p, then the elapsed time."""
    start = time()
    print(count_ways(200))
    print(time() - start)


if __name__ == '__main__':
    main()
|
[
"time.time"
] |
[((48, 54), 'time.time', 'time', ([], {}), '()\n', (52, 54), False, 'from time import time\n'), ((473, 479), 'time.time', 'time', ([], {}), '()\n', (477, 479), False, 'from time import time\n')]
|
# Copyright (c) 2015, <NAME>
import os
from tap.main import main
from tap.tests import TestCase
class TestMain(TestCase):
    """Tests for tap.main.main"""

    def test_exits_with_error(self):
        """The main function returns an error status if there were failures."""
        argv = ['/bin/fake', 'fake.tap']
        # Bug fix: the null stream was opened but never closed; use a
        # context manager so the handle is released deterministically.
        with open(os.devnull, 'w') as stream:
            status = main(argv, stream=stream)
        self.assertEqual(1, status)
|
[
"tap.main.main"
] |
[((376, 401), 'tap.main.main', 'main', (['argv'], {'stream': 'stream'}), '(argv, stream=stream)\n', (380, 401), False, 'from tap.main import main\n')]
|