hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c53ee6a38e510f5027c1dca7e351f88ae7ec5e09 | 2,074 | py | Python | bot/discord/models.py | meooow25/cp-discord-bot | 4d25b51f9dc4dc44105a6cebeeaea9ef1191c8c1 | [
"MIT"
] | 11 | 2018-09-03T16:50:25.000Z | 2020-07-17T05:27:25.000Z | bot/discord/models.py | meooow25/cp-discord-bot | 4d25b51f9dc4dc44105a6cebeeaea9ef1191c8c1 | [
"MIT"
] | 5 | 2018-10-08T00:18:21.000Z | 2018-11-26T22:01:40.000Z | bot/discord/models.py | meooow25/cp-discord-bot | 4d25b51f9dc4dc44105a6cebeeaea9ef1191c8c1 | [
"MIT"
] | 1 | 2018-10-09T09:30:07.000Z | 2018-10-09T09:30:07.000Z | from enum import IntEnum
class User:
    """A Discord user as delivered in gateway/REST payloads."""

    __slots__ = ('id', 'username', 'discriminator', 'bot')

    def __init__(self, **kwargs):
        # 'id', 'username' and 'discriminator' are required; 'bot' is optional.
        self.id = kwargs['id']
        self.username = kwargs['username']
        self.discriminator = kwargs['discriminator']
        self.bot = kwargs.get('bot')

    def to_dict(self):
        """Serialize to a plain dict, omitting fields that are None."""
        result = {}
        for attr in self.__slots__:
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        return result
class Channel:
    """A Discord channel: guild channel, DM or group DM."""

    __slots__ = ('id', 'type', 'name', 'guild_id', 'recipients')

    class Type(IntEnum):
        GUILD_TEXT = 0
        DM = 1
        GUILD_VOICE = 2
        GROUP_DM = 3
        GUILD_CATEGORY = 4

    def __init__(self, **kwargs):
        self.id = kwargs['id']
        self.type = self.Type(kwargs['type'])
        # Guild-only / DM-only fields are optional.
        self.name = kwargs.get('name')
        self.guild_id = kwargs.get('guild_id')
        recipients = kwargs.get('recipients')
        self.recipients = [User(**d) for d in recipients] if recipients else None

    def to_dict(self):
        """Serialize to a dict, omitting unset fields; recipients are serialized recursively."""
        channel_d = {}
        for attr in self.__slots__:
            value = getattr(self, attr)
            if value is not None:
                channel_d[attr] = value
        if self.recipients:
            channel_d['recipients'] = [u.to_dict() for u in self.recipients]
        return channel_d
class Message:
    """A Discord message; `author` is None for webhook-delivered messages."""

    __slots__ = ('id', 'type', 'channel_id', 'webhook_id', 'author', 'content', 'embeds')

    class Type(IntEnum):
        DEFAULT = 0
        RECIPIENT_ADD = 1
        RECIPIENT_REMOVE = 2
        CALL = 3
        CHANNEL_NAME_CHANGE = 4
        CHANNEL_ICON_CHANGE = 5
        CHANNEL_PINNED_MESSAGE = 6
        GUILD_MEMBER_JOIN = 7

    def __init__(self, **kwargs):
        self.id = kwargs['id']
        self.type = Message.Type(kwargs['type'])
        self.channel_id = kwargs['channel_id']
        self.webhook_id = kwargs.get('webhook_id')
        if self.webhook_id:
            # Webhook messages carry no regular user author.
            self.author = None
        else:
            self.author = User(**kwargs['author'])
        self.content = kwargs['content']
        self.embeds = kwargs['embeds']
| 28.805556 | 89 | 0.570395 |
ab073040268c3f9c1f83a0a241cfbf04e16a12fe | 2,665 | py | Python | src/metric/monitor_manager/mpstat.py | jonadmark/repd | 5b0dd3e67aeab4316e89a27af37497f6a91c83b3 | [
"MIT"
] | null | null | null | src/metric/monitor_manager/mpstat.py | jonadmark/repd | 5b0dd3e67aeab4316e89a27af37497f6a91c83b3 | [
"MIT"
] | 1 | 2015-08-07T01:36:36.000Z | 2015-08-07T01:36:36.000Z | src/metric/monitor_manager/mpstat.py | jonadmark/repd | 5b0dd3e67aeab4316e89a27af37497f6a91c83b3 | [
"MIT"
] | null | null | null | import threading
import time
import subprocess
class mpstat(threading.Thread):
    """Background monitor sampling CPU usage via the `mpstat` tool (sysstat).

    Metrics registered with the callback:
      * ('cpu', 'inuse'): 100.0 - %idle
      * ('cpu', 'steal'): %steal
    """

    def __init__(self, callback):
        """Form the monitor and register the monitored metrics."""
        super(mpstat, self).__init__()
        self.stop_flag = False
        # Sampling interval in seconds; must be provided via set() before run().
        self.interval = None
        self.callback = callback
        self.callback.add('cpu', 'inuse')
        self.callback.add('cpu', 'steal')

    def stop(self):
        """Prepare monitor to stop."""
        self.stop_flag = True

    def set(self, argument, value):
        """Set a monitor argument.

        Arguments:
        argument -- argument name (only 'interval' is recognized)
        value -- value for the argument
        """
        if argument == 'interval':
            self.interval = value

    def run(self):
        """Monitor the metrics until stop() is requested or mpstat exits."""
        if not self.interval:
            raise RuntimeError
        # open mpstat as a subprocess
        process_parameters = ['mpstat', str(self.interval)]
        try:
            mpstat_subprocess = subprocess.Popen(process_parameters,
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.PIPE)
        except (OSError, ValueError):
            # Popen raises OSError when the binary is missing and ValueError on
            # bad arguments. The previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, which was a bug.
            print('Could not create mpstat monitor, check if the sysstat package is installed in your system.')
            return
        # discard prelude (header banner and blank line)
        mpstat_subprocess.stdout.readline()
        mpstat_subprocess.stdout.readline()
        # get column numbers from the column-title line
        output_line = mpstat_subprocess.stdout.readline().decode('ascii')
        output_line_columns = output_line.split()
        idle_col = output_line_columns.index('%idle')
        steal_col = output_line_columns.index('%steal')
        while mpstat_subprocess.poll() is None and not self.stop_flag:
            # read one line from the output
            output_line = mpstat_subprocess.stdout.readline().decode('ascii')
            output_line_columns = output_line.split()
            # in_use = 100.0 - %idle; some locales print ',' as decimal separator
            idle = float(output_line_columns[idle_col].replace(',', '.'))
            inuse = 100.0 - idle
            self.callback.set_metric('cpu', 'inuse', inuse)
            # steal = %steal
            steal = float(output_line_columns[steal_col].replace(',', '.'))
            self.callback.set_metric('cpu', 'steal', steal)
            # sleep some time
            time.sleep(self.interval)
        # Don't leak the child process when stop() was requested: previously the
        # terminate call was commented out and mpstat kept running.
        if mpstat_subprocess.poll() is None:
            mpstat_subprocess.terminate()
        self.callback.remove('cpu', 'inuse')
        self.callback.remove('cpu', 'steal')
| 32.901235 | 111 | 0.577111 |
aaa6eb946c81abe7d71a61877a106642d9fa9d39 | 2,445 | py | Python | timelink/cli.py | time-link/timelink-py | 60d51bfedb64688aa7603f074d7bbc7432b5e841 | [
"MIT"
] | null | null | null | timelink/cli.py | time-link/timelink-py | 60d51bfedb64688aa7603f074d7bbc7432b5e841 | [
"MIT"
] | 3 | 2021-08-02T13:25:46.000Z | 2022-03-27T11:17:59.000Z | timelink/cli.py | time-link/timelink-py | 60d51bfedb64688aa7603f074d7bbc7432b5e841 | [
"MIT"
] | null | null | null | """Console script for timelink.
Also provides basic mhk manager functionality.
TODO consider typer https://typer.tiangolo.com
"""
import typer
import platform
from timelink.mhk.utilities import get_mhk_env, get_mhk_app_env
import docker
# We use Typer https://typer.tiangolo.com
# Root CLI application; sub-command groups are attached below.
app = typer.Typer(help="Timelink and MHK manager")
# Legacy MHK management commands live under the "mhk" sub-command group.
mhk_app = typer.Typer()
app.add_typer(mhk_app,name="mhk", help="MHK legacy manager")
@mhk_app.command(name='version')
def mhk_version():
    """shows MHK manager version

    Demonstrates how to access MHK installation files and
    usage of Docker API
    """
    # this should go to mhk.utilities as get_mhk_info()
    mhk_env = get_mhk_env()
    # Host-side paths of the MHK installation, resolved from the env mapping.
    user_home = mhk_env['HOST_MHK_USER_HOME']
    mhk_home = mhk_env['HOST_MHK_HOME']
    # Version/state marker files kept inside mhk-home; newlines stripped.
    with open(mhk_home + '/app/manager_version', 'r') as file:
        mv = file.read().replace('\n', '')
    with open(mhk_home + '/.mhk-home', 'r') as file:
        mhk_home_update = file.read().replace('\n', '')
    with open(mhk_home + '/.mhk-home-manager-init', 'r') as file:
        mhk_home_init = file.read().replace('\n', '')
    mhk_app_env = get_mhk_app_env()
    mhk_host = mhk_app_env.get('MHK_HOST', 'localhost')
    # end of get_hmk_info
    # Docker server version, queried from the local daemon.
    client = docker.from_env()
    dv = client.version()
    mhkv = \
        f""" Manager version: {mv}
    Docker version: {dv["Version"]}
    Host OS: {platform.system()} {platform.release()}
    User home: {user_home}
    mhk-home: {mhk_home}
    mhk-home init: {mhk_home_init}
    mhk-home update: {mhk_home_update}
    mhk use-tag: {mhk_app_env.get("TAG", "*none*")}
    mhk local host: {mhk_host}
    MHK URL: http://127.0.0.1:8080/mhk
    Kleio URL: http://127.0.0.1:8088
    Portainer URL: http://127.0.0.1:9000"""
    typer.echo(mhkv)
    return 0
@mhk_app.command(name='status')
def mhk_status():
    """Display container counts reported by the local Docker daemon."""
    dinfo = docker.from_env().info()
    typer.echo(f"""
    Containers :{dinfo['Containers']}
    Running :{dinfo['ContainersRunning']}
    Paused :{dinfo['ContainersPaused']}
    Stopped :{dinfo['ContainersStopped']}
    """)
    return 0
@app.callback()
def main():
    """
    This is the timelink/MHK manager on the command line
    """
    # Typer root callback: runs before any sub-command; just prints a banner.
    typer.echo("This is the timelink/MHK manager on the command line")


if __name__ == "__main__":
    app()  # pragma: no cover
| 28.764706 | 70 | 0.635583 |
fd15976ed5750ee2a5edbc3ecdb0a5ccdf803e2a | 2,198 | py | Python | src/viewer/app/mixins.py | mappin/asxtrade | 2b97ffcdefae642a49ce5bfcc131db17796f1691 | [
"Apache-2.0"
] | null | null | null | src/viewer/app/mixins.py | mappin/asxtrade | 2b97ffcdefae642a49ce5bfcc131db17796f1691 | [
"Apache-2.0"
] | 1 | 2021-04-13T05:00:40.000Z | 2021-04-13T05:00:40.000Z | src/viewer/app/mixins.py | mappin/asxtrade | 2b97ffcdefae642a49ce5bfcc131db17796f1691 | [
"Apache-2.0"
] | null | null | null | from app.models import Quotation, VirtualPurchase
from bson.objectid import ObjectId
class VirtualPurchaseMixin:
    """
    Retrieve the object by mongo _id for use by CRUD CBV views for VirtualPurchase's
    """

    def get_object(self, queryset=None):
        # The URL slug carries the raw Mongo ObjectId hex string of the purchase.
        slug = self.kwargs.get("slug")
        purchase = VirtualPurchase.objects.mongo_find_one({"_id": ObjectId(slug)})
        # Rename mongo's "_id" key to "id" so the model constructor accepts it.
        purchase["id"] = purchase["_id"]
        purchase.pop("_id", None)
        return VirtualPurchase(**purchase)
class SearchMixin:
    """CBV mixin that stores the search-form values in the user's session
    (keyed by concrete class name) and re-applies them on GET.

    Relies on the usual FormView/ListView machinery provided by subclasses:
    get_form_class, get_queryset, get_context_data, render_to_response,
    plus `action_url` and `form_class` attributes.
    """

    # Defaults so the CBV machinery has a queryset before the first search.
    model = Quotation
    object_list = Quotation.objects.none()

    def get(self, request, *args, **kwargs):
        """need to subclass this method to ensure pagination works correctly (as 'next', 'last' etc. is GET not POST)"""
        d = {}
        key = self.__class__.__name__
        print("Updating session state: {}".format(key))
        d.update(request.session.get(key, {}))  # update the form to the session state
        return self.update_form(d)

    def get_initial_form(self, form_values):
        # Build an unbound form pre-populated with the given values.
        assert isinstance(form_values, dict)
        form_class = self.get_form_class()
        return form_class(initial=form_values)

    def update_form(self, form_values):
        """Apply `form_values` to the queryset, persist them in the session and render."""
        assert isinstance(form_values, dict)
        # apply the form settings to self.queryset (specific to a CBV - watch for subclass overrides)
        self.object_list = self.get_queryset(**form_values)
        state_field = (
            self.__class__.__name__
        )  # NB: must use class name so that each search type has its own state for a given user
        self.request.session[state_field] = form_values
        context = self.get_context_data()
        assert context is not None
        assert self.action_url is not None
        context["action_url"] = self.action_url
        self.form = self.form_class(initial=form_values)
        context["form"] = self.form
        return self.render_to_response(context)

    def form_invalid(self, form):
        # Re-render the page with the (partially) cleaned data.
        return self.update_form(form.cleaned_data)

    # this is called from self.post()
    def form_valid(self, form):
        assert form.is_valid()
        return self.update_form(form.cleaned_data)
| 37.254237 | 120 | 0.665605 |
c2a16d6cce15f4a81ae0e997cf0db4196d76fe27 | 6,520 | bzl | Python | bazel/python_rules.bzl | fujimoto/grpc | a793d3777af43fdbd2d537d67fcbdd3c1de457bf | [
"Apache-2.0"
] | 1 | 2021-01-14T07:22:27.000Z | 2021-01-14T07:22:27.000Z | bazel/python_rules.bzl | fujimoto/grpc | a793d3777af43fdbd2d537d67fcbdd3c1de457bf | [
"Apache-2.0"
] | null | null | null | bazel/python_rules.bzl | fujimoto/grpc | a793d3777af43fdbd2d537d67fcbdd3c1de457bf | [
"Apache-2.0"
] | 4 | 2020-08-10T06:05:01.000Z | 2021-12-12T09:26:50.000Z | """Generates and compiles Python gRPC stubs from proto_library rules."""
load(
"//bazel:protobuf.bzl",
"get_include_protoc_args",
"get_plugin_args",
"get_proto_root",
"proto_path_to_generated_filename",
"protos_from_context",
"includes_from_deps",
"get_proto_arguments",
"declare_out_files",
)
# Filename patterns protoc / the gRPC plugin use for generated Python modules.
_GENERATED_PROTO_FORMAT = "{}_pb2.py"
_GENERATED_GRPC_PROTO_FORMAT = "{}_pb2_grpc.py"
def _generate_py_impl(context):
    """Implementation of _generate_pb2_src: one protoc run emitting *_pb2.py.

    Note: the previous version computed an unused `proto_root` local; it has
    been removed.
    """
    protos = protos_from_context(context)
    includes = includes_from_deps(context.attr.deps)
    out_files = declare_out_files(protos, context, _GENERATED_PROTO_FORMAT)
    tools = [context.executable._protoc]
    # Output location plus include paths for protoc.
    arguments = ([
        "--python_out={}".format(
            context.genfiles_dir.path,
        ),
    ] + get_include_protoc_args(includes) + [
        "--proto_path={}".format(context.genfiles_dir.path)
        for proto in protos
    ])
    arguments += get_proto_arguments(protos, context.genfiles_dir.path)
    context.actions.run(
        inputs = protos + includes,
        tools = tools,
        outputs = out_files,
        executable = context.executable._protoc,
        arguments = arguments,
        mnemonic = "ProtocInvocation",
    )
    return struct(files = depset(out_files))
# Private rule wrapping the protoc invocation that emits *_pb2.py sources.
_generate_pb2_src = rule(
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            allow_empty = False,
            providers = [ProtoInfo],
        ),
        # The protobuf compiler binary.
        "_protoc": attr.label(
            default = Label("//external:protocol_compiler"),
            providers = ["files_to_run"],
            executable = True,
            cfg = "host",
        ),
    },
    implementation = _generate_py_impl,
)
def py_proto_library(
        name,
        deps,
        **kwargs):
    """Generate python code for a protobuf.

    Args:
      name: The name of the target.
      deps: A list of proto_library dependencies. Must contain a single element.
      **kwargs: Additional arguments forwarded to both the private codegen
        rule and the resulting py_library.
    """
    # Hidden target that actually runs protoc.
    codegen_target = "_{}_codegen".format(name)
    if len(deps) != 1:
        fail("Can only compile a single proto at a time.")
    _generate_pb2_src(
        name = codegen_target,
        deps = deps,
        **kwargs
    )
    # Public library: generated sources plus the protobuf Python runtime.
    native.py_library(
        name = name,
        srcs = [":{}".format(codegen_target)],
        deps = ["@com_google_protobuf//:protobuf_python"],
        **kwargs
    )
def _generate_pb2_grpc_src_impl(context):
    """Implementation of _generate_pb2_grpc_src: protoc + gRPC Python plugin
    emitting *_pb2_grpc.py stubs.

    Note: the previous version computed an unused `proto_root` local (same as
    _generate_py_impl); it has been removed for consistency.
    """
    protos = protos_from_context(context)
    includes = includes_from_deps(context.attr.deps)
    out_files = declare_out_files(protos, context, _GENERATED_GRPC_PROTO_FORMAT)

    # Plugin flags: gRPC 2.0-style codegen plus any module prefixes to strip
    # from generated imports.
    plugin_flags = ["grpc_2_0"] + context.attr.strip_prefixes

    arguments = []
    tools = [context.executable._protoc, context.executable._plugin]
    arguments += get_plugin_args(
        context.executable._plugin,
        plugin_flags,
        context.genfiles_dir.path,
        False,
    )
    arguments += get_include_protoc_args(includes)
    arguments += [
        "--proto_path={}".format(context.genfiles_dir.path)
        for proto in protos
    ]
    arguments += get_proto_arguments(protos, context.genfiles_dir.path)

    context.actions.run(
        inputs = protos + includes,
        tools = tools,
        outputs = out_files,
        executable = context.executable._protoc,
        arguments = arguments,
        mnemonic = "ProtocInvocation",
    )
    return struct(files = depset(out_files))
# Private rule wrapping protoc + the gRPC Python plugin to emit *_pb2_grpc.py.
_generate_pb2_grpc_src = rule(
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            allow_empty = False,
            providers = [ProtoInfo],
        ),
        # Module-name prefixes stripped from imports in the generated stubs.
        "strip_prefixes": attr.string_list(),
        # The gRPC Python protoc plugin.
        "_plugin": attr.label(
            executable = True,
            providers = ["files_to_run"],
            cfg = "host",
            default = Label("//src/compiler:grpc_python_plugin"),
        ),
        # The protobuf compiler binary.
        "_protoc": attr.label(
            executable = True,
            providers = ["files_to_run"],
            cfg = "host",
            default = Label("//external:protocol_compiler"),
        ),
    },
    implementation = _generate_pb2_grpc_src_impl,
)
def py_grpc_library(
        name,
        srcs,
        deps,
        strip_prefixes = [],
        **kwargs):
    """Generate python code for gRPC services defined in a protobuf.

    Args:
      name: The name of the target.
      srcs: (List of `labels`) a single proto_library target containing the
        schema of the service.
      deps: (List of `labels`) a single py_proto_library target for the
        proto_library in `srcs`.
      strip_prefixes: (List of `strings`) If provided, this prefix will be
        stripped from the beginning of foo_pb2 modules imported by the
        generated stubs. This is useful in combination with the `imports`
        attribute of the `py_library` rule.
      **kwargs: Additional arguments to be supplied to the invocation of
        py_library.
    """
    # Hidden target that actually runs protoc with the gRPC plugin.
    codegen_grpc_target = "_{}_grpc_codegen".format(name)
    if len(srcs) != 1:
        fail("Can only compile a single proto at a time.")
    if len(deps) != 1:
        fail("Deps must have length 1.")
    _generate_pb2_grpc_src(
        name = codegen_grpc_target,
        deps = srcs,
        strip_prefixes = strip_prefixes,
        **kwargs
    )
    # Public library: generated stubs plus the grpcio runtime and the
    # matching *_pb2 library.
    native.py_library(
        name = name,
        srcs = [
            ":{}".format(codegen_grpc_target),
        ],
        deps = [Label("//src/python/grpcio/grpc:grpcio")] + deps,
        **kwargs
    )
def py2and3_test(name,
                 py_test = native.py_test,
                 **kwargs):
    """Runs a Python test under both Python 2 and Python 3.

    Args:
      name: The name of the test.
      py_test: The rule to use for each test.
      **kwargs: Keyword arguments passed directly to the underlying py_test
        rule.
    """
    # python_version is fixed per generated target, so callers must not set it.
    if "python_version" in kwargs:
        fail("Cannot specify 'python_version' in py2and3_test.")
    names = [name + suffix for suffix in (".python2", ".python3")]
    python_versions = ["PY2", "PY3"]
    # One py_test per interpreter version.
    for case_name, python_version in zip(names, python_versions):
        py_test(
            name = case_name,
            python_version = python_version,
            **kwargs
        )
    # Wrap both variants in a suite under the requested name.
    suite_kwargs = {}
    if "visibility" in kwargs:
        suite_kwargs["visibility"] = kwargs["visibility"]
    native.test_suite(
        name = name,
        tests = names,
        **suite_kwargs
    )
| 28.849558 | 80 | 0.611656 |
2c5a30e7dccbbb4af1890db401c376ccf41d3933 | 1,041 | py | Python | timeo/parameters.py | DuanraDlaw/timeo-tice | a1463e0b90cb969d4e2798879bd6af953e3dd41c | [
"MIT"
] | null | null | null | timeo/parameters.py | DuanraDlaw/timeo-tice | a1463e0b90cb969d4e2798879bd6af953e3dd41c | [
"MIT"
] | null | null | null | timeo/parameters.py | DuanraDlaw/timeo-tice | a1463e0b90cb969d4e2798879bd6af953e3dd41c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
BUS_NETWORK_CODE = '910'


def format_url(xml_key, post_params, bus_network=BUS_NETWORK_CODE):
    """Build a Timeo relay URL for the given XML key and POST parameters."""
    base = "http://timeo3.keolis.com/relais/{network}.php?xml={xml}&{params}"
    return base.format(network=bus_network, xml=xml_key, params=post_params)


def get_url_all_lines():
    """URL for all lines:
    http://timeo3.keolis.com/relais/<network_code>.php?xml=1
    """
    return format_url(1, '')


def get_url_stop_codes(ligne, sens):
    """URL for the stop codes of a line/direction:
    http://timeo3.keolis.com/relais/<network_code>.php?xml=1&ligne=<line_id>&sens=<direction_id>
    """
    params = "ligne={0}&sens={1}".format(ligne, sens)
    return format_url(1, params)


def get_url_times(ref_arret):
    """URL for the passing times of a stop:
    http://timeo3.keolis.com/relais/<network_code>.php?xml=3&refs=<stop_reference>&ran=1
    """
    params = "refs={0}&ran=1".format(ref_arret)
    return format_url(3, params)
| 23.659091 | 91 | 0.661864 |
cd7a9d0780a462854e6196925342bd3e98d6035d | 1,179 | py | Python | weak_disentangle/tensorsketch/modules/shape.py | dtch1997/disentangle-gen | 9c50dcb09063db018aa0090a564f96b798125a2f | [
"Apache-2.0"
] | null | null | null | weak_disentangle/tensorsketch/modules/shape.py | dtch1997/disentangle-gen | 9c50dcb09063db018aa0090a564f96b798125a2f | [
"Apache-2.0"
] | 5 | 2019-11-16T06:04:34.000Z | 2022-02-10T00:33:18.000Z | weak_disentangle/tensorsketch/modules/shape.py | dtch1997/disentangle-gen | 9c50dcb09063db018aa0090a564f96b798125a2f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Weak Disentangle Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Shape and broadcasting modification modules.
"""
import tensorflow as tf
from weak_disentangle.tensorsketch.modules.base import Module
class Flatten(Module):
  """Flattens input along all dimensions except first dimension.
  """

  def forward(self, x):
    # Keep the leading (batch) dimension; collapse all remaining axes into one.
    return tf.reshape(x, (x.shape[0], -1))
class Reshape(Module):
  """Reshape the input.
  """

  def __init__(self, shape):
    super().__init__()
    # Target shape passed verbatim to tf.reshape (may contain -1).
    self.shape = shape

  def forward(self, x):
    return tf.reshape(x, self.shape)

  def extra_repr(self):
    # Shown in the module's repr, e.g. "Reshape((-1, 4))".
    return "({})".format(self.shape)
| 25.085106 | 74 | 0.721798 |
643116486973b29f0b7f126b1c1dd98396a65ebd | 2,300 | py | Python | day12/day12.py | imrehg/AdventOfCode2020 | 6aa5bf122fdff33f2ff97b1652ecea7f3fbb679c | [
"MIT"
] | null | null | null | day12/day12.py | imrehg/AdventOfCode2020 | 6aa5bf122fdff33f2ff97b1652ecea7f3fbb679c | [
"MIT"
] | null | null | null | day12/day12.py | imrehg/AdventOfCode2020 | 6aa5bf122fdff33f2ff97b1652ecea7f3fbb679c | [
"MIT"
] | null | null | null | import sys
# Headings in clockwise order: a right turn advances the index.
dirs = ["E", "S", "W", "N"]

# Unit movement vectors for each compass heading.
_DELTAS = {"N": (0, 1), "S": (0, -1), "E": (1, 0), "W": (-1, 0)}


class Ship:
    """AoC 2020 day 12, part 1: the ship moves and rotates its own heading."""

    def __init__(self):
        self.loc = [0, 0]
        self.facing = "E"

    def follow_instruction(self, instruction):
        """Apply one instruction such as "F10", "N3" or "R90"."""
        action, amount = instruction[0], int(instruction[1:])
        if action in ("R", "L"):
            quarter_turns = amount // 90
            if action == "L":
                quarter_turns = -quarter_turns
            self.facing = dirs[(dirs.index(self.facing) + quarter_turns) % 4]
            return
        if action == "F":
            action = self.facing
        dx, dy = _DELTAS[action]
        self.loc[0] += dx * amount
        self.loc[1] += dy * amount

    def distance(self):
        """Manhattan distance from the starting position."""
        return abs(self.loc[0]) + abs(self.loc[1])


class Ship2(Ship):
    """Part 2: instructions steer a waypoint; "F" moves the ship toward it."""

    def __init__(self):
        super().__init__()
        # Waypoint position relative to the ship.
        self.waypoint = [10, 1]

    def follow_instruction(self, instruction):
        action, amount = instruction[0], int(instruction[1:])
        if action == "F":
            self.loc[0] += self.waypoint[0] * amount
            self.loc[1] += self.waypoint[1] * amount
        elif action in ("R", "L"):
            # Normalize to counter-clockwise quarter turns; (x, y) -> (-y, x).
            quarter_turns = (amount // 90) % 4
            if action == "R":
                quarter_turns = (4 - quarter_turns) % 4
            for _ in range(quarter_turns):
                self.waypoint = [-self.waypoint[1], self.waypoint[0]]
        else:
            # N/S/E/W shift the waypoint, not the ship.
            dx, dy = _DELTAS[action]
            self.waypoint[0] += dx * amount
            self.waypoint[1] += dy * amount
if __name__ == "__main__":
    # Puzzle input file path comes from the command line.
    input_file = sys.argv[1]
    with open(input_file, "r") as f:
        directions = [line.strip() for line in f.readlines()]
    # Part 1: steer the ship directly.
    ship = Ship()
    for d in directions:
        ship.follow_instruction(d)
    print(ship.distance())
    # Part 2: steer via the waypoint.
    ship2 = Ship2()
    for d in directions:
        ship2.follow_instruction(d)
    print(ship2.distance())
| 27.380952 | 76 | 0.5 |
a70265a9fabf8259f80a0f70f42b9534c29197a5 | 4,475 | py | Python | project 3/src/project3_submission.py | gttm/eth-machine-learning | 68ce17dcf0eff736c56c410a0e77eba58bedfed5 | [
"MIT"
] | null | null | null | project 3/src/project3_submission.py | gttm/eth-machine-learning | 68ce17dcf0eff736c56c410a0e77eba58bedfed5 | [
"MIT"
] | null | null | null | project 3/src/project3_submission.py | gttm/eth-machine-learning | 68ce17dcf0eff736c56c410a0e77eba58bedfed5 | [
"MIT"
] | 1 | 2018-10-12T23:07:33.000Z | 2018-10-12T23:07:33.000Z | import sys
import ast
import numpy as np
from sklearn.feature_selection import SelectKBest
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import hamming_loss, make_scorer
from scipy.stats import expon, uniform, randint
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegressionCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import RandomizedSearchCV
# NOTE: this is a Python 2 script (print statements).
if len(sys.argv) != 6:
    print "Usage: python {} <preprocess_train_file> <preprocess_test_file> <features_no> <samples_no> <targets>".format(sys.argv[0])
    exit(0)
preprocessTrainFilePath = sys.argv[1]
preprocessTestFilePath = sys.argv[2]
featuresNo = int(sys.argv[3]) # number of features after dimension reduction
samplesNo = int(sys.argv[4]) # number of samples for randomized search
cvFolds = 5
# Load targets: one comma-separated row of binary labels per sample.
targetsFile = open(sys.argv[5], 'r')
targets = [[int(i) for i in line.strip().split(',')] for line in targetsFile]
targets = np.array(targets)
# Load train features: one Python-literal feature vector per line.
preprocessTrainFile = open(preprocessTrainFilePath, 'r')
featureMatrixTrain = []
for line in preprocessTrainFile:
    featureMatrixTrain.append(ast.literal_eval(line))
featureMatrixTrain = np.array(featureMatrixTrain)
print "Features number:", len(featureMatrixTrain[0])
# Load test features in the same format.
preprocessTestFile = open(preprocessTestFilePath, 'r')
featureMatrixTest = []
for line in preprocessTestFile:
    featureMatrixTest.append(ast.literal_eval(line))
featureMatrixTest = np.array(featureMatrixTest)
# FIXME
# Select features (use either this or PCA)
# dirty fix, concatenate the labels to end up with 1D targets array
# (encodes the three binary labels as one integer in [0, 7] for SelectKBest)
targetsSelection = [4*t[0] + 2*t[1] + t[2] for t in targets]
selection = SelectKBest(k=featuresNo)
featureMatrixTrain = selection.fit_transform(featureMatrixTrain, targetsSelection)
print "Features after SelectKBest:", len(featureMatrixTrain[0])
featureMatrixTest = selection.transform(featureMatrixTest)
# PCA (alternative dimensionality reduction, currently disabled)
#pca = PCA(svd_solver="auto", n_components=featuresNo, whiten=True)
#featureMatrixTrain = pca.fit_transform(featureMatrixTrain)
#print "Features after PCA:", len(featureMatrixTrain[0])
#featureMatrixTest = pca.transform(featureMatrixTest)
# Scale features (not needed if we whiten with PCA)
# NOTE: scaler is fit on train only, then applied to test — no leakage.
scaler = StandardScaler()
featureMatrixTrain = scaler.fit_transform(featureMatrixTrain)
featureMatrixTest = scaler.transform(featureMatrixTest)
# One-vs-all classifier
# OneVsRestClassifier: (VotingClassifier: (SVC, KNeighborsClassifier, RandomForestClassifier))
svc = SVC(probability=True)
kNeighbors = KNeighborsClassifier()
rForest = RandomForestClassifier()
estimators = [("svc", svc), ("kNeighbors", kNeighbors), ("rForest", rForest)]
# Soft voting weighted 4:1:1 in favour of the SVC.
voting = VotingClassifier(estimators=estimators, voting = "soft",weights=[4,1,1])
onevsall = OneVsRestClassifier(voting, n_jobs=-1)
# Do randomized search to determine the hyperparameters
# (hamming_loss is a loss, hence greater_is_better=False)
hammingLoss = make_scorer(hamming_loss, greater_is_better=False)
parameters = {
    "estimator__svc__kernel": ["rbf"],
    "estimator__svc__C": expon(scale=100),
    "estimator__svc__gamma": expon(scale=0.1),
    "estimator__svc__degree": [2, 3, 4, 5],
    "estimator__kNeighbors__n_neighbors": [5,7],
    "estimator__rForest__n_estimators": randint(1, 30),
    "estimator__rForest__max_features": randint(1, featuresNo),
    "estimator__rForest__min_samples_leaf": randint(1, 100)
}
onevsall_search = RandomizedSearchCV(onevsall, param_distributions=parameters, cv=cvFolds, scoring=hammingLoss, n_jobs=-1, n_iter=samplesNo)
onevsall_search.fit(featureMatrixTrain, targets)
best = onevsall_search.best_index_
print "One-vs-all best score: {} ({}) {}".format(onevsall_search.cv_results_["mean_test_score"][best], onevsall_search.cv_results_["std_test_score"][best], onevsall_search.best_params_)
# Make predictions
predictions = onevsall_search.predict(featureMatrixTest)
# Write to submission file: one row per (sample, label) pair, predictions
# rendered as "TRUE"/"FALSE". Uses a with-statement so the handle is closed
# even on error; the unused `p = predictions[sample]` local was removed.
submissionFile = "final_sub.csv"
labels = ["gender", "age", "health"]
predictionBoolean = ["FALSE", "TRUE"]
with open(submissionFile, 'w') as f:
    f.write("ID,Sample,Label,Predicted\n")
    ID = 0
    for sample in range(len(predictions)):
        for label in range(len(labels)):
            f.write("{},{},{},{}\n".format(ID + label, sample, labels[label], predictionBoolean[predictions[sample][label]]))
        ID += 3
| 41.055046 | 185 | 0.777654 |
cb9e177cad0f93628456c464ea07f64ddca7fa35 | 1,008 | py | Python | tests/spiders/spider_test.py | SamRozen/cafe-crawl | c310b45a9c9e2be85188c9b3c2b3676be3408501 | [
"MIT"
] | null | null | null | tests/spiders/spider_test.py | SamRozen/cafe-crawl | c310b45a9c9e2be85188c9b3c2b3676be3408501 | [
"MIT"
] | null | null | null | tests/spiders/spider_test.py | SamRozen/cafe-crawl | c310b45a9c9e2be85188c9b3c2b3676be3408501 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from tests.responses.file_response import response_from_file
class SpiderTest(unittest.TestCase):
    """Base class for spider tests.

    Subclasses override the expected-value class attributes below and assign
    a spider instance, then call _test_parse to parse a saved HTML fixture
    and compare the scraped item against the expected values.
    """

    # Expected item fields, overridden by concrete subclasses.
    url = ''
    spider_name = ''
    name = ''
    brand = ''
    description = ''
    image = ''
    price = ''
    size = ''
    spider = None

    def _test_item(self, item):
        # The URL must be populated and match the expected one exactly.
        self.assertNotEqual(item['url'], '')
        self.assertEqual(item['url'], self.url)
        self.assertEqual(item['name'], self.name)
        self.assertEqual(item['brand'], self.brand)
        self.assertEqual(item['description'], self.description)
        self.assertEqual(item['image'], self.image)
        self.assertEqual(item['price'], self.price)
        self.assertEqual(item['size'], self.size)

    def _test_parse(self):
        filename = 'data/%s.html' % self.spider_name
        # print() with a single argument works under both Python 2 and 3;
        # the previous `print '...'` statement was Python-2-only syntax.
        print('Reading response from %s' % filename)
        response = response_from_file(filename, self.url)
        results = self.spider.parse_item(response)
        self._test_item(results)
| 29.647059 | 63 | 0.624008 |
c99419a33808b70db774ee367acc6c7baf181714 | 6,508 | py | Python | pyquil/api/_devices.py | yaoyongxin/pyquil | b77faefab2740b51d538709ae6439a84ddab48b8 | [
"Apache-2.0"
] | 1 | 2020-06-13T10:40:15.000Z | 2020-06-13T10:40:15.000Z | pyquil/api/_devices.py | yaoyongxin/pyquil | b77faefab2740b51d538709ae6439a84ddab48b8 | [
"Apache-2.0"
] | null | null | null | pyquil/api/_devices.py | yaoyongxin/pyquil | b77faefab2740b51d538709ae6439a84ddab48b8 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright 2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import Dict, List, Optional, cast
from requests.exceptions import MissingSchema
from pyquil.api._base_connection import get_json, get_session, ForestConnection
from pyquil.api._config import PyquilConfig
from pyquil.device._main import Device
def list_devices(connection: Optional[ForestConnection] = None) -> List[str]:
    """
    Query the Forest 2.0 server for a list of underlying QPU devices.

    NOTE: These can't directly be used to manufacture pyQuil Device objects, but this gives a list
          of legal values that can be supplied to list_lattices to filter its (potentially very
          noisy) output.

    :param connection: An optional ForestConnection; a default one is created if omitted.
    :return: A list of device names.
    """
    # For the record, the dictionary stored in "devices" that we're getting back is keyed on device
    # names and has this structure in its values:
    #
    # {
    #     "is_online": a boolean indicating the availability of the device,
    #     "is_retuning": a boolean indicating whether the device is busy retuning,
    #     "specs": a Specs object describing the entire device, serialized as a dictionary,
    #     "isa": an ISA object describing the entire device, serialized as a dictionary,
    #     "noise_model": a NoiseModel object describing the entire device, serialized as a dictionary
    # }
    if connection is None:
        connection = ForestConnection()
    session = connection.session
    assert connection.forest_cloud_endpoint is not None
    url = connection.forest_cloud_endpoint + "/devices"
    # Only the device names are returned, sorted for stable output.
    return sorted(get_json(session, url)["devices"].keys())
def list_lattices(
    device_name: Optional[str] = None,
    num_qubits: Optional[int] = None,
    connection: Optional[ForestConnection] = None,
) -> Dict[str, str]:
    """
    Query the Forest 2.0 server for its knowledge of lattices. Optionally filters by underlying
    device name and lattice qubit count.

    :param device_name: If given, only lattices on this device are returned.
    :param num_qubits: If given, only lattices with this many qubits are returned.
    :param connection: An optional ForestConnection; a default one is created if omitted.
    :return: A dictionary keyed on lattice names and valued in dictionaries of the
             form::

                {
                    "device_name": device_name,
                    "qubits": num_qubits
                }
    """
    if connection is None:
        connection = ForestConnection()
    session = connection.session
    assert connection.forest_cloud_endpoint is not None
    url = connection.forest_cloud_endpoint + "/lattices"
    try:
        # None-valued params are simply ignored server-side, so both filters
        # can be passed unconditionally.
        response = get_json(
            session, url, params={"device_name": device_name, "num_qubits": num_qubits}
        )
        return cast(Dict[str, str], response["lattices"])
    except Exception as e:
        # Any failure (auth, bad endpoint, network) is rewrapped with a long
        # troubleshooting message; the original exception repr is appended.
        raise ValueError(
            """
    list_lattices encountered an error when querying the Forest 2.0 endpoint.

    Some common causes for this error include:

    * You don't have valid user authentication information.  Very likely this is because you
      haven't yet been invited to try QCS.  We plan on making our device information publicly
      accessible soon, but in the meanwhile, you'll have to use default QVM configurations and
      to use `list_quantum_computers` with `qpus = False`.

    * You do have user authentication credentials, but they are invalid. You can visit
      https://qcs.rigetti.com/auth/token and save to ~/.qcs/user_auth_token to update your
      authentication credentials. Alternatively, you may provide the path to your credentials in
      your config file or with the USER_AUTH_TOKEN_PATH environment variable::

        [Rigetti Forest]
        user_auth_token_path = ~/.qcs/my_auth_credentials

    * You're missing an address for the Forest 2.0 server endpoint, or the address is invalid.
      This too can be set through the environment variable FOREST_URL or by changing the
      following lines in the QCS config file::

        [Rigetti Forest]
        url = https://forest-server.qcs.rigetti.com

    For the record, here's the original exception: {}
    """.format(
                repr(e)
            )
        )
def get_lattice(lattice_name: Optional[str] = None) -> Device:
    """
    Build a ``Device`` mirroring the Forest 2.0 server's view of the named lattice.

    :param lattice_name: Name of the desired lattice.
    :return: A Device object.
    """
    lattice_data = _get_raw_lattice_data(lattice_name)
    return Device(lattice_data["name"], lattice_data)
def _get_raw_lattice_data(lattice_name: Optional[str] = None) -> Dict[str, str]:
    """
    Fetch the raw description of a lattice from the Forest 2.0 server.

    The returned dictionary has the shape::

        {
            "name": the name of the lattice as a string,
            "device_name": the name of the device, given as a string, that the lattice lies on,
            "specs": a Specs object, serialized as a dictionary,
            "isa": an ISA object, serialized as a dictionary,
            "noise_model": a NoiseModel object, serialized as a dictionary
        }
    """
    config = PyquilConfig()
    session = get_session(config=config)
    lattice_url = f"{config.forest_url}/lattices/{lattice_name}"
    try:
        res = get_json(session, lattice_url)
    except MissingSchema:
        # requests raises MissingSchema when forest_url is absent/malformed.
        raise ValueError(
            f"Error finding lattice `{lattice_name}` at Forest 2.0 server "
            f"""endpoint `{config.forest_url}`.

    Most likely, you're missing an address for the Forest 2.0 server endpoint, or the
    address is invalid. This can be set through the environment variable FOREST_URL or
    by changing the following lines in the QCS config file (by default, at ~/.qcs_config)::

        [Rigetti Forest]
        url = https://rigetti.com/valid/forest/url"""
        )
    return cast(Dict[str, str], res["lattice"])
| 40.42236 | 100 | 0.657498 |
8c210c62ec8697d5a906b18cea6525658d7d6933 | 4,921 | py | Python | Dependencies/gyp-master/test/defines-escaping/gyptest-defines-escaping.py | knight666/exlibris | b21b46e0c84e5c4f81f8048022cda88e7bb3dca2 | [
"MIT"
] | null | null | null | Dependencies/gyp-master/test/defines-escaping/gyptest-defines-escaping.py | knight666/exlibris | b21b46e0c84e5c4f81f8048022cda88e7bb3dca2 | [
"MIT"
] | null | null | null | Dependencies/gyp-master/test/defines-escaping/gyptest-defines-escaping.py | knight666/exlibris | b21b46e0c84e5c4f81f8048022cda88e7bb3dca2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies build of an executable with C++ define specified by a gyp define using
various special characters such as quotes, commas, etc.
"""

import os

import TestGyp

test = TestGyp.TestGyp()


def regyp_and_build(defines, touch_source=True):
  """Re-run gyp with GYP_DEFINES set to `defines`, then rebuild.

  GYP_DEFINES is always removed from the environment afterwards, even when
  run_gyp fails.  After the first build, the source file must be touched
  (preceded by a sleep to beat filesystem timestamp granularity) so the
  rebuild actually picks up the new defines; pass touch_source=False for the
  very first generate+build.
  """
  try:
    os.environ['GYP_DEFINES'] = defines
    test.run_gyp('defines-escaping.gyp')
  finally:
    del os.environ['GYP_DEFINES']
  if touch_source:
    test.sleep()
    test.touch('defines-escaping.c')
  test.build('defines-escaping.gyp')


# Tests string literals, percents, and backslash escapes.
regyp_and_build(
    r"""test_format='\n%s\n' """
    r"""test_args='"Simple test of %s with a literal"'""",
    touch_source=False)
expect = """
Simple test of %s with a literal
"""
test.run_built_executable('defines_escaping', stdout=expect)

# Test multiple comma-and-space-separated string literals.
regyp_and_build(
    r"""test_format='\n%s and %s\n' test_args='"foo", "bar"'""")
expect = """
foo and bar
"""
test.run_built_executable('defines_escaping', stdout=expect)

# Test string literals containing quotes.
regyp_and_build(
    r"""test_format='\n%s %s %s %s %s\n' """
    r"""test_args='"\"These,\"","""
    r""" "\"words,\"","""
    r""" "\"are,\"","""
    r""" "\"in,\"","""
    r""" "\"quotes.\""'""")
expect = """
"These," "words," "are," "in," "quotes."
"""
test.run_built_executable('defines_escaping', stdout=expect)

# Test string literals containing single quotes.
regyp_and_build(
    r"""test_format='\n%s %s %s %s %s\n' """
    r"""test_args="\"'These,'\","""
    r""" \"'words,'\","""
    r""" \"'are,'\","""
    r""" \"'in,'\","""
    r""" \"'quotes.'\"" """)
expect = """
'These,' 'words,' 'are,' 'in,' 'quotes.'
"""
test.run_built_executable('defines_escaping', stdout=expect)

# Test string literals containing different numbers of backslashes before quotes
# (to exercise Windows' quoting behaviour).
regyp_and_build(
    r"""test_format='\n%s\n%s\n%s\n' """
    r"""test_args='"\\\"1 visible slash\\\"","""
    r""" "\\\\\"2 visible slashes\\\\\"","""
    r""" "\\\\\\\"3 visible slashes\\\\\\\""'""")
expect = r"""
\"1 visible slash\"
\\"2 visible slashes\\"
\\\"3 visible slashes\\\"
"""
test.run_built_executable('defines_escaping', stdout=expect)

# Test that various scary sequences are passed unfettered.
regyp_and_build(
    r"""test_format='\n%s\n' """
    r"""test_args='"$foo, " `foo`;"'""")
expect = """
$foo, " `foo`;
"""
test.run_built_executable('defines_escaping', stdout=expect)

# VisualStudio 2010 can't handle passing %PATH%
if not (test.format == 'msvs' and test.uses_msbuild):
  regyp_and_build(
      """test_format='%s' """
      """test_args='"%PATH%"'""")
  expect = "%PATH%"
  test.run_built_executable('defines_escaping', stdout=expect)

# Test commas and semi-colons preceded by backslashes (to exercise Windows'
# quoting behaviour).
regyp_and_build(
    r"""test_format='\n%s\n%s\n' """
    r"""test_args='"\\, \\\\;","""
    # Same thing again, but enclosed in visible quotes.
    r""" "\"\\, \\\\;\""'""")
expect = r"""
\, \\;
"\, \\;"
"""
test.run_built_executable('defines_escaping', stdout=expect)

# We deliberately do not test having an odd number of quotes in a string
# literal because that isn't feasible in MSVS.
test.pass_test()
| 26.6 | 81 | 0.601301 |
480888c90a9c108b00eb56be2c34edf4ad7cab0f | 21,851 | py | Python | mapreduce/lib/blobstore/blobstore.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | 161 | 2019-07-23T06:53:45.000Z | 2022-03-24T01:07:19.000Z | mapreduce/lib/blobstore/blobstore.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | null | null | null | mapreduce/lib/blobstore/blobstore.py | igeeker/v2ex | 9fa81f7c82aa7d162a924d357494b241eb8a6207 | [
"BSD-3-Clause"
] | 26 | 2019-08-05T06:09:38.000Z | 2021-07-08T02:05:13.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Python blobstore API used by app developers.
Contains methods used to interface with Blobstore API. Includes db.Model-like
class representing a reference to a very large BLOB. Imports db.Key-like
class representing a blob-key.
"""
import cgi
import email
import os
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api.blobstore import blobstore
from google.appengine.ext import db
# Names exported by `from ... import *` and the documented public surface.
__all__ = ['BLOB_INFO_KIND',
           'BLOB_KEY_HEADER',
           'BLOB_RANGE_HEADER',
           'BlobFetchSizeTooLargeError',
           'BlobInfo',
           'BlobInfoParseError',
           'BlobKey',
           'BlobNotFoundError',
           'BlobReferenceProperty',
           'BlobReader',
           'DataIndexOutOfRangeError',
           'Error',
           'InternalError',
           'MAX_BLOB_FETCH_SIZE',
           'UPLOAD_INFO_CREATION_HEADER',
           'create_upload_url',
           'delete',
           'fetch_data',
           'get',
           'parse_blob_info']

# Re-export the low-level API's error types, key type and helpers so users of
# this module do not need to import google.appengine.api.blobstore directly.
Error = blobstore.Error
InternalError = blobstore.InternalError
BlobFetchSizeTooLargeError = blobstore.BlobFetchSizeTooLargeError
BlobNotFoundError = blobstore.BlobNotFoundError
_CreationFormatError = blobstore._CreationFormatError
DataIndexOutOfRangeError = blobstore.DataIndexOutOfRangeError
BlobKey = blobstore.BlobKey
create_upload_url = blobstore.create_upload_url
delete = blobstore.delete


class BlobInfoParseError(Error):
  """CGI parameter does not contain valid BlobInfo record."""


# Constants mirrored from the low-level API: the Datastore kind backing blob
# records, HTTP header names, the per-call fetch_data size cap, and the header
# that carries upload-creation metadata.
BLOB_INFO_KIND = blobstore.BLOB_INFO_KIND
BLOB_KEY_HEADER = blobstore.BLOB_KEY_HEADER
BLOB_RANGE_HEADER = blobstore.BLOB_RANGE_HEADER
MAX_BLOB_FETCH_SIZE = blobstore.MAX_BLOB_FETCH_SIZE
UPLOAD_INFO_CREATION_HEADER = blobstore.UPLOAD_INFO_CREATION_HEADER
class _GqlQuery(db.GqlQuery):
  """GqlQuery class that explicitly sets model-class.

  This does the same as the original db.GqlQuery class except that it does
  not try to find the model class based on the compiled GQL query.  The
  caller instead provides the query with a model class to use for construction.

  This class is required for compatibility with the current db.py query
  mechanism but will be removed in the future.  DO NOT USE.
  """

  def __init__(self, query_string, model_class, *args, **kwds):
    """Constructor.

    Args:
      query_string: Properly formatted GQL query string.
      model_class: Model class from which entities are constructed.
      *args: Positional arguments used to bind numeric references in the query.
      **kwds: Dictionary-based arguments for named references.
    """
    # Local import to avoid a module-level dependency on the gql package.
    from google.appengine.ext import gql
    app = kwds.pop('_app', None)
    self._proto_query = gql.GQL(query_string, _app=app, namespace='')
    # Intentionally skip db.GqlQuery.__init__ (which would re-derive the
    # model class from the compiled query): call the db.Query grandparent
    # directly with the explicitly supplied model_class.
    super(db.GqlQuery, self).__init__(model_class, namespace='')
    self.bind(*args, **kwds)
class BlobInfo(object):
  """Information about blobs in Blobstore.

  This is a db.Model-like class that contains information about blobs stored
  by an application.  Like db.Model, this class is backed by a Datastore
  entity; however, BlobInfo instances are read-only and have a much more
  limited interface.

  Each BlobInfo has a key of type BlobKey associated with it. This key is
  specific to the Blobstore API and is not compatible with db.get.  The key
  can be used for quick lookup by passing it to BlobInfo.get.  This
  key converts easily to a string, which is web safe and can be embedded
  in URLs.

  Properties:
    content_type: Content type of blob.
    creation: Creation date of blob, when it was uploaded.
    filename: Filename user selected from their machine.
    size: Size of uncompressed blob.

  All properties are read-only.  Attempting to assign a value to a property
  will raise NotImplementedError.
  """

  _unindexed_properties = frozenset()

  @property
  def content_type(self):
    return self.__get_value('content_type')

  @property
  def creation(self):
    return self.__get_value('creation')

  @property
  def filename(self):
    return self.__get_value('filename')

  @property
  def size(self):
    return self.__get_value('size')

  def __init__(self, entity_or_blob_key, _values=None):
    """Constructor for wrapping blobstore entity.

    The constructor should not be used outside this package and tests.

    Args:
      entity_or_blob_key: Datastore entity that represents the blob reference,
        or a BlobKey naming it.
      _values: When a BlobKey is given, an optional pre-fetched entity (or
        entity-like mapping) used to avoid a lazy Datastore lookup later.

    Raises:
      TypeError: If entity_or_blob_key is neither an Entity nor a BlobKey.
    """
    if isinstance(entity_or_blob_key, datastore.Entity):
      self.__entity = entity_or_blob_key
      self.__key = BlobKey(entity_or_blob_key.key().name())
    elif isinstance(entity_or_blob_key, BlobKey):
      self.__entity = _values
      self.__key = entity_or_blob_key
    else:
      # Bug fix: the TypeError was previously constructed but never raised,
      # silently leaving the instance uninitialized on bad input.
      raise TypeError('Must provide Entity or BlobKey')

  @classmethod
  def from_entity(cls, entity):
    """Convert entity to BlobInfo.

    This method is required for compatibility with the current db.py query
    mechanism but will be removed in the future.  DO NOT USE.
    """
    return BlobInfo(entity)

  @classmethod
  def properties(cls):
    """Set of properties that belong to BlobInfo.

    This method is required for compatibility with the current db.py query
    mechanism but will be removed in the future.  DO NOT USE.
    """
    return set(('content_type', 'creation', 'filename', 'size'))

  def __get_value(self, name):
    """Get a BlobInfo value, loading entity if necessary.

    This method allows lazy loading of the underlying datastore entity.  It
    should never be invoked directly.

    Args:
      name: Name of property to get value for.

    Returns:
      Value of BlobInfo property from entity.

    Raises:
      AttributeError: If the backing entity has no such property.
    """
    if self.__entity is None:
      # Lazy load: fetch the backing entity by its blob-key-derived key name.
      self.__entity = datastore.Get(
          datastore_types.Key.from_path(
              self.kind(), str(self.__key), namespace=''))
    try:
      return self.__entity[name]
    except KeyError:
      # Surface a missing entity field as a missing attribute.
      raise AttributeError(name)

  def key(self):
    """Get key for blob.

    Returns:
      BlobKey instance that identifies this blob.
    """
    return self.__key

  def delete(self):
    """Permanently delete blob from Blobstore."""
    delete(self.key())

  @classmethod
  def get(cls, blob_keys):
    """Retrieve BlobInfo by key or list of keys.

    Args:
      blob_keys: A key or a list of keys.  Keys may be instances of str,
        unicode and BlobKey.

    Returns:
      A BlobInfo instance associated with provided key or a list of BlobInfo
      instances if a list of keys was provided.  Keys that are not found in
      Blobstore return None as their values.
    """
    blob_keys = cls.__normalize_and_convert_keys(blob_keys)
    try:
      entities = datastore.Get(blob_keys)
    except datastore_errors.EntityNotFoundError:
      # Single-key lookup that missed; multi-key lookups return None slots.
      return None
    if isinstance(entities, datastore.Entity):
      return BlobInfo(entities)
    else:
      references = []
      for entity in entities:
        if entity is not None:
          references.append(BlobInfo(entity))
        else:
          references.append(None)
      return references

  @classmethod
  def all(cls):
    """Get query for all Blobs associated with application.

    Returns:
      A db.Query object querying over BlobInfo's datastore kind.
    """
    return db.Query(model_class=cls, namespace='')

  @classmethod
  def __factory_for_kind(cls, kind):
    # Only the blob-info kind may be queried through this model class.
    if kind == BLOB_INFO_KIND:
      return BlobInfo
    raise ValueError('Cannot query for kind %s' % kind)

  @classmethod
  def gql(cls, query_string, *args, **kwds):
    """Returns a query using GQL query string.

    See appengine/ext/gql for more information about GQL.

    Args:
      query_string: Properly formatted GQL query string with the
        'SELECT * FROM <entity>' part omitted
      *args: rest of the positional arguments used to bind numeric references
        in the query.
      **kwds: dictionary-based arguments (for named parameters).

    Returns:
      A gql.GqlQuery object querying over BlobInfo's datastore kind.
    """
    return _GqlQuery('SELECT * FROM %s %s'
                     % (cls.kind(), query_string),
                     cls,
                     *args,
                     **kwds)

  @classmethod
  def kind(cls):
    """Get the entity kind for the BlobInfo.

    This method is required for compatibility with the current db.py query
    mechanism but will be removed in the future.  DO NOT USE.
    """
    return BLOB_INFO_KIND

  @classmethod
  def __normalize_and_convert_keys(cls, keys):
    """Normalize and convert all keys to BlobKey type.

    This method is based on datastore.NormalizeAndTypeCheck().

    Args:
      keys: A single key or a list/tuple of keys.  Keys may be a string
        or BlobKey

    Returns:
      Single key or list with all strings replaced by BlobKey instances.

    Raises:
      datastore_errors.BadArgumentError: If any key is neither a string
        nor a BlobKey.
    """
    if isinstance(keys, (list, tuple)):
      multiple = True
      # Shallow copy: the caller's list is left untouched.
      keys = list(keys)
    else:
      multiple = False
      keys = [keys]

    for index, key in enumerate(keys):
      if not isinstance(key, (basestring, BlobKey)):
        raise datastore_errors.BadArgumentError(
            'Expected str or BlobKey; received %s (a %s)' % (
                key,
                datastore.typename(key)))
      keys[index] = datastore.Key.from_path(cls.kind(), str(key), namespace='')

    if multiple:
      return keys
    else:
      return keys[0]
def get(blob_key):
  """Look up the BlobInfo record for a blob key.

  Convenience module-level wrapper that delegates to BlobInfo.get.
  """
  return BlobInfo.get(blob_key)
def parse_blob_info(field_storage):
"""Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
"""
if field_storage is None:
return None
field_name = field_storage.name
def get_value(dict, name):
value = dict.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key = BlobKey(get_value(field_storage.type_options, 'blob-key'))
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError, err:
raise BlobInfoParseError(str(err))
return BlobInfo(blob_key,
{'content_type': content_type,
'creation': creation,
'filename': filename,
'size': size,
})
class BlobReferenceProperty(db.Property):
  """Property compatible with db.Model classes.

  Add references to blobs to domain models using BlobReferenceProperty:

    class Picture(db.Model):
      title = db.StringProperty()
      image = blobstore.BlobReferenceProperty()
      thumbnail = blobstore.BlobReferenceProperty()

  To find the size of a picture using this model:

    picture = Picture.get(picture_key)
    print picture.image.size

  BlobInfo objects are lazily loaded, so iterating over models that hold
  BlobKeys is efficient; the following does not need to hit Datastore for
  each image key:

    list_of_untitled_blobs = []
    for picture in Picture.gql("WHERE title=''"):
      list_of_untitled_blobs.append(picture.image.key())
  """

  data_type = BlobInfo

  def get_value_for_datastore(self, model_instance):
    """Translate model property to datastore value."""
    info = getattr(model_instance, self.name)
    return None if info is None else info.key()

  def make_value_from_datastore(self, value):
    """Translate datastore value to BlobInfo."""
    return None if value is None else BlobInfo(value)

  def validate(self, value):
    """Validate that assigned value is BlobInfo.

    Automatically converts from strings and BlobKey instances.
    """
    if isinstance(value, BlobKey):
      value = BlobInfo(value)
    elif isinstance(value, basestring):
      value = BlobInfo(BlobKey(value))
    return super(BlobReferenceProperty, self).validate(value)
def fetch_data(blob, start_index, end_index):
  """Fetch a fragment of a blob's data.

  At most MAX_BLOB_FETCH_SIZE bytes are returned.  A fragment extending past
  the end of the blob yields only the data from start_index to the blob's
  end (shorter than requested); a fragment lying entirely outside the blob
  yields the empty string.

  Args:
    blob: BlobInfo, BlobKey, str or unicode representation of BlobKey of
      blob to fetch data from.
    start_index: Start index of blob data to fetch.  May not be negative.
    end_index: End index (inclusive) of blob data to fetch.  Must be
      >= start_index.

  Returns:
    str containing partial data of blob.  If the indexes are legal but
    outside the boundaries of the blob, will return empty string.

  Raises:
    TypeError if start_index or end_index are not indexes, or when blob
      is not a string, BlobKey or BlobInfo.
    DataIndexOutOfRangeError when start_index < 0 or end_index < start_index.
    BlobFetchSizeTooLargeError when the requested fragment is larger than
      MAX_BLOB_FETCH_SIZE.
    BlobNotFoundError when blob does not exist.
  """
  blob_key = blob.key() if isinstance(blob, BlobInfo) else blob
  return blobstore.fetch_data(blob_key, start_index, end_index)
class BlobReader(object):
  """Provides a read-only file-like interface to a blobstore blob."""

  # Seek origins, mirroring os.SEEK_SET / SEEK_CUR / SEEK_END.
  SEEK_SET = 0
  SEEK_CUR = 1
  SEEK_END = 2

  def __init__(self, blob_key, buffer_size=131072, position=0):
    """Constructor.

    Args:
      blob_key: The blob key or string blob key to read from.
      buffer_size: The minimum size to fetch chunks of data from blobstore.
      position: The initial position in the file.
    """
    self.__blob_key = blob_key
    self.__buffer_size = buffer_size
    self.__buffer = ""
    # __position is the absolute offset in the blob; __buffer_position is
    # the offset of the next unread byte within the local buffer.
    self.__position = position
    self.__buffer_position = 0
    self.__eof = False
    # BlobInfo is fetched lazily by the blob_info property.
    self.__blob_info = None

  def __iter__(self):
    """Returns a file iterator for this BlobReader."""
    return self

  def __getstate__(self):
    """Returns the serialized state for this BlobReader."""
    # Only key, chunk size and absolute position are pickled; the buffer is
    # re-fetched lazily after unpickling.
    return (self.__blob_key, self.__buffer_size, self.__position)

  def __setstate__(self, state):
    """Restores pickled state for this BlobReader."""
    self.__init__(*state)

  def close(self):
    """Close the file.

    A closed file cannot be read or written any more.  Any operation which
    requires that the file be open will raise a ValueError after the file
    has been closed.  Calling close() more than once is allowed.
    """
    self.__blob_key = None

  def flush(self):
    # Read-only interface: every mutating file operation raises.
    raise IOError("BlobReaders are read-only")

  def next(self):
    """Returns the next line from the file.

    Returns:
      A string, terminated by \n.  The last line may not be terminated by \n.
      If EOF is reached, an empty string will be returned.
    """
    line = self.readline()
    if not line:
      raise StopIteration
    return line

  def __read_from_buffer(self, size):
    """Reads at most size bytes from the buffer.

    Args:
      size: Number of bytes to read, or negative to read the entire buffer.

    Returns:
      Tuple (data, size):
        data: The bytes read from the buffer.
        size: The remaining unread byte count.

    Raises:
      ValueError: If the reader has been closed.
    """
    if not self.__blob_key:
      raise ValueError("File is closed")

    if size < 0:
      end_pos = len(self.__buffer)
    else:
      end_pos = self.__buffer_position + size
    data = self.__buffer[self.__buffer_position:end_pos]

    # Advance both the absolute and the in-buffer positions by what we got.
    data_length = len(data)
    size -= data_length
    self.__position += data_length
    self.__buffer_position += data_length

    # Drop the buffer once fully consumed so memory is not retained.
    if self.__buffer_position == len(self.__buffer):
      self.__buffer = ""
      self.__buffer_position = 0

    return data, size

  def __fill_buffer(self, size=0):
    """Fills the internal buffer.

    Args:
      size: Number of bytes to read.  Will be clamped to
        [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
    """
    read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)

    self.__buffer = fetch_data(self.__blob_key, self.__position,
                               self.__position + read_size - 1)
    self.__buffer_position = 0
    # A short read means we hit the end of the blob.
    self.__eof = len(self.__buffer) < read_size

  def read(self, size=-1):
    """Read at most size bytes from the file.

    Fewer bytes are read if the read hits EOF before obtaining size bytes.
    If the size argument is negative or omitted, read all data until EOF is
    reached.  The bytes are returned as a string object.  An empty string is
    returned when EOF is encountered immediately.

    Calling read() without a size specified is likely to be dangerous, as it
    may read excessive amounts of data.

    Args:
      size: Optional. The maximum number of bytes to read.  When omitted,
        read() returns all remaining data in the file.

    Returns:
      The read data, as a string.
    """
    data_list = []
    while True:
      data, size = self.__read_from_buffer(size)
      data_list.append(data)
      if size == 0 or self.__eof:
        return ''.join(data_list)
      self.__fill_buffer(size)

  def readline(self, size=-1):
    """Read one entire line from the file.

    A trailing newline character is kept in the string (but may be absent
    when a file ends with an incomplete line).  If the size argument is
    present and non-negative, it is a maximum byte count (including the
    trailing newline) and an incomplete line may be returned.  An empty
    string is returned only when EOF is encountered immediately.

    Args:
      size: Optional. The maximum number of bytes to read.

    Returns:
      The read data, as a string.
    """
    data_list = []
    while True:
      if size < 0:
        end_pos = len(self.__buffer)
      else:
        end_pos = self.__buffer_position + size
      newline_pos = self.__buffer.find('\n', self.__buffer_position, end_pos)
      if newline_pos != -1:
        # Newline found in the buffered data: consume through it and stop.
        data_list.append(
            self.__read_from_buffer(newline_pos
                                    - self.__buffer_position + 1)[0])
        break
      else:
        # No newline in the buffer: take what is there and refill.
        data, size = self.__read_from_buffer(size)
        data_list.append(data)
        if size == 0 or self.__eof:
          break
        self.__fill_buffer()
    return ''.join(data_list)

  def readlines(self, sizehint=None):
    """Read until EOF using readline() and return a list of lines thus read.

    If the optional sizehint argument is present, instead of reading up to
    EOF, whole lines totalling approximately sizehint bytes (possibly after
    rounding up to an internal buffer size) are read.

    Args:
      sizehint: A hint as to the maximum number of bytes to read.

    Returns:
      A list of strings, each being a single line from the file.
    """
    lines = []
    while sizehint is None or sizehint > 0:
      line = self.readline()
      if sizehint:
        sizehint -= len(line)
      if not line:
        break
      lines.append(line)
    return lines

  def seek(self, offset, whence=SEEK_SET):
    """Set the file's current position, like stdio's fseek().

    The whence argument is optional and defaults to os.SEEK_SET or 0
    (absolute file positioning); other values are os.SEEK_CUR or 1 (seek
    relative to the current position) and os.SEEK_END or 2 (seek relative to
    the file's end).

    Args:
      offset: The relative offset to seek to.
      whence: Defines what the offset is relative to. See description for
        details.
    """
    if whence == BlobReader.SEEK_CUR:
      offset = self.__position + offset
    elif whence == BlobReader.SEEK_END:
      # Relative to the blob's total size; may trigger a BlobInfo fetch.
      offset = self.blob_info.size + offset
    # Invalidate the buffer; the next read re-fetches at the new position.
    self.__buffer = ""
    self.__buffer_position = 0
    self.__position = offset
    self.__eof = False

  def tell(self):
    """Return the file's current position, like stdio's ftell()."""
    return self.__position

  def truncate(self, size):
    raise IOError("BlobReaders are read-only")

  def write(self, str):
    raise IOError("BlobReaders are read-only")

  def writelines(self, sequence):
    raise IOError("BlobReaders are read-only")

  @property
  def blob_info(self):
    """Returns the BlobInfo for this file."""
    if not self.__blob_info:
      self.__blob_info = BlobInfo.get(self.__blob_key)
    return self.__blob_info

  @property
  def closed(self):
    """Returns True if this file is closed, False otherwise."""
    return self.__blob_key is None
| 31.038352 | 80 | 0.687932 |
78c39b2cc3b176cedaf1da6c71f0ed5fb71a7e89 | 14,439 | py | Python | ja3toMISP.py | eCrimeLabs/ja3toMISP | 41b9ffe0f61fec92859450beeee0942af7c51a16 | [
"MIT"
] | 8 | 2018-12-30T17:02:08.000Z | 2021-10-05T17:26:14.000Z | ja3toMISP.py | eCrimeLabs/ja3toMISP | 41b9ffe0f61fec92859450beeee0942af7c51a16 | [
"MIT"
] | null | null | null | ja3toMISP.py | eCrimeLabs/ja3toMISP | 41b9ffe0f61fec92859450beeee0942af7c51a16 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Generate JA3 fingerprints from PCAPs using Python3.
and then either add to an existing event or create
new event with the information from the PCAP's
The calculation of the JA3 fingerprint is originally from:
https://github.com/salesforce/ja3/blob/master/python/ja3/ja3.py
MIT License
Copyright (c) 2019 Dennis Rand (https://www.ecrimelabs.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import dpkt
import json
import socket
import struct
import sys
import pprint
from datetime import datetime
from hashlib import md5
from pymisp import PyMISP, MISPEvent, MISPObject, MISPSighting
from keys import misp_url, misp_key, misp_verifycert, proxies
__author__ = "Dennis Rand - eCrimeLabs"
__copyright__ = "Copyright (c) 2019, eCrimeLabs"
__credits__ = ["Tommy Stallings", "John B. Althouse", "Jeff Atkinson", "Josh Atkins"]
__version__ = "1.0.0"
__maintainer__ = "Dennis Rand"
GREASE_TABLE = {0x0a0a: True, 0x1a1a: True, 0x2a2a: True, 0x3a3a: True,
0x4a4a: True, 0x5a5a: True, 0x6a6a: True, 0x7a7a: True,
0x8a8a: True, 0x9a9a: True, 0xaaaa: True, 0xbaba: True,
0xcaca: True, 0xdada: True, 0xeaea: True, 0xfafa: True}
# GREASE_TABLE Ref: https://tools.ietf.org/html/draft-davidben-tls-grease-00
SSL_PORT = 443
TLS_HANDSHAKE = 22
sightings_to_add = []
def splash():
    """Print the tool's startup banner to stdout."""
    banner_lines = (
        "\r\n",
        'JA3 fingerprint to MISP Objects',
        '(c)2018 eCrimeLabs',
        'https://www.ecrimelabs.com',
        "----------------------------------------\r\n",
    )
    for line in banner_lines:
        print(line)
def init(misp_url, misp_key):
    """Create a PyMISP client using the TLS/proxy settings from keys.py."""
    client = PyMISP(misp_url, misp_key, misp_verifycert, 'json',
                    debug=False, proxies=proxies)
    return client
def sighting(uuid, value, type, source, timestamp, misp):
    """Record a single MISP sighting for the given value.

    NOTE(review): the `uuid` parameter is currently unused; it is kept to
    preserve the call signature.  `type` shadows the builtin but renaming it
    would break keyword callers.
    """
    record = MISPSighting()
    record.from_dict(value=value, source=source,
                     timestamp=int(timestamp), type=type)
    misp.set_sightings(record)
def create_misp_objects(ja3_objects, misp_event, pcap_filename, misp, to_ids):
    """Attach one 'ja3' MISP object per new fingerprint/destination pair.

    Pairs already present in `misp_event` are skipped.  For each new object,
    a sighting tuple for the JA3 attribute is queued in the module-level
    `sightings_to_add` list; the updated event is then pushed to MISP.
    """
    event = MISPEvent()
    event.from_dict(**misp_event)
    print ("- Creating object(s)")
    for record in ja3_objects.values():
        digest = record['ja3_digest']
        dst_ip = record['destination_ip']
        src_ip = record['source_ip']
        unixtime = record['timestamp']
        first_seen = record['time']
        if is_in_misp_event(misp_event, digest, dst_ip):
            continue
        print ("\t " + digest + " -> " + dst_ip + " -> " + src_ip)
        ja3_obj = event.add_object(name='ja3', comment=pcap_filename,
                                   distribution=5, standalone=False)
        digest_attr = ja3_obj.add_attribute('ja3-fingerprint-md5',
                                            value=digest, distribution=5)
        sightings_to_add.append((digest_attr['uuid'], digest, 0, "PCAP", unixtime))
        ja3_obj.add_attribute('ip-src', value=src_ip, to_ids=to_ids, distribution=5)
        ja3_obj.add_attribute('ip-dst', value=dst_ip, to_ids=to_ids, distribution=5)
        ja3_obj.add_attribute('first-seen', value=first_seen,
                              disable_correlation=True, distribution=5)
    try:
        misp.update(event)
    except (KeyError, RuntimeError, TypeError, NameError):
        print ("An error occoured when updating the event")
        sys.exit()
    print ("- The MISP objects seems to have been added correctly to the event \r\n")
def is_in_misp_event(misp_event, ja3_digest, destination_ip):
    """Return True when some object in the event already carries both the
    given JA3 digest and the destination IP among its attribute values."""
    for misp_object in misp_event['Object']:
        has_digest = False
        has_ip = False
        for attribute in misp_object['Attribute']:
            value = attribute['value']
            if value == ja3_digest:
                has_digest = True
            elif value == destination_ip:
                has_ip = True
        if has_digest and has_ip:
            return True
    return False
def convert_ip(value):
    """Render a packed binary IP address as text.

    :param value: Raw 4-byte (IPv4) or 16-byte (IPv6) address.
    :returns: str
    """
    family = socket.AF_INET if len(value) == 4 else socket.AF_INET6
    return socket.inet_ntop(family, value)
def parse_variable_array(buf, byte_len):
    """Read a length-prefixed field from the buffer.

    The first `byte_len` bytes (big-endian) give the payload length; the
    payload follows immediately.

    :param buf: Buffer to operate on
    :type buf: bytes
    :param byte_len: Width of the length prefix in bytes (1-4)
    :type byte_len: int
    :returns: (payload bytes, total bytes consumed including the prefix)
    """
    assert byte_len <= 4
    # Map prefix width to a struct format; a 3-byte length is zero-padded to 4.
    fmt = ('!B', '!H', '!I', '!I')[byte_len - 1]
    prefix = buf[:byte_len]
    if byte_len == 3:
        prefix = b'\x00' + prefix
    (size,) = struct.unpack(fmt, prefix)
    return buf[byte_len:byte_len + size], size + byte_len
def ntoh(buf):
    """Interpret *buf* as a network-order (big-endian) unsigned integer.

    Only 1-, 2- and 4-byte buffers are accepted, mirroring the widths used
    by TLS hello fields.

    :param buf: Bytes to convert
    :type buf: bytearray
    :returns: int
    :raises ValueError: if the buffer is not exactly 1, 2 or 4 bytes long
    """
    if len(buf) not in (1, 2, 4):
        raise ValueError('Invalid input buffer size for NTOH')
    return int.from_bytes(buf, 'big')
def convert_to_ja3_segment(data, element_width):
    """Convert a packed array of fixed-width elements to a JA3 segment.

    Elements listed in the module-level GREASE_TABLE are dropped, per the
    JA3 specification.

    :param data: Raw packed element array from the hello
    :param element_width: Bytes per element (1 or 2 in practice)
    :type element_width: int
    :returns: str of dash-joined decimal values
    :raises ValueError: when len(data) is not a multiple of element_width
    """
    data = bytearray(data)
    if len(data) % element_width:
        raise ValueError(
            '{count} is not a multiple of {width}'.format(
                count=len(data), width=element_width))
    elements = (ntoh(data[offset:offset + element_width])
                for offset in range(0, len(data), element_width))
    return "-".join(str(value) for value in elements
                    if value not in GREASE_TABLE)
def process_extensions(client_handshake):
    """Summarise the hello's extensions as the last three JA3 fields.

    :param client_handshake: Handshake data from the packet
    :type client_handshake: dpkt.ssl.TLSClientHello
    :returns: list of three strings: extension ids, elliptic curves,
        elliptic curve point formats
    """
    if not hasattr(client_handshake, "extensions"):
        # Three empty fields so the caller's ",".join keeps its commas.
        return ["", "", ""]
    ext_ids = []
    curves = ""
    point_formats = ""
    for ext_val, ext_data in client_handshake.extensions:
        # GREASE extension ids are excluded from the id list.
        if not GREASE_TABLE.get(ext_val):
            ext_ids.append(ext_val)
        if ext_val == 0x0a:
            # supported_groups / elliptic curves (16 bit values)
            body, _ = parse_variable_array(ext_data, 2)
            curves = convert_to_ja3_segment(body, 2)
        elif ext_val == 0x0b:
            # ec_point_formats (8 bit values)
            body, _ = parse_variable_array(ext_data, 1)
            point_formats = convert_to_ja3_segment(body, 1)
    return ["-".join(str(e) for e in ext_ids), curves, point_formats]
def process_pcap(pcap, any_port=False):
    """Process packets within the PCAP.

    Walks every packet, keeps only TLS Client Hellos and computes the JA3
    fingerprint of each.

    :param pcap: Opened PCAP file to be processed
    :type pcap: dpkt.pcap.Reader
    :param any_port: Whether or not to search for non-SSL ports
    :type any_port: bool
    :returns: dict keyed by "<ja3_digest>-<destination_ip>", so repeated
        hellos for the same digest/destination pair overwrite each other
        (i.e. are deduplicated)
    """
    results = dict() # keyed dict for dedup (historically a list)
    for timestamp, buf in pcap:
        # Decode layer by layer; anything that is not Ethernet/IP/TCP on
        # the TLS port (unless any_port) is silently skipped.
        try:
            eth = dpkt.ethernet.Ethernet(buf)
        except Exception:
            continue
        if not isinstance(eth.data, dpkt.ip.IP):
            # We want an IP packet
            continue
        if not isinstance(eth.data.data, dpkt.tcp.TCP):
            # TCP only
            continue
        ip = eth.data
        tcp = ip.data
        if not (tcp.dport == SSL_PORT or tcp.sport == SSL_PORT or any_port):
            # Doesn't match SSL port or we are picky
            continue
        if len(tcp.data) <= 0:
            continue
        tls_handshake = bytearray(tcp.data)
        # Cheap first-byte check for a TLS handshake record before paying
        # for the full dpkt parse below.
        if tls_handshake[0] != TLS_HANDSHAKE:
            continue
        records = list()
        try:
            # bytes_used is unused; only the parsed records matter here.
            records, bytes_used = dpkt.ssl.tls_multi_factory(tcp.data)
        except dpkt.ssl.SSL3Exception:
            continue
        except dpkt.dpkt.NeedData:
            continue
        if len(records) <= 0:
            continue
        for record in records:
            if record.type != TLS_HANDSHAKE:
                continue
            if len(record.data) == 0:
                continue
            client_hello = bytearray(record.data)
            if client_hello[0] != 1:
                # We only want client HELLO (handshake type 1)
                continue
            try:
                handshake = dpkt.ssl.TLSHandshake(record.data)
            except dpkt.dpkt.NeedData:
                # Looking for a handshake here
                continue
            if not isinstance(handshake.data, dpkt.ssl.TLSClientHello):
                # Still not the HELLO
                continue
            client_handshake = handshake.data
            # First length-prefixed field (the session ID) is skipped; the
            # second one is the cipher-suite list used for JA3.
            buf, ptr = parse_variable_array(client_handshake.data, 1)
            buf, ptr = parse_variable_array(client_handshake.data[ptr:], 2)
            # JA3 = version,ciphers,extensions,curves,point-formats
            ja3 = [str(client_handshake.version)]
            # Cipher Suites (16 bit values)
            ja3.append(convert_to_ja3_segment(buf, 2))
            ja3 += process_extensions(client_handshake)
            ja3 = ",".join(ja3)
            # md5 is expected from the module-level hashlib import.
            ja3_digest = md5(ja3.encode()).hexdigest()
            # NOTE(review): this first assignment is dead; the literal
            # below immediately rebinds `data`.
            data = dict()
            data = {"source_ip": convert_ip(ip.src),
                    "destination_ip": convert_ip(ip.dst),
                    "source_port": tcp.sport,
                    "destination_port": tcp.dport,
                    "ja3": ja3,
                    "ja3_digest": ja3_digest,
                    "timestamp": timestamp,
                    "time": datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')}
            results[ja3_digest + '-' + convert_ip(ip.dst)] = data
    return results
def main():
    """CLI entry point: extract JA3 fingerprints from a PCAP and either
    print them as JSON or import them into MISP (new or existing event)."""
    splash()
    # NOTE(review): the string below sits *after* splash(), so it is a
    # plain no-op expression statement, not this function's docstring.
    """Intake arguments from the user and print out JA3 output."""
    desc = "Extracting JA3 fingerprints from PCAP files, and importing into MISP as objects"
    parser = argparse.ArgumentParser(description=(desc))
    parser.add_argument("-f", "--file", required=True, help="The pcap file to process")
    parser.add_argument("-a", "--any_port", required=False,
                        action="store_true", default=False,
                        help="Look for client hellos on any port instead of just 443")
    parser.add_argument("-j", "--json", required=False, action="store_true",
                        help="Print out as JSON records for downstream parsing or for debug reasons")
    parser.add_argument("-c", "--create", required=False, type=str,
                        help="Create a new MISP event with the specified name")
    parser.add_argument("-u", "--uuid", required=False, type=str,
                        help="Add to an allready existing event (input has to be UUID)")
    parser.add_argument("-i", "--ids", required=False, action="store_true", default=False,
                        help="Adds the to_ids to the source and destination IP's")
    parser.add_argument("-s", "--sightings", required=False, action="store_true", default=False,
                        help="Adds sighting to the JA3-fingerprint-md5")
    args = parser.parse_args()
    # Parse the PCAP up front; ja3_objects is a dict keyed by digest-ip.
    ja3_objects = None
    pcap_filename = args.file
    to_ids = args.ids
    with open(args.file, 'rb') as fp:
        try:
            capture = dpkt.pcap.Reader(fp)
        except ValueError as e:
            raise Exception("File doesn't appear to be a PCAP: %s" % e)
        ja3_objects = process_pcap(capture, any_port=args.any_port)
    if (args.json):
        # JSON mode prints and exits; no MISP connection is made.
        ja3_objects = json.dumps(ja3_objects, indent=4, sort_keys=True)
        print(ja3_objects)
        sys.exit(0)
    elif (args.uuid):
        # Add to existing event identified by UUID.
        # init/misp_url/misp_key are expected as module-level globals
        # defined elsewhere in this file -- TODO confirm.
        try:
            misp = init(misp_url, misp_key)
            misp_event = misp.get_event(args.uuid)['Event']
        except KeyError as e:
            print ("An error occoured getting the UUID, either connection issues or UUID does not exits.")
            sys.exit(0)
        create_misp_objects(ja3_objects, misp_event, pcap_filename, misp, to_ids)
    elif (args.create):
        # Create a new event in MISP and attach the objects to it.
        try:
            misp = init(misp_url, misp_key)
            # distribution = "Your organisation only", threat level = "Undefined", Analysis = "Ongoing"
            misp_event = misp.new_event(info=args.create, distribution=0, threat_level_id=4, analysis=1, published=False)
            create_misp_objects(ja3_objects, misp_event['Event'], pcap_filename, misp, to_ids)
        except KeyError as e:
            print ("An error occoured creating a new MISP event.")
            sys.exit(0)
        pass
    else:
        pass
    if (args.sightings):
        # NOTE(review): `misp` is only bound in the --uuid/--create branches;
        # using -s without one of those raises NameError here.
        print ("- Adding Sightings to JA3-fingerprint-md5 \r\n")
        # sightings_to_add entries: (uuid, value, type, source, timestamp)
        for suuid, svalue, stype, ssource, stimetamp in sightings_to_add:
            sighting(suuid, svalue, stype, ssource, stimetamp, misp)
        print ("\r\n")
if __name__ == "__main__":
    main()
| 37.897638 | 121 | 0.630445 |
6703bd4fdf510cf5b7e1b97c245629980a8e26c6 | 6,434 | py | Python | src/ZServer/Zope2/utilities/zpasswd.py | gogobd/ZServer | d85912680297f67138ebe38409fcb36068b37c3d | [
"ZPL-2.1"
] | 4 | 2017-08-01T15:06:47.000Z | 2020-04-19T05:22:13.000Z | src/ZServer/Zope2/utilities/zpasswd.py | gogobd/ZServer | d85912680297f67138ebe38409fcb36068b37c3d | [
"ZPL-2.1"
] | 12 | 2017-06-21T03:56:04.000Z | 2021-03-29T05:47:10.000Z | src/ZServer/Zope2/utilities/zpasswd.py | gogobd/ZServer | d85912680297f67138ebe38409fcb36068b37c3d | [
"ZPL-2.1"
] | 7 | 2017-05-12T07:30:54.000Z | 2020-10-08T01:51:50.000Z | ##############################################################################
#
# Copyright (c) 2001,2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Zope user bootstrap system
Usage: %(PROGRAM)s [options] filename
If this program is called without command-line options, it will prompt
for all necessary information. The available options are:
-u / --username=
Set the username to be used for the initial user or the emergency user
-p / --password=
Set the password
-e / --encoding=
Set the encryption/encoding rules. Defaults to SHA-1. OPTIONAL
-d / --domains=
Set the domain names that the user user can log in from. Defaults to
any. OPTIONAL.
-h / --help
Print this help text and exit.
Filename is required and should be the name of the file to store the
information in (usually "inituser" or "access").
"""
import binascii
import getopt
import getpass
import os
import random
import sha
import sys
try:
from crypt import crypt
except ImportError:
crypt = None
if sys.version_info > (3, ):
raw_input = input
PROGRAM = sys.argv[0]
COMMASPACE = ', '
def generate_salt():
    """Return a random two-character salt for the crypt() function."""
    alphabet = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                "abcdefghijklmnopqrstuvwxyz"
                "0123456789./")
    return "".join(random.choice(alphabet) for _ in range(2))
def generate_passwd(password, encoding):
    """Encode *password* for a Zope access/inituser file.

    :param password: Clear-text password
    :param encoding: 'SHA' (base64 SHA-1 digest), 'CRYPT' (UNIX crypt) or
        'CLEARTEXT' (no protection); case-insensitive
    :returns: Encoded password string, prefixed with its scheme tag
    :raises ValueError: for an unsupported encoding
    """
    # The Python-2-only `sha` module was removed in Python 3; hashlib.sha1
    # produces the identical digest, so the emitted '{SHA}...' value is
    # unchanged. Local import keeps the rest of the module untouched.
    import hashlib
    encoding = encoding.upper()
    if encoding == 'SHA':
        digest = hashlib.sha1(password.encode('utf-8')).digest()
        # b2a_base64 appends a trailing newline; strip it as before.
        pw = '{SHA}' + binascii.b2a_base64(digest)[:-1].decode('ascii')
    elif encoding == 'CRYPT':
        pw = '{CRYPT}' + crypt(password, generate_salt())
    elif encoding == 'CLEARTEXT':
        pw = password
    else:
        raise ValueError('Unsupported encoding: %s' % encoding)
    return pw
def write_generated_password(home, ac_path, username):
    """Create *ac_path* holding a random 8-character password for *username*.

    Writes a single ``username:{SHA}<hash>`` line, chmods the file to 0644,
    and returns the clear-text password so it can be shown to the operator.
    (*home* is unused but kept for interface compatibility.)
    """
    alphabet = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                "abcdefghijklmnopqrstuvwxyz"
                "0123456789!")
    pw = "".join(random.choice(alphabet) for _ in range(8))
    with open(ac_path, 'w') as acfile:
        acfile.write('%s:%s\n' % (username, generate_passwd(pw, 'SHA')))
    os.chmod(ac_path, 0o644)
    return pw
def write_access(home, user='', group=''):
    """Create a default 'access' file under *home* if one does not exist.

    Generates a random password for the 'emergency' user, prints it, and
    hands file ownership to the project-local `do` helper -- presumably a
    chown wrapper; verify against that module.
    """
    ac_path = os.path.join(home, 'access')
    if not os.path.exists(ac_path):
        print('-' * 78)
        print('creating default access file')
        # Returns the clear-text password so it can be echoed below.
        pw = write_generated_password(home, ac_path, 'emergency')
        print("""Note:
        The emergency user name and password are 'emergency'
        and '%s'.
        You can change the emergency name and password with the
        zpasswd script. To find out more, type:
        %s zpasswd.py
        """ % (pw, sys.executable))
        import do
        do.ch(ac_path, user, group)
def get_password():
    """Prompt twice for a password until both entries match; return it."""
    while True:
        password = getpass.getpass("Password: ")
        if password == getpass.getpass("Verify password: "):
            return password
        print("Password mismatch, please try again...")
def write_inituser(home, user='', group=''):
    """Create a default 'inituser' file under *home* if one does not exist.

    Generates a random password for the 'admin' user, prints it, and hands
    file ownership to the project-local `do` helper -- presumably a chown
    wrapper; verify against that module.
    """
    ac_path = os.path.join(home, 'inituser')
    if not os.path.exists(ac_path):
        print('-' * 78)
        print('creating default inituser file')
        # Returns the clear-text password so it can be echoed below.
        pw = write_generated_password(home, ac_path, 'admin')
        print("""Note:
        The initial user name and password are 'admin'
        and '%s'.
        You can change the name and password through the web
        interface or using the 'zpasswd.py' script.
        """ % pw)
        import do
        do.ch(ac_path, user, group)
def usage(code, msg=''):
    """Write the module usage text to stderr and exit with *code*.

    The module docstring contains %(PROGRAM)s-style placeholders, hence
    the interpolation against globals().
    """
    sys.stderr.write(__doc__ % globals())
    if msg:
        sys.stderr.write(msg)
    sys.exit(code)
def main():
    """CLI entry point: collect username/password/encoding/domains from
    options or interactive prompts, then write one access-file record."""
    shortopts = 'u:p:e:d:h'
    longopts = ['username=',
                'password=',
                'encoding=',
                'domains=',
                'help']
    try:
        opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
    except getopt.error as msg:
        usage(1, msg)
    # Defaults
    username = password = None
    domains = ''
    encoding = 'SHA'
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-u', '--username'):
            username = arg
        elif opt in ('-p', '--password'):
            password = arg
        elif opt in ('-e', '--encoding'):
            encoding = arg
        elif opt in ('-d', '--domains'):
            # Stored with its ':' separator already attached.
            domains = ':' + arg
    # Exactly one positional (the output filename) is required; usage()
    # exits the process, so access_file is always bound past this point.
    if len(args) == 0:
        usage(1, 'filename is required')
    elif len(args) == 1:
        access_file = open(args[0], 'w')
    else:
        usage(1, 'Extra command line arguments: ' + COMMASPACE.join(args))
    if opts:
        # There were some command line args, so verify
        if username is not None and password is None:
            password = get_password()
    else:
        # No command line args, so prompt
        while 1:
            username = raw_input("Username: ")
            if username != '':
                break
        password = get_password()
        while 1:
            print("""
Please choose a format from:
SHA - SHA-1 hashed password (default)
CRYPT - UNIX-style crypt password
CLEARTEXT - no protection
""")
            encoding = raw_input("Encoding: ")
            if encoding == '':
                encoding = 'SHA'
                break
            if encoding.upper() in ['SHA', 'CRYPT', 'CLEARTEXT']:
                break
        domains = raw_input("Domain restrictions: ")
        if domains:
            domains = ":" + domains
    # Done with prompts and args; emit "user:hash[:domains]".
    access_file.write(username + ":" +
                      generate_passwd(password, encoding) +
                      domains)
# If called from the command line
if __name__ == '__main__':
    main()
| 27.852814 | 78 | 0.57709 |
3a5fc479e72087570045846b38ea35d29a5bdf29 | 86 | py | Python | 2020/network/network/apps.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | 2020/network/network/apps.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | 2020/network/network/apps.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class NetworkConfig(AppConfig):
    """Django application configuration for the ``network`` app."""
    name = 'network'
| 14.333333 | 33 | 0.77907 |
7383da232cb654ff4d7a8f7fb2c5799569799c2b | 3,617 | py | Python | tests/gold_tests/tls_hooks/tls_hooks.test.py | heroku-miraheze/trafficserver | b4c9cf1668c5b464064c336800e049c11e659929 | [
"Apache-2.0"
] | 1 | 2020-04-20T14:06:36.000Z | 2020-04-20T14:06:36.000Z | tests/gold_tests/tls_hooks/tls_hooks.test.py | heroku-miraheze/trafficserver | b4c9cf1668c5b464064c336800e049c11e659929 | [
"Apache-2.0"
] | 3 | 2017-09-22T19:18:56.000Z | 2021-06-21T18:07:14.000Z | tests/gold_tests/tls_hooks/tls_hooks.test.py | heroku-miraheze/trafficserver | b4c9cf1668c5b464064c336800e049c11e659929 | [
"Apache-2.0"
] | 1 | 2020-03-13T00:17:20.000Z | 2020-03-13T00:17:20.000Z | '''
Test 1 preaccept callback (without delay)
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
# ---- Fixture setup: ATS process, origin server, canned transaction ------
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server")
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# desired response from the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
# ---- TLS configuration: server cert/key plus records.config knobs -------
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'ssl_hook_test',
    'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.client.verify.server': 0,
    'proxy.config.ssl.TLSv1_3': 0,
    'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
})
ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)
)
# Build the hook-test plugin with a single preaccept hook registered.
Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'ssl_hook_test.cc'), ts, '-preaccept=1')
# ---- Test run: one TLS request, gold-file checked output -----------------
tr = Test.AddTestRun("Test one preaccept hook")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'curl -v -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/preaccept-1.gold"
tr.Processes.Default.Streams.All = Testers.ExcludesExpression("TLSv1.3 (IN), TLS handshake, Finished (20):", "Should not negotiate a TLSv1.3 connection")
ts.Streams.stderr = "gold/ts-preaccept-1.gold"
# the preaccept may get triggered twice because the test framework creates a TCP connection before handing off to traffic_server
preacceptstring = "Pre accept callback 0"
# Regex asserts the marker appears exactly once or twice in the log.
ts.Streams.All = Testers.ContainsExpression(
    "\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(preacceptstring), "Pre accept message appears only once or twice", reflags=re.S | re.M)
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
e631b9b9205dbfd543b8508ea9281dbb258ee4c0 | 1,430 | py | Python | Coursera/Parte 1/Desafios/Semana 3/bhaskara.py | Digitalen-Brasil/Python | 39bcdfc95596ffe7d40f593faed44a87633e56f5 | [
"MIT"
] | null | null | null | Coursera/Parte 1/Desafios/Semana 3/bhaskara.py | Digitalen-Brasil/Python | 39bcdfc95596ffe7d40f593faed44a87633e56f5 | [
"MIT"
] | null | null | null | Coursera/Parte 1/Desafios/Semana 3/bhaskara.py | Digitalen-Brasil/Python | 39bcdfc95596ffe7d40f593faed44a87633e56f5 | [
"MIT"
] | 1 | 2021-04-09T14:50:13.000Z | 2021-04-09T14:50:13.000Z | # Programa para encontrar raiz quadrada de uma função de segundo grau
import math
print('A forma geral da equação de segundo grau é: \033[1max² + bx - c = 0\033[m'
'\n"a", "b" e "c" são os valores constantes da equação, ou seja, os valores conhecidos.\n')
# Entrada das constantes pelo usuário
a = float(input('Qual o valor de "a"? '))
b = float(input('Qual o valor de "b"? '))
c = float(input('Qual o valor de "c"? '))
# Calculo do delta, utilizando a formula: Delta = b² - 4.a.c
delta = (b**2) - (4*a*c)
# Não existe raiz quadrada de número negativo, logo, não existem soluções reais para o problema
if delta < 0:
print('esta equação não possui raízes reais')
# Se Delta for maior que é, é calculado X' e X'', ou x e y.
# O comando para calcular raiz quadrada no módulo math é: "math.sqrt()"
if delta > 0:
# Fórmula para encontrar o primeiro X é: x = (-b + Raiz Quadrada de Delta) / (2.a)
x = (-b+math.sqrt(delta))/(2*a)
# Fórmula para encontrar o primeiro X é: x = (-b - Raiz Quadrada de Delta) / (2.a)
y = (-b-math.sqrt(delta))/(2*a)
if x <= y == True:
print('as raízes da equação são {} e {}'.format(x,y))
else:
print('as raízes da equação são {} e {}'.format(y, x))
if delta == 0:
# Quando delta é igual a zero, só existe uma solução real, encontrada pela fórmula: x = (-b+delta)/2
zero = (-b+delta)/(2*a)
print('a raiz desta equação é {}'.format(zero)) | 38.648649 | 104 | 0.633566 |
1d94257dc7ba02feb691251661e7c99dd9551343 | 12,268 | py | Python | pysnmp/CISCO-SWITCH-ENGINE-CAPABILITY.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CISCO-SWITCH-ENGINE-CAPABILITY.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CISCO-SWITCH-ENGINE-CAPABILITY.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-SWITCH-ENGINE-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SWITCH-ENGINE-CAPABILITY
# Produced by pysmi-0.3.4 at Mon Apr 29 17:56:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: machine-generated by pysmi from the CISCO-SWITCH-ENGINE-CAPABILITY
# MIB -- hand edits here are lost when the module is regenerated.
# Pull the ASN.1 / SMI base types and macros out of the MIB builder.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
InetAddressType, = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType")
AgentCapabilities, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, ModuleIdentity, Gauge32, iso, Counter64, IpAddress, MibIdentifier, NotificationType, Counter32, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "ModuleIdentity", "Gauge32", "iso", "Counter64", "IpAddress", "MibIdentifier", "NotificationType", "Counter32", "Bits", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity registered at OID 1.3.6.1.4.1.9.7.343.
ciscoSwitchEngineCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 343))
# Revision history of the source MIB, newest first.
ciscoSwitchEngineCapability.setRevisions(('2013-07-25 00:00', '2012-09-10 00:00', '2011-09-28 00:00', '2010-11-11 00:00', '2010-03-22 00:00', '2008-10-30 00:00', '2007-07-16 00:00', '2005-09-16 00:00', '2005-08-24 00:00', '2004-12-22 00:00', '2004-06-14 00:00', '2004-01-15 00:00', '2003-12-04 00:00', '2003-08-12 00:00',))
# loadTexts is only true when human-readable MIB text is requested.
if mibBuilder.loadTexts: ciscoSwitchEngineCapability.setLastUpdated('201307250000Z')
if mibBuilder.loadTexts: ciscoSwitchEngineCapability.setOrganization('Cisco Systems, Inc.')
cseCapCatOSV08R0101Cat6KPfc = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0101Cat6KPfc = cseCapCatOSV08R0101Cat6KPfc.setProductRelease('Cisco CatOS 8.1(1) on Catalyst 6000/6500\n series devices with PFC card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0101Cat6KPfc = cseCapCatOSV08R0101Cat6KPfc.setStatus('current')
cseCapCatOSV08R0101Cat6KPfc2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0101Cat6KPfc2 = cseCapCatOSV08R0101Cat6KPfc2.setProductRelease('Cisco CatOS 8.1(1) on Catalyst 6000/6500\n and Cisco 7600 series devices with PFC2 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0101Cat6KPfc2 = cseCapCatOSV08R0101Cat6KPfc2.setStatus('current')
cseCapCatOSV08R0101Cat6KPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0101Cat6KPfc3 = cseCapCatOSV08R0101Cat6KPfc3.setProductRelease('Cisco CatOS 8.1(1) on Catalyst 6000/6500\n and Cisco 7600 series devices with \n PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0101Cat6KPfc3 = cseCapCatOSV08R0101Cat6KPfc3.setStatus('current')
cseCapV12R0119ECat6KPfc = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0119ECat6KPfc = cseCapV12R0119ECat6KPfc.setProductRelease('Cisco IOS 12.1(19E) on Catalyst 6000/6500\n series devices with PFC card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0119ECat6KPfc = cseCapV12R0119ECat6KPfc.setStatus('current')
cseCapV12R0119ECat6KPfc2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 5))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0119ECat6KPfc2 = cseCapV12R0119ECat6KPfc2.setProductRelease('Cisco IOS 12.1(19E) on Catalyst 6000/6500\n and Cisco 7600 series devices with \n PFC2 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0119ECat6KPfc2 = cseCapV12R0119ECat6KPfc2.setStatus('current')
cseCapV12R0217SXCat6KPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 6))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0217SXCat6KPfc3 = cseCapV12R0217SXCat6KPfc3.setProductRelease('Cisco IOS 12.2(17SX) on Catalyst 6000/6500\n and Cisco 7600 series devices with \n PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0217SXCat6KPfc3 = cseCapV12R0217SXCat6KPfc3.setStatus('current')
cseCapCatOSV08R0301Cat6KPfc2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 7))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0301Cat6KPfc2 = cseCapCatOSV08R0301Cat6KPfc2.setProductRelease('Cisco CatOS 8.3(1) on Catalyst 6000/6500\n and Cisco 7600 series devices with PFC2 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0301Cat6KPfc2 = cseCapCatOSV08R0301Cat6KPfc2.setStatus('current')
cseCapCatOSV08R0301Cat6KPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 8))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0301Cat6KPfc3 = cseCapCatOSV08R0301Cat6KPfc3.setProductRelease('Cisco CatOS 8.3(1) on Catalyst 6000/6500\n and Cisco 7600 series devices with \n PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0301Cat6KPfc3 = cseCapCatOSV08R0301Cat6KPfc3.setStatus('current')
cseCapCatOSV08R0401Cat6KPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 9))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0401Cat6KPfc3 = cseCapCatOSV08R0401Cat6KPfc3.setProductRelease('Cisco CatOS 8.4(1) on Catalyst 6000/6500\n and Cisco 7600 series devices with \n PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0401Cat6KPfc3 = cseCapCatOSV08R0401Cat6KPfc3.setStatus('current')
cseCapV12R0218SXEPCat6K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 10))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0218SXEPCat6K = cseCapV12R0218SXEPCat6K.setProductRelease('Cisco IOS 12.2(18)SXE on Catalyst 6000/6500\n and Cisco 7600 series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0218SXEPCat6K = cseCapV12R0218SXEPCat6K.setStatus('current')
cseCapCatOSV08R0501PCat6KPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 11))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0501PCat6KPfc3 = cseCapCatOSV08R0501PCat6KPfc3.setProductRelease('Cisco CatOS 8.5(1) on Catalyst 6000/6500\n and Cisco 7600 series devices with \n PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapCatOSV08R0501PCat6KPfc3 = cseCapCatOSV08R0501PCat6KPfc3.setStatus('current')
cseCapV12R0233SXHPCat6K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 12))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0233SXHPCat6K = cseCapV12R0233SXHPCat6K.setProductRelease('Cisco IOS 12.2(33)SXH on Catalyst 6000/6500\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0233SXHPCat6K = cseCapV12R0233SXHPCat6K.setStatus('current')
cseCapV12R0233SXIPCat6K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 13))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0233SXIPCat6K = cseCapV12R0233SXIPCat6K.setProductRelease('Cisco IOS 12.2(33)SXI on Catalyst 6000/6500\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0233SXIPCat6K = cseCapV12R0233SXIPCat6K.setStatus('current')
cseCapV12R0233SXI4PCat6K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 14))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0233SXI4PCat6K = cseCapV12R0233SXI4PCat6K.setProductRelease('Cisco IOS 12.2(33)SXI4 on Catalyst 6000/6500\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0233SXI4PCat6K = cseCapV12R0233SXI4PCat6K.setStatus('current')
cseCapV12R0250SYPCat6KPfc4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 15))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0250SYPCat6KPfc4 = cseCapV12R0250SYPCat6KPfc4.setProductRelease('Cisco IOS 12.2(50)SY on Catalyst 6000/6500\n series devices for PFC4 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV12R0250SYPCat6KPfc4 = cseCapV12R0250SYPCat6KPfc4.setStatus('current')
cseCapV15R0001SYPCat6kPfc4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 16))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV15R0001SYPCat6kPfc4 = cseCapV15R0001SYPCat6kPfc4.setProductRelease('Cisco IOS 15.0(1)SY on Catalyst 6000/6500\n series devices for PFC4 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV15R0001SYPCat6kPfc4 = cseCapV15R0001SYPCat6kPfc4.setStatus('current')
cseCapV15R0101SYPCat6kPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 17))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV15R0101SYPCat6kPfc3 = cseCapV15R0101SYPCat6kPfc3.setProductRelease('Cisco IOS 15.1(1)SY on Catalyst 6000/6500\n series devices with PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV15R0101SYPCat6kPfc3 = cseCapV15R0101SYPCat6kPfc3.setStatus('current')
cseCapV15R0101SYPCat6kPfc4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 18))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV15R0101SYPCat6kPfc4 = cseCapV15R0101SYPCat6kPfc4.setProductRelease('Cisco IOS 15.1(1)SY on Catalyst 6000/6500\n series devices for PFC4 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapV15R0101SYPCat6kPfc4 = cseCapV15R0101SYPCat6kPfc4.setStatus('current')
cseCapNxOSV06R0104PN7k = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 343, 19))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapNxOSV06R0104PN7k = cseCapNxOSV06R0104PN7k.setProductRelease('Cisco NX-OS 6.1(4) on Nexus 7000 series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cseCapNxOSV06R0104PN7k = cseCapNxOSV06R0104PN7k.setStatus('current')
mibBuilder.exportSymbols("CISCO-SWITCH-ENGINE-CAPABILITY", cseCapV15R0001SYPCat6kPfc4=cseCapV15R0001SYPCat6kPfc4, cseCapV12R0218SXEPCat6K=cseCapV12R0218SXEPCat6K, cseCapV12R0233SXHPCat6K=cseCapV12R0233SXHPCat6K, cseCapCatOSV08R0101Cat6KPfc3=cseCapCatOSV08R0101Cat6KPfc3, cseCapV15R0101SYPCat6kPfc4=cseCapV15R0101SYPCat6kPfc4, cseCapV12R0233SXIPCat6K=cseCapV12R0233SXIPCat6K, cseCapV12R0233SXI4PCat6K=cseCapV12R0233SXI4PCat6K, cseCapCatOSV08R0301Cat6KPfc2=cseCapCatOSV08R0301Cat6KPfc2, cseCapV12R0119ECat6KPfc2=cseCapV12R0119ECat6KPfc2, cseCapV15R0101SYPCat6kPfc3=cseCapV15R0101SYPCat6kPfc3, cseCapCatOSV08R0301Cat6KPfc3=cseCapCatOSV08R0301Cat6KPfc3, cseCapV12R0250SYPCat6KPfc4=cseCapV12R0250SYPCat6KPfc4, cseCapV12R0217SXCat6KPfc3=cseCapV12R0217SXCat6KPfc3, cseCapCatOSV08R0401Cat6KPfc3=cseCapCatOSV08R0401Cat6KPfc3, cseCapV12R0119ECat6KPfc=cseCapV12R0119ECat6KPfc, PYSNMP_MODULE_ID=ciscoSwitchEngineCapability, ciscoSwitchEngineCapability=ciscoSwitchEngineCapability, cseCapCatOSV08R0101Cat6KPfc=cseCapCatOSV08R0101Cat6KPfc, cseCapCatOSV08R0501PCat6KPfc3=cseCapCatOSV08R0501PCat6KPfc3, cseCapNxOSV06R0104PN7k=cseCapNxOSV06R0104PN7k, cseCapCatOSV08R0101Cat6KPfc2=cseCapCatOSV08R0101Cat6KPfc2)
| 105.758621 | 1,192 | 0.726606 |
d3144bd874464433a57f8a8e41f2b6c6bbc81dd6 | 1,703 | py | Python | output/models/ibm_data/mixed/assertions/test17_xsd/test17.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ibm_data/mixed/assertions/test17_xsd/test17.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ibm_data/mixed/assertions/test17_xsd/test17.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class X:
    """Generated binding with four optional string child elements a-d."""

    a: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
    b: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
    c: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
    d: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
@dataclass
class Y:
    """Second generated binding; mirrors X with optional elements a-d."""

    a: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
    b: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
    c: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
    d: Optional[str] = field(default=None, metadata={"type": "Element", "namespace": ""})
@dataclass
class Test:
    """Root binding "test": one required X element and one required Y element."""

    class Meta:
        name = "test"

    x: Optional[X] = field(
        default=None,
        metadata={"type": "Element", "namespace": "", "required": True},
    )
    y: Optional[Y] = field(
        default=None,
        metadata={"type": "Element", "namespace": "", "required": True},
    )
| 18.922222 | 40 | 0.413388 |
3d16f68db202a4326087c3fe4fc5eb17887abe17 | 270 | py | Python | python/p215.py | forewing/lc | 314468a1a3bb7d38eccf1f34b0d1b7da04a34784 | [
"CC0-1.0"
] | null | null | null | python/p215.py | forewing/lc | 314468a1a3bb7d38eccf1f34b0d1b7da04a34784 | [
"CC0-1.0"
] | null | null | null | python/p215.py | forewing/lc | 314468a1a3bb7d38eccf1f34b0d1b7da04a34784 | [
"CC0-1.0"
] | null | null | null | import heapq
class Solution:
    def findKthLargest(self, nums: List[int], k: int) -> int:
        """Return the k-th largest element of nums using a size-k min-heap.

        The heap always holds the k largest values seen so far, so its
        smallest entry after one pass is the answer. O(n log k) time.
        """
        heap = nums[:k]
        heapq.heapify(heap)
        for value in nums[k:]:
            # Push the new value and drop the current minimum in one step.
            heapq.heappushpop(heap, value)
        return heapq.heappop(heap)
| 24.545455 | 61 | 0.57037 |
acca6ad659a4c2908743f08112b052f8e95483b5 | 5,479 | py | Python | run_classifier.py | mikuh/bert-tf2-keras | e361a0e7dc9fa0d64c48ac41320d302599dba025 | [
"MIT"
] | 4 | 2020-06-21T15:48:40.000Z | 2022-01-24T05:10:59.000Z | run_classifier.py | mikuh/bert-tf2-keras | e361a0e7dc9fa0d64c48ac41320d302599dba025 | [
"MIT"
] | null | null | null | run_classifier.py | mikuh/bert-tf2-keras | e361a0e7dc9fa0d64c48ac41320d302599dba025 | [
"MIT"
] | 3 | 2020-07-20T07:11:27.000Z | 2022-01-24T05:11:21.000Z | import tensorflow as tf
from utils import performance
from utils import optimization
from utils import keras_utils
from utils.data_utils import create_classifier_dataset
from utils import distribution_utils
from models import BertClassifier
from configs import AlbertConfig, BertConfig
import math
import os
import time
import keras.backend as K
def get_optimizer(initial_lr, steps_per_epoch, epochs, warmup_steps, use_float16=False):
    """Create the training optimizer and configure it for optional float16.

    Args:
        initial_lr: learning rate passed to optimization.create_optimizer.
        steps_per_epoch: optimizer steps per epoch; multiplied by ``epochs``
            to obtain the total number of training steps.
        epochs: number of training epochs.
        warmup_steps: warmup steps passed to the optimizer factory.
        use_float16: if True, wrap the optimizer for float16 training.

    Returns:
        The configured optimizer instance.
    """
    total_steps = steps_per_epoch * epochs
    base_optimizer = optimization.create_optimizer(initial_lr, total_steps, warmup_steps)
    return performance.configure_optimizer(
        base_optimizer,
        use_float16=use_float16,
        use_graph_rewrite=False)
def get_loss_fn(num_classes):
    """Gets the classification loss function.

    Args:
        num_classes: depth used when one-hot encoding the integer labels.

    Returns:
        A callable ``(labels, logits) -> scalar`` computing the batch mean
        of the softmax cross-entropy.
    """
    def classification_loss_fn(labels, logits):
        """Classification loss."""
        # Debug helpers; uncomment to print per-batch labels/predictions.
        # K.print_tensor(labels, message=',y_true = ')
        # K.print_tensor(logits[0], message=',y_predict = ')
        # Remove singleton dimensions, e.g. [batch, 1] -> [batch].
        labels = tf.squeeze(labels)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        one_hot_labels = tf.one_hot(
            tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32)
        # Cross-entropy: -sum(one_hot * log_softmax) per example, then mean.
        per_example_loss = -tf.reduce_sum(
            tf.cast(one_hot_labels, dtype=tf.float32) * tf.cast(log_probs, tf.float32), axis=-1)
        return tf.reduce_mean(per_example_loss)
    return classification_loss_fn
def metric_fn():
    """Build the evaluation metric: sparse categorical accuracy in float32."""
    accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy', dtype=tf.float32)
    return accuracy
def get_callbacks(train_batch_size, log_steps, model_dir):
    """Assemble training callbacks: timing, TensorBoard, and checkpointing.

    Args:
        train_batch_size: batch size, forwarded to the timing callback.
        log_steps: interval (in steps) between timing log entries.
        model_dir: root directory for logs, graphs and checkpoints.

    Returns:
        A list [TimeHistory, TensorBoard, ModelCheckpoint].
    """
    timing = keras_utils.TimeHistory(
        batch_size=train_batch_size,
        log_steps=log_steps,
        logdir=os.path.join(model_dir, 'logs'))
    tensorboard = tf.keras.callbacks.TensorBoard(
        os.path.join(model_dir, 'graph'), update_freq='batch')
    # Weights-only checkpoints, written periodically (save_freq=32000)
    # rather than once per epoch.
    checkpointing = tf.keras.callbacks.ModelCheckpoint(
        os.path.join(model_dir, 'checkpoint-{epoch:02d}'),
        save_weights_only=True, save_freq=32000)
    return [timing, tensorboard, checkpointing]
if __name__ == '__main__':
    # ---- Experiment hyper-parameters (hard-coded for this run) ----
    train_batch_size = 32
    eval_batch_size = 64
    sequence_length = 64  # max token length fed to the model
    learning_rate = 2e-5
    train_data_size = 368624  # number of examples in the train TFRecord
    eval_data_size = 52661  # number of examples in the dev TFRecord
    steps_per_epoch = train_data_size // train_batch_size
    epochs = 1
    # Warm up over the first 10% of one epoch's worth of optimizer steps.
    warmup_steps = int(epochs * train_data_size * 0.1 / train_batch_size)
    eval_steps = int(math.ceil(eval_data_size / eval_batch_size))
    num_classes = 2
    log_steps = 1
    model_dir = "results/classifier/1/"
    # Alternative (plain BERT) config/checkpoint paths kept for reference:
    # bert_config_file = "/home/geb/PycharmProjects/bert/vocab_file/bert_config.json"
    # checkpoint_file = "/home/geb/PycharmProjects/bert/vocab_file/bert_model.ckpt"
    bert_config_file = "/home/geb/PycharmProjects/bert_ngc/vocab_file/albert_zh/bert_config.json"
    checkpoint_file = "/home/geb/PycharmProjects/bert_ngc/vocab_file/albert_zh/bert_model.ckpt"
    checkpoint_path = "results/classifier/checkpoint-{:02d}".format(epochs)
    saved_model_path = "saved_models/{}".format(int(time.time()))
    # Mode switches: at most one of the branches below runs; note `export`
    # is checked before `predict` in the elif chain.
    train = True
    predict = False
    export = False
    strategy = distribution_utils.get_distribution_strategy(
        distribution_strategy='one_device',
        num_gpus=1,
        tpu_address=False)
    with strategy.scope():
        if train:
            # load data
            train_data = create_classifier_dataset("tf_records/sentence_classifier/train.record0", sequence_length,
                                                   train_batch_size)
            dev_data = create_classifier_dataset("tf_records/sentence_classifier/dev.record0", sequence_length,
                                                 eval_batch_size, False)
            bert_config = AlbertConfig.from_json_file(bert_config_file)
            cls = BertClassifier(bert_config, sequence_length, num_classes, output="logits")
            # Load pre-trained ALBERT weights before fine-tuning.
            cls.init_pre_training_weights(checkpoint_file)
            optimizer = get_optimizer(learning_rate, steps_per_epoch, epochs, warmup_steps)
            loss_fn = get_loss_fn(num_classes)
            callbacks = get_callbacks(train_batch_size, log_steps, model_dir)
            cls.compile(optimizer=optimizer, loss=loss_fn, metrics=[metric_fn()])
            # print(cls._encoder_layer.get_layer("transformer/layer_0").get_weights()[0])
            cls.fit(
                train_data,
                validation_data=dev_data,
                steps_per_epoch=steps_per_epoch,
                epochs=epochs,
                validation_steps=eval_steps,
                callbacks=callbacks)
            tf.keras.models.save_model(cls, saved_model_path, save_format='tf')
            # print(cls._encoder_layer.get_layer("transformer/layer_0").get_weights()[0])
        elif export:
            # Rebuild the model, restore trained weights and export a SavedModel.
            bert_config = AlbertConfig.from_json_file(bert_config_file)
            cls = BertClassifier(bert_config, sequence_length, num_classes)
            cls.load_weights(checkpoint_path)
            # One forward pass on dummy inputs — presumably to build the model
            # variables before saving; TODO confirm.
            cls.predict({"input_word_ids": tf.ones([1, 64]), "input_mask": tf.ones([1, 64]), "input_type_ids": tf.zeros([1, 64])})
            tf.keras.models.save_model(cls, saved_model_path, save_format='tf')
        elif predict:
            bert_config = AlbertConfig.from_json_file(bert_config_file)
            cls = BertClassifier(bert_config, sequence_length, num_classes)
            cls.load_weights(checkpoint_path)
            # TODO: inference path not implemented yet.
9ac88c8e168aca6f563a302886691f74debe2a89 | 113 | py | Python | javalang/__init__.py | fermadeiral/javalang | 1d6ba16f21d5daa5ac663e898437402e6b420763 | [
"MIT"
] | null | null | null | javalang/__init__.py | fermadeiral/javalang | 1d6ba16f21d5daa5ac663e898437402e6b420763 | [
"MIT"
] | 1 | 2018-05-07T10:05:34.000Z | 2018-05-17T23:52:12.000Z | javalang/javalang/__init__.py | DarthPumpkin/github-search | 2dc8489380825b83e2d2773bc32d439bd30e1d00 | [
"MIT"
] | 1 | 2021-12-02T13:15:38.000Z | 2021-12-02T13:15:38.000Z |
from . import parser
from . import parse
from . import tokenizer
from . import javadoc
__version__ = "0.11.0"
| 12.555556 | 23 | 0.725664 |
5e7af1f4aedb1ed406e98438cda5ed7c2952ab02 | 6,431 | py | Python | nacc/uds3/blanks.py | utsw-bicf/nacculator | 1e8eb9b4029c7c52b242c76f941a1572577d300e | [
"BSD-2-Clause"
] | null | null | null | nacc/uds3/blanks.py | utsw-bicf/nacculator | 1e8eb9b4029c7c52b242c76f941a1572577d300e | [
"BSD-2-Clause"
] | 20 | 2020-05-20T16:04:13.000Z | 2020-07-28T16:10:15.000Z | nacc/uds3/blanks.py | utsw-bicf/nacculator | 1e8eb9b4029c7c52b242c76f941a1572577d300e | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
# Copyright 2015-2016 University of Florida. All rights reserved.
# This file is part of UF CTS-IT's NACCulator project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
import csv
import os
import re
import sys
def convert_rule_to_python(name: str, rule: str) -> bool:
"""
Converts the text `rule` into a python function.
The returned function accepts one argument of type `Packet`.
Example:
packet["FOO"] = "I should be blank!"
packet["BAR"] = 0
r = convert_rule_to_python("FOO", "Blank if Question 1 BAR = 0 (No)")
if packet["FOOBAR"] != "" and r(packet):
raise RuleError("FOO should be blank, but is not!")
:param name: Canonical name of the field
:param rule: Blanking rule text
"""
special_cases = {
'FTLDSUBT': _blanking_rule_ftldsubt,
'LEARNED': _blanking_rule_learned,
'ZIP': _blanking_rule_dummy,
'DECCLMOT': _blanking_rule_dummy,
'CRAFTDRE': _blanking_rule_dummy,
# Neuropath skip rules
'NPINF': _blanking_rule_dummy,
'NPHEMO': _blanking_rule_dummy,
'NPOLD': _blanking_rule_dummy,
'NPOLDD': _blanking_rule_dummy,
'NPFTDTAU': _blanking_rule_dummy,
'NPOFTD': _blanking_rule_dummy,
'NPNEC': _blanking_rule_dummy,
'NPPATH': _blanking_rule_dummy,
'NPPATHO': _blanking_rule_dummy,
'NPPATH2': _blanking_rule_dummy,
'NPPATH3': _blanking_rule_dummy,
'NPPATH6': _blanking_rule_dummy,
'NPPATH7': _blanking_rule_dummy,
'NPPATH4': _blanking_rule_dummy,
'NPPATH5': _blanking_rule_dummy,
'NPPATH8': _blanking_rule_dummy,
'NPPATH9': _blanking_rule_dummy,
'NPPATH10': _blanking_rule_dummy,
'NPPATH11': _blanking_rule_dummy,
}
single_value = re.compile(
r"Blank if( Question(s?))? *\w+ (?P<key>\w+) *(?P<eq>=|ne)"
r" (?P<value>\d+)([^-]|$)")
range_values = re.compile(
r"Blank if( Question(s?))? *\w+ (?P<key>\w+) *(?P<eq>=|ne)"
r" (?P<start>\d+)-(?P<stop>\d+)( |$)")
# First, check to see if the rule is a "Special Case"
if name in special_cases:
return special_cases[name]()
# Then, check to see if the rule is of the within-range type
m = range_values.match(rule)
if m:
return _blanking_rule_check_within_range(
m.group('key'), m.group('eq'), m.group('start'), m.group('stop'))
# Next, check to see if the rule is of the single-value type
m = single_value.match(rule)
if m:
return _blanking_rule_check_single_value(
m.group('key'), m.group('eq'), m.group('value'))
# Finally, raise an error since we do not know how to handle the rule
raise Exception("Could not parse Blanking rule: "+name)
def extract_blanks(csvfile):
with open(csvfile) as fp:
reader = csv.DictReader(fp)
blanks_fieldnames = [f for f in reader.fieldnames if 'BLANKS' in f]
for row in reader:
rules = '\t'.join([row[f] for f in blanks_fieldnames]).strip()
if rules:
yield "%s:\t%s" % (row['Data Element'], rules)
def _blanking_rule_check_single_value(key, eq, value):
def should_be_blank(packet):
""" Returns True if the value should be blank according to the rule """
if '=' == eq:
return packet[key] == value
elif 'ne' == eq:
return packet[key] != value
else:
raise ValueError("'eq' must be '=' or 'ne', not '%s'." % eq)
return should_be_blank
def _blanking_rule_check_within_range(key, eq, start, stop):
def should_be_blank(packet):
""" Returns True if the value should be blank according to the rule """
first = int(start)
last = int(stop)+1
if '=' == eq:
return packet[key] in range(first, last)
elif 'ne' == eq:
return packet[key] not in list(range(first, last))
else:
raise ValueError("'eq' must be '=' or 'ne', not '%s'." % eq)
return should_be_blank
def _blanking_rule_dummy():
return lambda packet: False
def _blanking_rule_ftldsubt():
# Blank if #14a PSP ne 1 and #14b CORT ne 1 and #14c FTLDMO ne 1
# and 14d FTLDNOS ne 1
return lambda packet: packet['PSP'] != 1 and packet['CORT'] != 1 and \
packet['FTLDMO'] != 1 and packet['FTLDNOS'] != 1
def _blanking_rule_learned():
# The two rules contradict each other:
# - Blank if Question 2a REFERSC ne 1
# - Blank if Question 2a REFERSC ne 2
# The intent appears to be "blank if REFERSC is 3, 4, 5, 6, 8, or 9", but
# that makes 6 individual blanking rules and the maximum is 5 (BLANKS1-5).
return lambda packet: packet['REFERSC'] in (3, 4, 5, 6, 8, 9)
def set_zeros_to_blanks(packet):
""" Sets specific fields to zero if they meet certain criteria """
def set_to_blank_if_zero(*field_names):
for field_name in field_names:
field = packet[field_name]
if field == 0:
field.value = ''
# M1
if packet['DECEASED'] == 1 or packet['DISCONT'] == 1:
set_to_blank_if_zero(
'RENURSE', 'RENAVAIL', 'RECOGIM', 'REJOIN', 'REPHYILL',
'REREFUSE', 'FTLDDISC', 'CHANGEMO', 'CHANGEDY', 'CHANGEYR',
'PROTOCOL', 'ACONSENT', 'RECOGIM', 'REPHYILL', 'NURSEMO',
'NURSEDY', 'NURSEYR', 'FTLDREAS', 'FTLDREAX')
elif packet['DECEASED'] == 1:
# for just dead
set_to_blank_if_zero('DISCONT')
elif packet['DISCONT'] == 1:
# for just discont
set_to_blank_if_zero('DECEASED')
def main():
"""
Extracts all blanking rules from all DED files in a specified directory.
Usage:
python blanks.py ./ded_ivp
Note: this module is more useful as an imported module; see
`convert_rule_to_python`.
"""
data_dict_path = './ded_ivp'
if len(sys.argv) > 1:
data_dict_path = sys.argv[1]
deds = [f for f in os.listdir(data_dict_path) if f.endswith('.csv')]
for ded in deds:
for rule in extract_blanks(os.path.join(data_dict_path, ded)):
print(rule)
if __name__ == '__main__':
main()
| 34.207447 | 79 | 0.594309 |
37359f423e388469d10c28e755ee0e0c9e920b13 | 423 | py | Python | local_run.py | kfriedrichs/slurk | c46a4e4b3641ceaf383e2533d2463e8b7ae2c0b3 | [
"BSD-3-Clause"
] | null | null | null | local_run.py | kfriedrichs/slurk | c46a4e4b3641ceaf383e2533d2463e8b7ae2c0b3 | [
"BSD-3-Clause"
] | null | null | null | local_run.py | kfriedrichs/slurk | c46a4e4b3641ceaf383e2533d2463e8b7ae2c0b3 | [
"BSD-3-Clause"
] | 1 | 2021-11-19T18:14:04.000Z | 2021-11-19T18:14:04.000Z | import os
from gevent import monkey
monkey.patch_all(subprocess=True)
os.environ["SECRET_KEY"] = "TEST"
os.environ["DEBUG"] = "True"
from app import app, socketio
if __name__ == '__main__':
host = os.environ.get('HOST', '0.0.0.0')
port = int(os.environ.get('PORT', 5000))
socketio.run(app, host, port,
extra_files=["app/templates", "app/static/js", "app/static/css", "app/static/layouts"])
| 26.4375 | 104 | 0.654846 |
7347948ec0a7c88998d446436b0a93d3ec9516ab | 7,451 | py | Python | sizes.py | Les-Bell/eipaddress | 52a04483f08d83c6da7894a7c2f677f1075f7097 | [
"MIT"
] | 1 | 2021-03-08T09:39:24.000Z | 2021-03-08T09:39:24.000Z | sizes.py | Les-Bell/eipaddress | 52a04483f08d83c6da7894a7c2f677f1075f7097 | [
"MIT"
] | null | null | null | sizes.py | Les-Bell/eipaddress | 52a04483f08d83c6da7894a7c2f677f1075f7097 | [
"MIT"
] | null | null | null | """Report the size of an object, including the objects it contains.
This is based on a post by Aaron Hall, at the URL
https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python/450034
with many improvements:
* include __slots__ from base classes
* include __dict__ from base classes
* include key values from each __dict__
* exclude built-in objects, they do not add to the size of an object
* exclude class attributes, they do not add to the size of an object
* a verbose option, to show the memory used by each child object
"""
import sys
from numbers import Number
from collections import Set, Mapping, deque
from types import ModuleType, FunctionType
from gc import get_referents
# Scalar-like types whose contents are never traversed, and the name of the
# dict item-iterator method, both differ between Python 2 and 3; probe once
# at import time (Python 2 names raise NameError on Python 3).
try: # Python 2
    zero_depth_bases = basestring, Number, xrange, bytearray
    iteritems = 'iteritems'
except NameError: # Python 3
    zero_depth_bases = str, bytes, Number, range, bytearray
    iteritems = 'items'
# modules, functions and type references do not add to the size of an object
EXCLUDE_TYPES = ModuleType, FunctionType, type
# builtin objects do not add to the size of an object
EXCLUDE_IDS = [id(getattr(__builtins__, x)) for x in dir(__builtins__)]
def class_attrs(obj):
    """Return the attribute names visible on an object's class (or [])."""
    if not hasattr(obj, '__class__'):
        return []
    return dir(obj.__class__)
def is_class_attr(obj, name):
    """Check whether a name resolves to a class attribute of an object.

    Args:
        obj: the object to check.
        name: the attribute name to check for.

    Returns:
        True when ``name`` is defined on the class and is not shadowed by
        an instance attribute bound to a different object, else False.
    """
    attrs = dir(obj.__class__) if hasattr(obj, '__class__') else []
    if name not in attrs:
        return False
    # Identity comparison: an instance override counts only when it rebinds
    # the name to a different object than the class-level value.
    return getattr(obj, name) is getattr(obj.__class__, name)
def _all_dicts(T):
""" Return a list of all __dict__ for a type, or object.
Args:
T: the type, or object, to determine the __dicts__ for.
Returns:
The list of __dict__ references, including those in base classes.
"""
if not isinstance(T, type):
T = type(T)
dicts = []
def inner(T, dicts):
if hasattr(T, '__dict__'):
dicts.append(T.__dict__)
for c in T.__bases__:
inner(c, dicts)
inner(T, dicts)
return dicts
def _all_slots(T):
""" Return a list of all slots for a type, or object.
Args:
T: the type, or object to determine the slots for.
Returns:
The list of slot names, including those in base classes.
"""
if not isinstance(T, type):
T = type(T)
slots = []
def inner(T, slots):
if hasattr(T, '__slots__'):
slots += [s for s in T.__slots__]
for c in T.__bases__:
inner(c, slots)
inner(T, slots)
return slots
def attr_names(parent, child):
    """Return the attribute name(s) (in parent) for a child object.

    Tries, in order: the parent's own ``__dict__``, set/sequence indices,
    mapping keys, class ``__dict__`` entries, and ``__slots__`` entries.
    Matching is by identity (``id``), not equality.

    Args:
        parent: the parent object.
        child: the child object to locate inside ``parent``.

    Returns:
        A list of attribute names/keys/indices (as strings) in the parent
        referencing the child object, or ['<UNKNOWN>'] if none are found.
    """
    names = []
    # The elif chain means only the FIRST matching container view is scanned;
    # the class-dict and slots passes below always run in addition.
    if hasattr(parent, '__dict__') and child is parent.__dict__:
        names.append('__dict__')
    elif isinstance(parent, Set):
        # Sets have no stable indices; the enumerate position is best-effort.
        for k, v in enumerate(parent):
            if id(v) == id(child):
                names.append(str(k))
    elif isinstance(parent, (tuple, list, deque)):
        for k, v in enumerate(parent):
            if id(v) == id(child):
                names.append(str(k))
    elif isinstance(parent, Mapping) or hasattr(parent, iteritems):
        for k, v in getattr(parent, iteritems)():
            if id(v) == id(child):
                names.append(str(k))
    # Names defined on the class (and its bases) pointing at the child.
    for _dict in _all_dicts(parent):
        for k, v in _dict.items():
            if id(v) == id(child):
                names.append(str(k))
    # Slot-stored attributes; hasattr guards unset slots (AttributeError).
    for k in _all_slots(parent):
        if hasattr(parent, k):
            v = getattr(parent, k)
            if id(v) == id(child):
                names.append(str(k))
    if not names:
        names.append('<UNKNOWN>')
    return names
def sizeof(obj, verbose = False):
    """Get the size of an object instance and its unique attributes.

    The size returned is ``sys.getsizeof(obj)`` plus the sizes of all unique
    objects reachable from it: the ``__dict__`` mapping and its keys/values,
    ``__slots__`` attributes, and container elements. Each referenced object
    is counted at most once (tracked by id), and the following never add to
    the total: modules, functions, classes, built-in objects, and class
    attributes not overridden by the instance.

    Args:
        obj: the object to measure.
        verbose: if True, print each visited attribute and its size.

    Returns:
        The total size of the object, in bytes.
    """
    # Pre-seed with builtins so shared interpreter objects are never counted.
    seen_ids = set(EXCLUDE_IDS)
    def inner(obj, level = 0, name = ''):
        obj_id = id(obj)
        if isinstance(obj, EXCLUDE_TYPES) or id(obj) in seen_ids:
            size = 0
        elif is_class_attr(obj, name):
            # Shared class attribute: contributes nothing per-instance.
            size = 0
        else:
            if verbose:
                print('%s%s %s: %r' % (' ' * level, obj_id, name, obj))
            seen_ids.add(obj_id)
            size = sys.getsizeof(obj)
            if isinstance(obj, zero_depth_bases):
                pass
            elif isinstance(obj, (tuple, list, Set, deque)):
                size += sum(inner(v, level + 1, '%s[%s]' % (name, k))
                            for k, v in enumerate(obj))
            elif isinstance(obj, Mapping) or hasattr(obj, iteritems):
                # Both keys and values contribute to a mapping's footprint.
                size += sum(inner(k, level + 1, '%s[%s]k' % (name, k)) +
                            inner(v, level + 1, '%s[%s]v' % (name, k))
                            for k, v in getattr(obj, iteritems)())
            # Check for custom object instances - may subclass above too
            # NOTE(review): obj was added to seen_ids just above, so
            # `id(obj) in seen_ids` is always True here and this branch never
            # executes; class-dict members are instead picked up by the
            # get_referents pass below. Possibly this was meant to test
            # `v`/`k` rather than `obj` — confirm before changing.
            for _dict in _all_dicts(obj):
                for k, v in _dict.items():
                    if not (isinstance(obj, EXCLUDE_TYPES) or
                            id(obj) in seen_ids or
                            # __slots__ are handled below
                            k == '__slots__'):
                        size += inner(k, level + 1, 'key: ' + k)
                        size += inner(v, level + 1, k)
            for k in _all_slots(obj):
                if hasattr(obj, k):
                    v = getattr(obj, k)
                    if not (isinstance(v, EXCLUDE_TYPES) or id(v) in seen_ids):
                        size += inner(v, level + 1, k)
            # Check referents, in case anything was missed.
            for o in get_referents(obj):
                if not (isinstance(o, EXCLUDE_TYPES) or id(o) in seen_ids):
                    k = '|'.join(attr_names(obj, o))
                    size += inner(o, level + 1, '(%s)' % (k,))
        if verbose:
            print('%s%s %d %s: %r' % (' ' * level, obj_id, size, name, obj))
        return size
    return inner(obj)
| 35.650718 | 104 | 0.582472 |
d2f14600c7275f84072a921aa4cbb5e595aa37d4 | 125,462 | py | Python | mrcnn/model.py | tommywu6/Mask_RCNN_Demo | 11b7e343ab5e209234996414a3191c52c9d61634 | [
"MIT"
] | null | null | null | mrcnn/model.py | tommywu6/Mask_RCNN_Demo | 11b7e343ab5e209234996414a3191c52c9d61634 | [
"MIT"
] | null | null | null | mrcnn/model.py | tommywu6/Mask_RCNN_Demo | 11b7e343ab5e209234996414a3191c52c9d61634 | [
"MIT"
] | null | null | null | """
Mask R-CNN
The main Mask R-CNN model implemenetation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
# changed related APIs to make this file compatible with the Mask R-CNN demo
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import skimage.transform
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Prints a text message. And, optionally, if a Numpy array is provided
    it prints its shape, min, max and dtype.

    Args:
        text: message to print.
        array: optional np.ndarray whose stats are appended to the message
            (which is first padded to 25 columns).
    """
    if array is not None:
        text = text.ljust(25)
        text += ("shape: {:20}  ".format(str(array.shape)))
        if array.size:
            text += ("min: {:10.5f}  max: {:10.5f}".format(array.min(), array.max()))
        else:
            # min()/max() are undefined on empty arrays, and formatting ""
            # with {:10.5f} raises ValueError — print blanks instead.
            text += ("min: {:10}  max: {:10}".format("", ""))
        text += "  {}".format(array.dtype)
    print(text)
class BatchNorm(KL.BatchNormalization):
    """Extends the Keras BatchNormalization class to allow a central place
    to make changes if needed.

    Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via setting in Config class) and functions
    as linear layer.
    """
    def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when inferencing
        """
        # Name the class explicitly: `super(self.__class__, self)` recurses
        # infinitely if BatchNorm is ever subclassed, because self.__class__
        # would then be the subclass on every call.
        return super(BatchNorm, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Args:
        config: model config; reads BACKBONE and BACKBONE_STRIDES, or calls
            COMPUTE_BACKBONE_SHAPE when BACKBONE is callable.
        image_shape: [height, width, ...] of the input image.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    if callable(config.BACKBONE):
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    height, width = image_shape[0], image_shape[1]
    stage_shapes = [
        [int(math.ceil(height / stride)), int(math.ceil(width / stride))]
        for stride in config.BACKBONE_STRIDES
    ]
    return np.array(stage_shapes)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Layer names follow the canonical ResNet scheme (e.g. res2a_branch2a) so
    that pre-trained weights can be loaded by name.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Bottleneck: 1x1 reduce -> kxk conv -> 1x1 expand, each with BN + ReLU.
    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
    # Identity shortcut: add the unmodified input to the main path.
    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Main path: strided 1x1 reduce -> kxk conv -> 1x1 expand.
    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
                  '2c', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
    # Projection shortcut: strided 1x1 conv so shapes match for the addition.
    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers

    Returns [C1, C2, C3, C4, C5]: the last output of each stage (C5 is None
    when stage5 is False), used as FPN feature inputs.
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    # The two architectures differ only in the number of stage-4 blocks.
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        # chr(98 + i) yields block labels 'b', 'c', 'd', ...
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Applies the given deltas to the given boxes.
    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply

    Returns: [N, (y1, x1, y2, x2)] the refined boxes.
    """
    # Convert to y, x, h, w
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height
    center_x = boxes[:, 1] + 0.5 * width
    # Apply deltas: offsets are relative to box size; sizes scale by exp()
    # since dh/dw are log-space ratios.
    center_y += deltas[:, 0] * height
    center_x += deltas[:, 1] * width
    height *= tf.exp(deltas[:, 2])
    width *= tf.exp(deltas[:, 3])
    # Convert back to y1, x1, y2, x2
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    y2 = y1 + height
    x2 = x1 + width
    result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
    return result
def clip_boxes_graph(boxes, window):
    """Clip boxes to fit inside a window.

    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2

    Returns: [N, 4] the clipped boxes.
    """
    # Split
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clip each coordinate into [window_min, window_max].
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    # Make the static shape explicit for downstream layers.
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.
    Inputs:
        rpn_probs: [batch, anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
        anchors: [batch, (y1, x1, y2, x2)] anchors in normalized coordinates
    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """
    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold
    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Anchors
        anchors = inputs[2]
        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset. The cap is configurable
        # via config.PRE_NMS_LIMIT and defaults to the historical 6000.
        pre_nms_limit = tf.minimum(
            getattr(self.config, "PRE_NMS_LIMIT", 6000), tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
                                    self.config.IMAGES_PER_GPU,
                                    names=["pre_nms_anchors"])
        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([pre_nms_anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])
        # Clip to image boundaries. Since we're in normalized coordinates,
        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])
        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.
        # Non-max suppression
        def nms(boxes, scores):
            indices = tf.image.non_max_suppression(
                boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(boxes, indices)
            # Pad if needed so every image yields exactly proposal_count rows.
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals
    def compute_output_shape(self, input_shape):
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Compute log base 2 of a tensor.

    TensorFlow has no native log2 op, so use the identity
    log2(x) = ln(x) / ln(2).
    """
    ln_two = tf.math.log(2.0)
    return tf.math.log(x) / ln_two
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Params:
    - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - Feature maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
    constructor.
    """

    def __init__(self, pool_shape, **kwargs):
        # pool_shape: output [height, width] of each pooled region.
        super(PyramidROIAlign, self).__init__(**kwargs)
        self.pool_shape = tuple(pool_shape)

    def call(self, inputs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]

        # Image meta
        # Holds details about the image. See compose_image_meta()
        image_meta = inputs[1]

        # Feature Maps. List of feature maps from different levels of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[2:]

        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Use shape of first image. Images in a batch must have the same size.
        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp to the available pyramid levels, P2..P5.
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)

        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)

            # Box indices for crop_and_resize.
            box_indices = tf.cast(ix[:, 0], tf.int32)

            # Keep track of which box is mapped to which level
            box_to_level.append(ix)

            # Stop gradient propagation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)

            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))

        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)

        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)

        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        # NOTE(review): assumes fewer than 100000 boxes per image so the
        # merged key is unambiguous.
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)

        # Re-add the batch dimension
        pooled = tf.expand_dims(pooled, 0)
        return pooled

    def compute_output_shape(self, input_shape):
        # [batch, num_boxes] + pool_shape + [channels of the feature maps]
        return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Compute a matrix of IoU overlaps between two sets of boxes.

    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    Returns: [num_boxes1, num_boxes2] tensor of IoU values.
    """
    # Pair every box in boxes1 with every box in boxes2 without loops by
    # repeating boxes1 entries and tiling boxes2. TF has no np.repeat
    # equivalent, so emulate it with tf.tile() and tf.reshape().
    n1 = tf.shape(boxes1)[0]
    n2 = tf.shape(boxes2)[0]
    rep1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
                              [1, 1, n2]), [-1, 4])
    rep2 = tf.tile(boxes2, [n1, 1])

    # Intersection rectangle of each pair.
    a_y1, a_x1, a_y2, a_x2 = tf.split(rep1, 4, axis=1)
    b_y1, b_x1, b_y2, b_x2 = tf.split(rep2, 4, axis=1)
    inter_y1 = tf.maximum(a_y1, b_y1)
    inter_x1 = tf.maximum(a_x1, b_x1)
    inter_y2 = tf.minimum(a_y2, b_y2)
    inter_x2 = tf.minimum(a_x2, b_x2)
    intersection = tf.maximum(inter_x2 - inter_x1, 0) * \
        tf.maximum(inter_y2 - inter_y1, 0)

    # Union = area1 + area2 - intersection.
    area_a = (a_y2 - a_y1) * (a_x2 - a_x1)
    area_b = (b_y2 - b_y1) * (b_x2 - b_x1)
    union = area_a + area_b - intersection

    # IoU, reshaped back to the [boxes1, boxes2] matrix.
    iou = intersection / union
    return tf.reshape(iou, [n1, n2])
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.

    Inputs:
    proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
            Bbox refinements, normalized by config.BBOX_STD_DEV.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
           boundaries and resized to neural network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    # Fail fast if the image has no proposals at all.
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)

    # Remove zero padding
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)

    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)

    # Compute overlaps with crowd boxes [proposals, crowds]
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    no_crowd_bool = (crowd_iou_max < 0.001)

    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]

    # Subsample ROIs. Aim for ROI_POSITIVE_RATIO positive.
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    # FIX: tf.random_shuffle was removed in TF2; the rest of this file uses
    # TF2 APIs (tf.math.log, tf.sparse.to_dense), so use tf.random.shuffle.
    positive_indices = tf.random.shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)

    # Assign positive ROIs to GT boxes. Guard against the empty case, where
    # argmax over a zero-width axis would fail.
    positive_overlaps = tf.gather(overlaps, positive_indices)
    roi_gt_box_assignment = tf.cond(
        tf.greater(tf.shape(positive_overlaps)[1], 0),
        true_fn = lambda: tf.argmax(positive_overlaps, axis=1),
        false_fn = lambda: tf.cast(tf.constant([]),tf.int64)
    )
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)

    # Compute bbox refinement targets for positive ROIs
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV

    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)

    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space.
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)

    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)

    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [(0, N + P), (0, 0), (0, 0)])

    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each, by running detection_targets_graph() per image.

    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
                   Bbox refinements.
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        """config: model configuration (ROI counts, mask shape, etc.)."""
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        proposals, gt_class_ids, gt_boxes, gt_masks = \
            inputs[0], inputs[1], inputs[2], inputs[3]

        # Slice the batch and run a graph for each slice.
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        return utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda w, x, y, z: detection_targets_graph(
                w, x, y, z, self.config),
            self.config.IMAGES_PER_GPU, names=names)

    def compute_output_shape(self, input_shape):
        rois_shape = (None, self.config.TRAIN_ROIS_PER_IMAGE, 4)
        class_ids_shape = (None, 1)
        deltas_shape = (None, self.config.TRAIN_ROIS_PER_IMAGE, 4)
        masks_shape = (None, self.config.TRAIN_ROIS_PER_IMAGE,
                       self.config.MASK_SHAPE[0], self.config.MASK_SHAPE[1])
        return [rois_shape, class_ids_shape, deltas_shape, masks_shape]

    def compute_mask(self, inputs, mask=None):
        # No Keras masking on any of the four outputs.
        return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals, filter overlaps and return final
    detections.

    Inputs:
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: [N, num_classes]. Class probabilities.
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
        window: (y1, x1, y2, x2) in normalized coordinates. The part of the
            image that contains the image excluding the padding.

    Returns detections shaped: [config.DETECTION_MAX_INSTANCES,
    (y1, x1, y2, x2, class_id, score)] where coordinates are normalized.
    Zero padded if fewer detections survive filtering.
    """
    # Class IDs per ROI
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)

    # TODO: Filter out boxes with zero area

    # Filter out background boxes (class 0 is background)
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        # Intersect the two index sets via tf.sets (requires rank-2 inputs,
        # hence the expand_dims / to_dense round trip).
        keep = tf.sets.intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(conf_keep, 0))
        keep = tf.sparse.to_dense(keep)[0]

    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]

    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=config.DETECTION_MAX_INSTANCES,
                iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back into the `keep` index space
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep

    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.intersection(tf.expand_dims(keep, 0),
                                tf.expand_dims(nms_keep, 0))
    keep = tf.sparse.to_dense(keep)[0]
    # Keep top detections
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)

    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are normalized.
    detections = tf.concat([
        tf.gather(refined_rois, keep),
        tf.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)

    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Converts classified proposal boxes plus their bounding box deltas
    into the final detection boxes.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are normalized.
    """

    def __init__(self, config=None, **kwargs):
        """config: model configuration (batch size, detection limits)."""
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        rois, mrcnn_class, mrcnn_bbox, image_meta = \
            inputs[0], inputs[1], inputs[2], inputs[3]

        # The image window (the area excluding padding), in normalized
        # coordinates. All images in a batch are resized to the same shape,
        # so the first image's shape is used for normalization.
        meta = parse_image_meta_graph(image_meta)
        first_image_shape = meta['image_shape'][0]
        window = norm_boxes_graph(meta['window'], first_image_shape[:2])

        # Refine detections independently for each image in the batch.
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)

        # [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
        # normalized coordinates.
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

    def compute_output_shape(self, input_shape):
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of the Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: anchor density; typically 1 (anchors at every feature
        map pixel) or 2 (every other pixel).

    Returns:
    rpn_class_logits: [batch, anchors, 2] anchor classifier logits
        (before softmax)
    rpn_probs: [batch, anchors, 2] anchor classifier probabilities
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] deltas to be
        applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared convolutional trunk of the RPN.
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Anchor scores: [batch, height, width, anchors_per_location * 2],
    # flattened to [batch, anchors, 2].
    class_raw = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                          activation='linear', name='rpn_class_raw')(shared)
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(class_raw)

    # BG/FG probabilities via softmax over the last dimension.
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding box refinements: [batch, height, width,
    # anchors_per_location * 4], flattened to [batch, anchors, 4].
    bbox_raw = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(shared)
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(bbox_raw)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.

    Wrapping rpn_graph() in a Model lets it be applied to several feature
    maps with shared weights.

    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: anchor density; typically 1 (anchors at every feature
        map pixel) or 2 (every other pixel).
    depth: depth of the backbone feature map.

    Returns a Keras Model object whose outputs, when called, are:
    rpn_class_logits: [batch, anchors, 2] anchor classifier logits
        (before softmax)
    rpn_probs: [batch, anchors, 2] anchor classifier probabilities
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] deltas to be
        applied to anchors.
    """
    feature_map_input = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_map_input, anchors_per_location,
                            anchor_stride)
    return KM.Model([feature_map_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True,
                         fc_layers_size=1024):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    fc_layers_size: Size of the 2 FC layers

    Returns:
        logits: [batch, num_rois, NUM_CLASSES] classifier logits
            (before softmax)
        probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
        bbox_deltas: [batch, num_rois, NUM_CLASSES,
            (dy, dx, log(dh), log(dw))] Deltas to apply to proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_rois, pool_size, pool_size, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two FC layers, implemented with Conv2D for consistency. The first
    # convolution spans the whole pooled window, collapsing it to 1x1.
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    # Drop the two 1x1 spatial dims: [batch, num_rois, fc_layers_size]
    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, num_rois, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the mask head of the Feature Pyramid
    Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns: Masks [batch, num_rois, 2*pool_size, 2*pool_size, num_classes]
    """
    # ROI Pooling
    # Shape: [batch, num_rois, pool_size, pool_size, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask")([rois, image_meta] + feature_maps)

    # Four identical conv -> batch norm -> ReLU stages.
    for stage in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(stage))(x)
        x = KL.TimeDistributed(BatchNorm(),
                               name='mrcnn_mask_bn{}'.format(stage))(x, training=train_bn)
        x = KL.Activation('relu')(x)

    # Upsample 2x, then predict a per-class sigmoid mask.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Implements the Smooth-L1 (Huber, delta=1) loss.

    y_true and y_pred are typically [N, 4], but any matching shape works.
    Returns an elementwise loss tensor of the same shape.
    """
    abs_diff = K.abs(y_true - y_pred)
    # Quadratic where |diff| < 1, linear elsewhere.
    quadratic_mask = K.cast(K.less(abs_diff, 1.0), "float32")
    quadratic_part = 0.5 * abs_diff ** 2
    linear_part = abs_diff - 0.5
    return quadratic_mask * quadratic_part + (1 - quadratic_mask) * linear_part
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.

    Returns a scalar loss tensor (0.0 when no anchors contribute).
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross-entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    # Guard against an empty selection, where mean would yield NaN.
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]

    Returns a scalar loss tensor (0.0 when no positive anchors exist).
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    # Delegate to the shared Smooth-L1 helper (previously re-implemented
    # inline here — identical math, less duplication).
    loss = smooth_l1_loss(target_bbox, rpn_bbox)

    # Guard against an empty selection, where mean would yield NaN.
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask R-CNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.

    Returns a scalar loss tensor.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    #       images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Per-ROI cross-entropy loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]

    Returns a scalar Smooth-L1 loss over positive ROIs (0.0 if none).
    """
    # Flatten the batch and ROI dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    num_classes = K.int_shape(pred_bbox)[2]
    pred_bbox = K.reshape(pred_bbox, (-1, num_classes, 4))

    # Only positive ROIs contribute to the loss, and only the delta
    # predicted for each ROI's own class.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    gather_indices = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Select the contributing deltas (true and predicted).
    y_true = tf.gather(target_bbox, positive_ix)
    y_pred = tf.gather_nd(pred_bbox, gather_indices)

    # Smooth-L1 loss; 0.0 when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    smooth_l1_loss(y_true=y_true, y_pred=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
                with values from 0 to 1.
    """
    # Merge the batch and ROI dimensions for simpler indexing.
    class_ids = K.reshape(target_class_ids, (-1,))
    true_shape = tf.shape(target_masks)
    true_masks = K.reshape(target_masks, (-1, true_shape[2], true_shape[3]))
    pshape = tf.shape(pred_masks)
    pred = K.reshape(pred_masks, (-1, pshape[2], pshape[3], pshape[4]))
    # Move the class axis forward: [N, num_classes, height, width]
    pred = tf.transpose(pred, [0, 3, 1, 2])
    # Only positive ROIs contribute, and each only through the mask
    # predicted for its own ground-truth class.
    pos_ix = tf.where(class_ids > 0)[:, 0]
    pos_class_ids = tf.cast(tf.gather(class_ids, pos_ix), tf.int64)
    gather_ix = tf.stack([pos_ix, pos_class_ids], axis=1)
    # Pull out the contributing masks (true and predicted).
    y_true = tf.gather(true_masks, pos_ix)
    y_pred = tf.gather_nd(pred, gather_ix)
    # Binary cross-entropy; zero when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically
        224x224, and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    image_meta: the image metadata vector built by compose_image_meta()
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is depricated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return (augmenter.__class__.__name__ in MASK_AUGMENTERS)

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support bool masks
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool. Use the builtin `bool` rather than the
        # `np.bool` alias, which was removed in NumPy 1.24.
        mask = mask.astype(bool)

    # Note that some boxes might be all zeros if the corresponding mask got
    # cropped out, and here is to filter them out.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    # Assign ROIs to GT boxes: for each ROI, the GT box with highest IoU.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]

    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.

        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            # NOTE(review): if keep_bg_ids is empty here (all kept ROIs are
            # fg), np.random.choice raises — this branch appears to assume at
            # least one bg ROI was kept; confirm against callers.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)

    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0

    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]

    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(skimage.transform.resize(
                    class_mask, (gt_h, gt_w), order=1, mode="constant")).astype(bool)
            # Place the resized mini mask in the placeholder
            class_mask = placeholder

        # Pick the part of the mask inside the ROI and resize it to the
        # network's mask output size.
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = skimage.transform.resize(m, config.MASK_SHAPE, order=1, mode="constant")
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas. Only the
              first len(positive anchors) rows are filled; the rest stay 0.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)

    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)

    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # TODO: If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argmax(overlaps, axis=0)
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # Output placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    # About 90% of the ROIs are sampled around the GT boxes.
    per_box = int(0.9 * count / gt_boxes.shape[0])
    for box_i in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[box_i]
        box_h = gt_y2 - gt_y1
        box_w = gt_x2 - gt_x1
        # Sampling region: the GT box grown by its own size on every side,
        # clipped to the image bounds.
        lo_y = max(gt_y1 - box_h, 0)
        hi_y = min(gt_y2 + box_h, image_shape[0])
        lo_x = max(gt_x1 - box_w, 0)
        hi_x = min(gt_x2 + box_w, image_shape[1])

        # Draw twice as many candidate coordinate pairs as needed, discard
        # zero-area ones, and retry until enough survive.
        while True:
            y_pairs = np.random.randint(lo_y, hi_y, (per_box * 2, 2))
            x_pairs = np.random.randint(lo_x, hi_x, (per_box * 2, 2))
            threshold = 1
            y_pairs = y_pairs[np.abs(y_pairs[:, 0] - y_pairs[:, 1]) >=
                              threshold][:per_box]
            x_pairs = x_pairs[np.abs(x_pairs[:, 0] - x_pairs[:, 1]) >=
                              threshold][:per_box]
            if y_pairs.shape[0] == per_box and x_pairs.shape[0] == per_box:
                break

        # Sort each coordinate pair so y1 <= y2 and x1 <= x2, then assemble
        # boxes in (y1, x1, y2, x2) order.
        x1, x2 = np.split(np.sort(x_pairs, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y_pairs, axis=1), 2, axis=1)
        rois[per_box * box_i:per_box * (box_i + 1)] = np.hstack([y1, x1, y2, x2])

    # The remainder (~10%) is sampled anywhere in the image, with the same
    # oversample-filter-retry scheme.
    leftover = count - (per_box * gt_boxes.shape[0])
    while True:
        y_pairs = np.random.randint(0, image_shape[0], (leftover * 2, 2))
        x_pairs = np.random.randint(0, image_shape[1], (leftover * 2, 2))
        threshold = 1
        y_pairs = y_pairs[np.abs(y_pairs[:, 0] - y_pairs[:, 1]) >=
                          threshold][:leftover]
        x_pairs = x_pairs[np.abs(x_pairs[:, 0] - x_pairs[:, 1]) >=
                          threshold][:leftover]
        if y_pairs.shape[0] == leftover and x_pairs.shape[0] == leftover:
            break

    x1, x2 = np.split(np.sort(x_pairs, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y_pairs, axis=1), 2, axis=1)
    rois[-leftover:] = np.hstack([y1, x1, y2, x2])
    return rois
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    error_count = 0

    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)

    # Keras requires a generator to run indefinitely.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)

            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_image_gt(dataset, config, image_id, augment=augment,
                              augmentation=augmentation,
                              use_mini_mask=config.USE_MINI_MASK)

            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue

            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)

            # Mask R-CNN Targets
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)

            # Init batch arrays on the first item of each batch.
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)

            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]

            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1

            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
                outputs = []

                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])

                yield inputs, outputs

                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            raise
        except:
            # Log it and skip the image.
            # NOTE(review): bare except deliberately swallows any per-image
            # error so a single bad sample doesn't kill a long training run;
            # training aborts only after more than 5 consecutive failures.
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
    def build(self, mode, config):
        """Build Mask R-CNN architecture.

        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        config: A Sub-class of the Config class; supplies all
            architecture hyper-parameters.

        Returns a Keras Model (wrapped in ParallelModel when
        config.GPU_COUNT > 1). In training mode the outputs include the
        loss tensors; in inference mode they are detections and masks.
        """
        assert mode in ['training', 'inference']

        # Image size must be divisible by 2 multiple times (6 downscaling
        # steps in the backbone/FPN).
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be dividable by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # Inputs
        input_image = KL.Input(
            shape=[None, None, 3], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)

            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")

        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers (FPN lateral connections + upsampling)
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]

        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants.
            # NOTE(review): creating a tf.Variable inside a Lambda bakes the
            # anchor constant into the graph; newer Keras versions may reject
            # this pattern — verify when upgrading.
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors

        # RPN Model (shared weights applied to every pyramid level)
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]
        rpn_class_logits, rpn_class, rpn_bbox = outputs

        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(
            proposal_count=proposal_count,
            nms_threshold=config.RPN_NMS_THRESHOLD,
            name="ROI",
            config=config)([rpn_class, rpn_bbox, anchors])

        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            active_class_ids = KL.Lambda(
                lambda x: parse_image_meta_graph(x)["active_class_ids"]
                )(input_image_meta)

            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x, K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois

            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])

            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)

            # TODO: clean up (use tf.identity if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)

            # Losses, each wrapped in a Lambda so they appear as named
            # graph outputs.
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])

            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
            # normalized coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])

            # Create masks for detections
            detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
            mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)

            model = KM.Model([input_image, input_image_meta, input_anchors],
                             [detections, mrcnn_class, mrcnn_bbox,
                                 mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
                             name='mask_rcnn')

        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from mrcnn.parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)

        return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
    def load_weights(self, filepath, by_name=False, exclude=None):
        """Modified version of the corresponding Keras function with
        the addition of multi-GPU support and the ability to exclude
        some layers from loading.

        filepath: path to the HDF5 weights file.
        by_name: if True, match weights to layers by layer name instead of
            by topological order.
        exclude: list of layer names to exclude from loading. Forces
            by_name=True, since positional matching is meaningless once
            layers are skipped.
        """
        import h5py
        # NOTE(review): keras.engine.saving is a Keras-version-specific
        # internal module — confirm against the pinned Keras release.
        from keras.engine import saving
        if exclude:
            by_name = True
        if h5py is None:
            raise ImportError('`load_weights` requires h5py.')
        f = h5py.File(filepath, mode='r')
        # Files saved via model.save() nest weights under 'model_weights';
        # files from save_weights() keep them at the root.
        if 'layer_names' not in f.attrs and 'model_weights' in f:
            f = f['model_weights']
        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        keras_model = self.keras_model
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers
        # Exclude some layers
        if exclude:
            layers = filter(lambda l: l.name not in exclude, layers)
        if by_name:
            saving.load_weights_from_hdf5_group_by_name(f, layers)
        else:
            saving.load_weights_from_hdf5_group(f, layers)
        if hasattr(f, 'close'):
            f.close()
        # Update the log directory / epoch counter from the file name.
        self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
    def compile(self, learning_rate, momentum):
        """Gets the model ready for training. Adds losses, regularization, and
        metrics. Then calls the Keras compile() function.

        learning_rate: SGD learning rate.
        momentum: SGD momentum.
        """
        # Optimizer object
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
        # Add Losses
        # First, clear previously set losses to avoid duplication
        # NOTE(review): reaches into private Keras attributes (_losses,
        # _per_input_losses, metrics_tensors below) — tied to the Keras
        # version this code was written against; confirm on upgrade.
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        # The five loss outputs are Lambda layers added in build(); each one
        # is pulled in by name and weighted by config.LOSS_WEIGHTS.
        loss_names = [
            "rpn_class_loss", "rpn_bbox_loss",
            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            if layer.output in self.keras_model.losses:
                continue
            # keepdims=True keeps a 1-element axis so the loss stays a tensor.
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_loss(loss)
        # Add L2 Regularization
        # Skip gamma and beta weights of batch normalization layers.
        # Dividing by tf.size(w) normalizes the penalty per weight element.
        reg_losses = [
            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
            for w in self.keras_model.trainable_weights
            if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))
        # Compile. The model's outputs are the loss layers themselves, so no
        # per-output target loss functions are supplied (loss=[None, ...]).
        self.keras_model.compile(
            optimizer=optimizer,
            loss=[None] * len(self.keras_model.outputs))
        # Add metrics for losses
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.metrics_tensors.append(loss)
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainble layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from we left of. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/[\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_[\w-]+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Create log_dir if not exists
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done alreay, so this actually determines
the epochs to train in total rather than in this particaular
call.
layers: Allows selecting wich layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gausssian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name is 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
    def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.

        images: List of image matrices [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matrices:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
        """
        molded_images = []
        image_metas = []
        windows = []
        for image in images:
            # Resize image
            # TODO: move resizing to mold_image()
            molded_image, window, scale, padding, crop = utils.resize_image(
                image,
                min_dim=self.config.IMAGE_MIN_DIM,
                min_scale=self.config.IMAGE_MIN_SCALE,
                max_dim=self.config.IMAGE_MAX_DIM,
                mode=self.config.IMAGE_RESIZE_MODE)
            # Subtract the mean pixel and convert to float32.
            molded_image = mold_image(molded_image, self.config)
            # Build image_meta. image_id 0 and all-zero active_class_ids
            # appear to be inference-time placeholders — confirm against
            # compose_image_meta's consumers.
            image_meta = compose_image_meta(
                0, image.shape, molded_image.shape, window, scale,
                np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
            # Append
            molded_images.append(molded_image)
            windows.append(window)
            image_metas.append(image_meta)
        # Pack into arrays
        molded_images = np.stack(molded_images)
        image_metas = np.stack(image_metas)
        windows = np.stack(windows)
        return molded_images, image_metas, windows
    def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
                          image_shape, window):
        """Reformats the detections of one image from the format of the neural
        network output to a format suitable for use in the rest of the
        application.

        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
        mrcnn_mask: [N, height, width, num_classes]
        original_image_shape: [H, W, C] Original image shape before resizing
        image_shape: [H, W, C] Shape of the image after resizing and padding
        window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
                image is excluding the padding.

        Returns:
        boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
        class_ids: [N] Integer class IDs for each bounding box
        scores: [N] Float probability scores of the class_id
        masks: [height, width, num_instances] Instance masks
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = np.where(detections[:, 4] == 0)[0]
        N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]

        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        class_ids = detections[:N, 4].astype(np.int32)
        scores = detections[:N, 5]
        # For each detection, keep only the mask channel of its predicted
        # class (fancy indexing pairs row i with channel class_ids[i]).
        masks = mrcnn_mask[np.arange(N), :, :, class_ids]

        # Translate normalized coordinates in the resized image to pixel
        # coordinates in the original image before resizing
        window = utils.norm_boxes(window, image_shape[:2])
        wy1, wx1, wy2, wx2 = window
        shift = np.array([wy1, wx1, wy1, wx1])
        wh = wy2 - wy1  # window height
        ww = wx2 - wx1  # window width
        scale = np.array([wh, ww, wh, ww])
        # Convert boxes to normalized coordinates on the window
        boxes = np.divide(boxes - shift, scale)
        # Convert boxes to pixel coordinates on the original image
        boxes = utils.denorm_boxes(boxes, original_image_shape[:2])

        # Filter out detections with zero area. Happens in early training when
        # network weights are still random
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
        if exclude_ix.shape[0] > 0:
            boxes = np.delete(boxes, exclude_ix, axis=0)
            class_ids = np.delete(class_ids, exclude_ix, axis=0)
            scores = np.delete(scores, exclude_ix, axis=0)
            masks = np.delete(masks, exclude_ix, axis=0)
            N = class_ids.shape[0]

        # Resize masks to original image size and set boundary threshold.
        full_masks = []
        for i in range(N):
            # Convert neural network mask to full size mask
            full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
            full_masks.append(full_mask)
        # Stack along the last axis; with no detections return an empty
        # [H, W, 0] array so downstream shape expectations still hold.
        full_masks = np.stack(full_masks, axis=-1)\
            if full_masks else np.empty(original_image_shape[:2] + (0,))

        return boxes, class_ids, scores, full_masks
    def detect(self, images, verbose=0):
        """Runs the detection pipeline.

        images: List of images, potentially of different sizes.

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(
            images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"

        if verbose:
            log("Processing {} images".format(len(images)))
            for image in images:
                log("image", image)

        # Mold inputs to format expected by the neural network
        molded_images, image_metas, windows = self.mold_inputs(images)

        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape,\
                "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."

        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)

        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection. predict() returns [detections, mrcnn_class,
        # mrcnn_bbox, mrcnn_mask, rpn_rois, rpn_class, rpn_bbox] (see the
        # inference Model in build()); only detections and masks are used.
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
        # Process detections back into original-image pixel coordinates.
        results = []
        for i, image in enumerate(images):
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       windows[i])
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
    def detect_molded(self, molded_images, image_metas, verbose=0):
        """Runs the detection pipeline, but expect inputs that are
        molded already. Used mostly for debugging and inspecting
        the model.

        molded_images: List of images loaded using load_image_gt()
        image_metas: image meta data, also returned by load_image_gt()

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(molded_images) == self.config.BATCH_SIZE,\
            "Number of images must be equal to BATCH_SIZE"

        if verbose:
            log("Processing {} images".format(len(molded_images)))
            for image in molded_images:
                log("image", image)

        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape, "Images must have the same size"

        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)

        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection (same output ordering as in detect()).
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
        # Process detections
        results = []
        for i, image in enumerate(molded_images):
            # Inputs are already molded, so the whole image is the window.
            window = [0, 0, image.shape[0], image.shape[1]]
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       window)
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
# TODO: Remove this after the notebook are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and noramlized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
# Pack the generated Numpy arrays into a a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
                       window, scale, active_class_ids):
    """Serialize per-image attributes into a single 1D array.

    image_id: An int ID of the image. Useful for debugging.
    original_image_shape: [H, W, C] before resizing or padding.
    image_shape: [H, W, C] after resizing and padding
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
            image is (excluding the padding)
    scale: The scaling factor applied to the original image (float32)
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    # Fixed layout: id(1) + original shape(3) + molded shape(3) +
    # window(4, image coordinates) + scale(1) + one flag per class.
    parts = (
        [image_id],
        list(original_image_shape),
        list(image_shape),
        list(window),
        [scale],
        list(active_class_ids),
    )
    return np.array([value for part in parts for value in part])
def parse_image_meta(meta):
    """Split a batch of packed image meta arrays back into components.

    See compose_image_meta() for the packing layout.

    meta: [batch, meta length] where meta length depends on NUM_CLASSES

    Returns a dict of the parsed values.
    """
    # Fixed layout: id(1) + original shape(3) + molded shape(3) +
    # window(4) + scale(1) + one flag per class.
    return {
        "image_id": meta[:, 0].astype(np.int32),
        "original_image_shape": meta[:, 1:4].astype(np.int32),
        "image_shape": meta[:, 4:7].astype(np.int32),
        # (y1, x1, y2, x2) window of image in pixels
        "window": meta[:, 7:11].astype(np.int32),
        "scale": meta[:, 11].astype(np.float32),
        "active_class_ids": meta[:, 12:].astype(np.int32),
    }
def parse_image_meta_graph(meta):
    """Graph version of parse_image_meta(): slice a [batch, meta length]
    tensor into its named components, without any dtype casts.

    See compose_image_meta() for the packing layout.

    Returns a dict of the parsed tensors.
    """
    return {
        "image_id": meta[:, 0],
        "original_image_shape": meta[:, 1:4],
        "image_shape": meta[:, 4:7],
        # (y1, x1, y2, x2) window of image in pixels
        "window": meta[:, 7:11],
        "scale": meta[:, 11],
        "active_class_ids": meta[:, 12:],
    }
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Invert mold_image(): add back the mean pixel and return uint8."""
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.

    boxes: [N, 4] matrix of boxes.
    name: optional name for the boolean_mask op.

    Returns:
    boxes: the non-zero rows of the input.
    non_zeros: [N] a 1D boolean mask identifying the rows to keep
    """
    # A row is kept if the sum of its absolute coordinates is non-zero;
    # casting to bool maps 0 -> False and everything else -> True.
    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
    boxes = tf.boolean_mask(boxes, non_zeros, name=name)
    return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Pick a different number of leading values from each row of x, as
    given by counts, and concatenate them into one flat tensor.
    """
    picked = [x[row, :counts[row]] for row in range(num_rows)]
    return tf.concat(picked, axis=0)
def norm_boxes_graph(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.
    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in normalized coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Divide by (size - 1) so pixel coordinate size-1 maps to 1.0, and shift
    # (y2, x2) down by one pixel to move them inside the box first.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
    """Converts boxes from normalized coordinates to pixel coordinates.
    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in pixel coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Exact inverse of norm_boxes_graph: scale back up by (size - 1), shift
    # (y2, x2) out by one pixel, and round to the nearest integer pixel.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
1b6692405b95e417889a24f63b6359f77f638602 | 37,864 | py | Python | absl/testing/tests/xml_reporter_test.py | em10100/abseil-py | 0f86230d77cd90ea1236f0cbee4df9a9b290ef66 | [
"Apache-2.0"
] | 1 | 2021-12-15T12:06:15.000Z | 2021-12-15T12:06:15.000Z | absl/testing/tests/xml_reporter_test.py | em10100/abseil-py | 0f86230d77cd90ea1236f0cbee4df9a9b290ef66 | [
"Apache-2.0"
] | null | null | null | absl/testing/tests/xml_reporter_test.py | em10100/abseil-py | 0f86230d77cd90ea1236f0cbee4df9a9b290ef66 | [
"Apache-2.0"
] | 1 | 2022-02-06T03:02:19.000Z | 2022-02-06T03:02:19.000Z | # Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from xml.etree import ElementTree
from xml.parsers import expat
from absl import logging
from absl.testing import _bazelize_command
from absl.testing import absltest
from absl.testing import parameterized
from absl.testing import xml_reporter
import mock
import six
class StringIOWriteLn(six.StringIO):
  """StringIO that also provides the writeln() method some stream-like
  APIs (e.g. unittest runners) expect."""

  def writeln(self, line):
    # Write the line, then terminate it with a newline.
    self.write(line)
    self.write('\n')
class MockTest(absltest.TestCase):
  """Minimal TestCase stub whose id() is fully controlled by the caller.

  Lets the reporter tests construct test cases with arbitrary dotted names
  (e.g. '__main__.MockTest.passing_test') without running real test logic.
  """

  # Exception type the result object treats as a failure (vs an error).
  failureException = AssertionError

  def __init__(self, name):
    super(MockTest, self).__init__()
    # Caller-chosen dotted name, echoed back by id().
    self.name = name

  def id(self):
    """Return the caller-supplied name as this test's id."""
    return self.name

  def runTest(self):
    """Intentionally empty test body."""
    return

  def shortDescription(self):
    """Fixed description, asserted against in the reporter's output."""
    return "This is this test's description."
# str(exception_type) is different between Python 2 and 3.
def xml_escaped_exception_type(exception_type):
  """Return str(exception_type) escaped for use in an XML attribute.

  str(exception_type) differs between Python 2 and 3, so expected values are
  built with the same escaping helper the reporter itself uses.
  """
  type_string = str(exception_type)
  return xml_reporter._escape_xml_attr(type_string)
OUTPUT_STRING = '\n'.join([
r'<\?xml version="1.0"\?>',
'<testsuites name="" tests="%(tests)d" failures="%(failures)d"'
' errors="%(errors)d" time="%(run_time).1f" timestamp="%(start_time)s">',
'<testsuite name="%(suite_name)s" tests="%(tests)d"'
' failures="%(failures)d" errors="%(errors)d" time="%(run_time).1f" timestamp="%(start_time)s">',
' <testcase name="%(test_name)s" status="%(status)s" result="%(result)s"'
' time="%(run_time).1f" classname="%(classname)s"'
' timestamp="%(start_time)s">%(message)s', ' </testcase>', '</testsuite>',
'</testsuites>'
])
FAILURE_MESSAGE = r"""
<failure message="e" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_failure
self.fail\(\'e\'\)
AssertionError: e
\]\]></failure>""".format(xml_escaped_exception_type(AssertionError))
ERROR_MESSAGE = r"""
<error message="invalid literal for int\(\) with base 10: (')?a(')?" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_error
int\('a'\)
ValueError: invalid literal for int\(\) with base 10: '?a'?
\]\]></error>""".format(xml_escaped_exception_type(ValueError))
UNICODE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_unicode_sample_failure
raise AssertionError\(u'\\xe9'\)
AssertionError: {0}
\]\]></%s>""".format(
r'\\xe9' if six.PY2 else r'\xe9',
xml_escaped_exception_type(AssertionError))
NEWLINE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_newline_message_sample_failure
raise AssertionError\(\'{2}'\)
AssertionError: {3}
\]\]></%s>""".format(
'new
line',
xml_escaped_exception_type(AssertionError),
r'new\\nline',
'new\nline')
UNEXPECTED_SUCCESS_MESSAGE = '\n'.join([
'',
r' <error message="" type=""><!\[CDATA\[Test case '
r'__main__.MockTest.unexpectedly_passing_test should have failed, '
r'but passed.\]\]></error>'])
UNICODE_ERROR_MESSAGE = UNICODE_MESSAGE % ('error', 'error')
NEWLINE_ERROR_MESSAGE = NEWLINE_MESSAGE % ('error', 'error')
class TextAndXMLTestResultTest(absltest.TestCase):
  def setUp(self):
    # Fresh buffers per test: human-readable output goes to `stream`,
    # the XML report to `xml_stream`.
    self.stream = StringIOWriteLn()
    self.xml_stream = six.StringIO()
def _make_result(self, times):
timer = mock.Mock()
timer.side_effect = times
return xml_reporter._TextAndXMLTestResult(self.xml_stream, self.stream,
'foo', 0, timer)
def _assert_match(self, regex, output):
fail_msg = 'Expected regex:\n{}\nTo match:\n{}'.format(regex, output)
self.assertRegex(output, regex, fail_msg)
def _assert_valid_xml(self, xml_output):
try:
expat.ParserCreate().Parse(xml_output)
except expat.ExpatError as e:
raise AssertionError('Bad XML output: {}\n{}'.format(e, xml_output))
  def _simulate_error_test(self, test, result):
    """Drive `result` through one test that raises an unexpected error."""
    result.startTest(test)
    result.addError(test, self.get_sample_error())
    result.stopTest(test)
  def _simulate_failing_test(self, test, result):
    """Drive `result` through one test that fails an assertion."""
    result.startTest(test)
    result.addFailure(test, self.get_sample_failure())
    result.stopTest(test)
  def _simulate_passing_test(self, test, result):
    """Drive `result` through one test that passes."""
    result.startTest(test)
    result.addSuccess(test)
    result.stopTest(test)
def _iso_timestamp(self, timestamp):
return datetime.datetime.utcfromtimestamp(timestamp).isoformat() + '+00:00'
  def test_with_passing_test(self):
    # A single passing test yields one completed <testcase> with no message.
    start_time = 0
    end_time = 2
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.passing_test')
    result.startTestRun()
    result.startTest(test)
    result.addSuccess(test)
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'passing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': ''
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())

  def test_with_passing_subtest(self):
    # A passing subtest is reported with "[msg]" appended to the test name.
    start_time = 0
    end_time = 2
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.passing_test')
    subtest = unittest.case._SubTest(test, 'msg', None)
    result.startTestRun()
    result.startTest(test)
    result.addSubTest(test, subtest, None)
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': r'passing_test \[msg\]',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': ''
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest_with_dots_in_parameter_name(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
subtest = unittest.case._SubTest(test, 'msg', {'case': 'a.b.c'})
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
re.escape(self._iso_timestamp(start_time),),
'test_name':
r'passing_test \[msg\] \(case='a.b.c'\)',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
  def get_sample_error(self):
    # Produce a real ValueError exc_info tuple.  NOTE: the traceback text of
    # this exact body is regex-matched by ERROR_MESSAGE — do not reformat.
    try:
      int('a')
    except ValueError:
      error_values = sys.exc_info()
      return error_values

  def get_sample_failure(self):
    # Produce an AssertionError exc_info tuple (matched by FAILURE_MESSAGE).
    try:
      self.fail('e')
    except AssertionError:
      error_values = sys.exc_info()
      return error_values

  def get_newline_message_sample_failure(self):
    # Exception message containing a newline — exercises attribute escaping.
    try:
      raise AssertionError('new\nline')
    except AssertionError:
      error_values = sys.exc_info()
      return error_values

  def get_unicode_sample_failure(self):
    # Non-ASCII exception message.
    try:
      raise AssertionError(u'\xe9')
    except AssertionError:
      error_values = sys.exc_info()
      return error_values

  def get_terminal_escape_sample_failure(self):
    # Control character (ESC); must not leak into the XML unescaped.
    try:
      raise AssertionError('\x1b')
    except AssertionError:
      error_values = sys.exc_info()
      return error_values
  def test_with_failing_test(self):
    # A failure is tallied in the suite counters and the <failure> message
    # appears inside the testcase element.
    start_time = 10
    end_time = 20
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addFailure(test, self.get_sample_failure())
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 1,
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'failing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': FAILURE_MESSAGE
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())

  def test_with_failing_subtest(self):
    # Same as above, but the failure is reported through addSubTest.
    start_time = 10
    end_time = 20
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    subtest = unittest.case._SubTest(test, 'msg', None)
    result.startTestRun()
    result.startTest(test)
    result.addSubTest(test, subtest, self.get_sample_failure())
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 1,
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': r'failing_test \[msg\]',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': FAILURE_MESSAGE
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())
  def test_with_error_test(self):
    # An error is tallied in the suite counters; the XML must also parse.
    start_time = 100
    end_time = 200
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addError(test, self.get_sample_error())
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    xml = self.xml_stream.getvalue()
    self._assert_valid_xml(xml)
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 1,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'failing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': ERROR_MESSAGE
    }
    self._assert_match(expected_re, xml)

  def test_with_error_subtest(self):
    # Same as above, but the error is reported through addSubTest.
    start_time = 10
    end_time = 20
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.error_test')
    subtest = unittest.case._SubTest(test, 'msg', None)
    result.startTestRun()
    result.startTest(test)
    result.addSubTest(test, subtest, self.get_sample_error())
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 1,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': r'error_test \[msg\]',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': ERROR_MESSAGE
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())
  def test_with_fail_and_error_test(self):
    """Tests a failure and subsequent error within a single result."""
    start_time = 123
    end_time = 456
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addFailure(test, self.get_sample_failure())
    # This could happen in tearDown
    result.addError(test, self.get_sample_error())
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    xml = self.xml_stream.getvalue()
    self._assert_valid_xml(xml)
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 1,  # Only the failure is tallied (because it was first).
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'failing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        # Messages from failure and error should be concatenated in order.
        'message': FAILURE_MESSAGE + ERROR_MESSAGE
    }
    self._assert_match(expected_re, xml)

  def test_with_error_and_fail_test(self):
    """Tests an error and subsequent failure within a single result."""
    start_time = 123
    end_time = 456
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addError(test, self.get_sample_error())
    result.addFailure(test, self.get_sample_failure())
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    xml = self.xml_stream.getvalue()
    self._assert_valid_xml(xml)
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 1,  # Only the error is tallied (because it was first).
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'failing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        # Messages from error and failure should be concatenated in order.
        'message': ERROR_MESSAGE + FAILURE_MESSAGE
    }
    self._assert_match(expected_re, xml)
  def test_with_newline_error_test(self):
    # Newlines in the exception message must be escaped in the XML attribute.
    start_time = 100
    end_time = 200
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addError(test, self.get_newline_message_sample_failure())
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    xml = self.xml_stream.getvalue()
    self._assert_valid_xml(xml)
    run_time = end_time - start_time
    # A trailing newline is appended to the expected regex ('%' binds tighter
    # than '+', so the concatenation applies to the formatted string).
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 1,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'failing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': NEWLINE_ERROR_MESSAGE
    } + '\n'
    self._assert_match(expected_re, xml)

  def test_with_unicode_error_test(self):
    # Non-ASCII characters in the exception message must survive intact.
    start_time = 100
    end_time = 200
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addError(test, self.get_unicode_sample_failure())
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    xml = self.xml_stream.getvalue()
    self._assert_valid_xml(xml)
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 1,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'failing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': UNICODE_ERROR_MESSAGE
    }
    self._assert_match(expected_re, xml)
  def test_with_terminal_escape_error(self):
    # An ESC control character in the message must still yield parseable XML.
    start_time = 100
    end_time = 200
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addError(test, self.get_terminal_escape_sample_failure())
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    self._assert_valid_xml(self.xml_stream.getvalue())
  def test_with_expected_failure_test(self):
    # An expected failure counts as neither a failure nor an error.
    start_time = 100
    end_time = 200
    result = self._make_result((start_time, start_time, end_time, end_time))
    error_values = ''
    try:
      raise RuntimeError('Test expectedFailure')
    except RuntimeError:
      error_values = sys.exc_info()
    test = MockTest('__main__.MockTest.expected_failing_test')
    result.startTestRun()
    result.startTest(test)
    result.addExpectedFailure(test, error_values)
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'expected_failing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': ''
    }
    # DOTALL so '.' in the pattern can span the multi-line traceback text.
    self._assert_match(re.compile(expected_re, re.DOTALL),
                       self.xml_stream.getvalue())
  def test_with_unexpected_success_error_test(self):
    # An unexpected success is reported as an error with a canned message.
    start_time = 100
    end_time = 200
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.unexpectedly_passing_test')
    result.startTestRun()
    result.startTest(test)
    result.addUnexpectedSuccess(test)
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 1,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'unexpectedly_passing_test',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': UNEXPECTED_SUCCESS_MESSAGE
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())
  def test_with_skipped_test(self):
    # A skipped test is status="notrun" / result="suppressed"; the skip
    # reason contains a double quote to exercise attribute escaping.
    start_time = 100
    end_time = 100
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.skipped_test_with_reason')
    result.startTestRun()
    result.startTest(test)
    result.addSkip(test, 'b"r')
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    # NOTE(review): unlike the other cases no 'attributes' key is supplied
    # here — confirm OUTPUT_STRING tolerates the missing placeholder.
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'skipped_test_with_reason',
        'classname': '__main__.MockTest',
        'status': 'notrun',
        'result': 'suppressed',
        'message': ''
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())
  def test_suite_time(self):
    # Suite time spans from the earliest start to the latest end across all
    # tests in the suite, not the sum of individual runtimes.
    start_time1 = 100
    end_time1 = 200
    start_time2 = 400
    end_time2 = 700
    name = '__main__.MockTest.failing_test'
    result = self._make_result((start_time1, start_time1, end_time1,
                                start_time2, end_time2, end_time2))
    test = MockTest('%s1' % name)
    result.startTestRun()
    result.startTest(test)
    result.addSuccess(test)
    result.stopTest(test)
    test = MockTest('%s2' % name)
    result.startTest(test)
    result.addSuccess(test)
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    run_time = max(end_time1, end_time2) - min(start_time1, start_time2)
    timestamp = self._iso_timestamp(start_time1)
    expected_prefix = """<?xml version="1.0"?>
<testsuites name="" tests="2" failures="0" errors="0" time="%.1f" timestamp="%s">
<testsuite name="MockTest" tests="2" failures="0" errors="0" time="%.1f" timestamp="%s">
""" % (run_time, timestamp, run_time, timestamp)
    xml_output = self.xml_stream.getvalue()
    self.assertTrue(
        xml_output.startswith(expected_prefix),
        '%s not found in %s' % (expected_prefix, xml_output))
  def test_with_no_suite_name(self):
    # A test id that doesn't parse into suite/test still gets reported.
    start_time = 1000
    end_time = 1200
    result = self._make_result((start_time, start_time, end_time, end_time))
    test = MockTest('__main__.MockTest.bad_name')
    result.startTestRun()
    result.startTest(test)
    result.addSuccess(test)
    result.stopTest(test)
    result.stopTestRun()
    result.printErrors()
    run_time = end_time - start_time
    expected_re = OUTPUT_STRING % {
        'suite_name': 'MockTest',
        'tests': 1,
        'failures': 0,
        'errors': 0,
        'run_time': run_time,
        'start_time': re.escape(self._iso_timestamp(start_time),),
        'test_name': 'bad_name',
        'classname': '__main__.MockTest',
        'status': 'run',
        'result': 'completed',
        'attributes': '',
        'message': ''
    }
    self._assert_match(expected_re, self.xml_stream.getvalue())
def test_unnamed_parameterized_testcase(self):
"""Test unnamed parameterized test cases.
Unnamed parameterized test cases might have non-alphanumeric characters in
their test method names. This test ensures xml_reporter handles them
correctly.
"""
class ParameterizedTest(parameterized.TestCase):
@parameterized.parameters(('a (b.c)',))
def test_prefix(self, case):
self.assertTrue(case.startswith('a'))
start_time = 1000
end_time = 1200
result = self._make_result((start_time, start_time, end_time, end_time))
test = ParameterizedTest(methodName='test_prefix0')
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
classname = xml_reporter._escape_xml_attr(
unittest.util.strclass(test.__class__))
expected_re = OUTPUT_STRING % {
'suite_name': 'ParameterizedTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'start_time': re.escape(self._iso_timestamp(start_time),),
'test_name': re.escape('test_prefix0 ('a (b.c)')'),
'classname': classname,
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
  def teststop_test_without_pending_test(self):
    # stopTest for a test that was never started must not raise.
    end_time = 1200
    result = self._make_result((end_time,))
    test = MockTest('__main__.MockTest.bad_name')
    result.stopTest(test)
    result.stopTestRun()
    # Just verify that this doesn't crash
def test_text_and_xmltest_runner(self):
runner = xml_reporter.TextAndXMLTestRunner(self.xml_stream, self.stream,
'foo', 1)
result1 = runner._makeResult()
result2 = xml_reporter._TextAndXMLTestResult(None, None, None, 0, None)
self.failUnless(type(result1) is type(result2))
def test_timing_with_time_stub(self):
"""Make sure that timing is correct even if time.time is stubbed out."""
try:
saved_time = time.time
time.time = lambda: -1
reporter = xml_reporter._TextAndXMLTestResult(self.xml_stream,
self.stream,
'foo', 0)
test = MockTest('bar')
reporter.startTest(test)
self.failIf(reporter.start_time == -1)
finally:
time.time = saved_time
  def test_concurrent_add_and_delete_pending_test_case_result(self):
    """Make sure adding/deleting pending test case results are thread safe."""
    result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,
                                                None)

    def add_and_delete_pending_test_case_result(test_name):
      test = MockTest(test_name)
      result.addSuccess(test)
      result.delete_pending_test_case_result(test)

    for i in range(50):
      add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)
    # Every pending result must have been consumed.
    self.assertEqual(result.pending_test_case_results, {})
  def test_concurrent_test_runs(self):
    """Make sure concurrent test runs do not race each other."""
    num_passing_tests = 20
    num_failing_tests = 20
    num_error_tests = 20
    total_num_tests = num_passing_tests + num_failing_tests + num_error_tests
    # One timer reading per test start/stop, bracketed by the run's own
    # start/stop readings.
    times = [0] + [i for i in range(2 * total_num_tests)
                  ] + [2 * total_num_tests - 1]
    result = self._make_result(times)
    threads = []
    names = []
    result.startTestRun()
    for i in range(num_passing_tests):
      name = 'passing_concurrent_test_%s' % i
      names.append(name)
      test_name = '__main__.MockTest.%s' % name
      # xml_reporter uses id(test) as the test identifier.
      # In a real testing scenario, all the test instances are created before
      # running them. So all ids will be unique.
      # We must do the same here: create test instance beforehand.
      test = MockTest(test_name)
      threads.append(threading.Thread(
          target=self._simulate_passing_test, args=(test, result)))
    for i in range(num_failing_tests):
      name = 'failing_concurrent_test_%s' % i
      names.append(name)
      test_name = '__main__.MockTest.%s' % name
      test = MockTest(test_name)
      threads.append(threading.Thread(
          target=self._simulate_failing_test, args=(test, result)))
    for i in range(num_error_tests):
      name = 'error_concurrent_test_%s' % i
      names.append(name)
      test_name = '__main__.MockTest.%s' % name
      test = MockTest(test_name)
      threads.append(threading.Thread(
          target=self._simulate_error_test, args=(test, result)))
    for t in threads:
      t.start()
    for t in threads:
      t.join()
    result.stopTestRun()
    result.printErrors()
    # Every test name must appear in the XML exactly because no result was
    # lost to a race.
    tests_not_in_xml = []
    for tn in names:
      if tn not in self.xml_stream.getvalue():
        tests_not_in_xml.append(tn)
    msg = ('Expected xml_stream to contain all test %s results, but %s tests '
           'are missing. List of missing tests: %s' % (
               total_num_tests, len(tests_not_in_xml), tests_not_in_xml))
    self.assertEqual([], tests_not_in_xml, msg)
  def test_add_failure_during_stop_test(self):
    """Tests an addFailure() call from within a stopTest() call stack."""
    result = self._make_result((0, 2))
    test = MockTest('__main__.MockTest.failing_test')
    result.startTestRun()
    result.startTest(test)
    # Replace parent stopTest method from unittest.TextTestResult with
    # a version that calls self.addFailure().
    with mock.patch.object(
        unittest.TextTestResult,
        'stopTest',
        side_effect=lambda t: result.addFailure(t, self.get_sample_failure())):
      # Run stopTest in a separate thread since we are looking to verify that
      # it does not deadlock, and would otherwise prevent the test from
      # completing.
      stop_test_thread = threading.Thread(target=result.stopTest, args=(test,))
      stop_test_thread.daemon = True
      stop_test_thread.start()
      stop_test_thread.join(10.0)
      self.assertFalse(stop_test_thread.is_alive(),
                       'result.stopTest(test) call failed to complete')
class XMLTest(absltest.TestCase):
  """Direct tests for the XML attribute-escaping helper."""

  def test_escape_xml(self):
    # Special characters must be replaced by XML character entities so the
    # value can be embedded in a double-quoted attribute.  (Restored here:
    # the entities had been decoded into literal characters, leaving an
    # unterminated string literal spanning two lines.)
    self.assertEqual(xml_reporter._escape_xml_attr('"Hi" <\'>\t\r\n'),
                     '&quot;Hi&quot; &lt;&apos;&gt;&#x9;&#xD;&#xA;')
class XmlReporterFixtureTest(absltest.TestCase):
  """End-to-end tests that run xml_reporter_helper_test in a subprocess."""

  def _get_helper(self):
    # Resolve the helper binary through the bazel runfiles tree.
    binary_name = 'absl/testing/tests/xml_reporter_helper_test'
    return _bazelize_command.get_executable_path(binary_name)

  def _run_test_and_get_xml(self, flag):
    """Runs xml_reporter_helper_test and returns an Element instance.

    Runs xml_reporter_helper_test in a new process so that it can
    exercise the entire test infrastructure, and easily test issues in
    the test fixture.

    Args:
      flag: flag to pass to xml_reporter_helper_test

    Returns:
      The Element instance of the XML output.
    """
    xml_fhandle, xml_fname = tempfile.mkstemp()
    os.close(xml_fhandle)

    try:
      binary = self._get_helper()
      args = [binary, flag, '--xml_output_file=%s' % xml_fname]
      ret = subprocess.call(args)
      # The helper is expected to exit successfully for this code path.
      self.assertEqual(ret, 0)
      xml = ElementTree.parse(xml_fname).getroot()
    finally:
      # Always clean up the temp file, even on assertion failure.
      os.remove(xml_fname)

    return xml
  def _run_test(self, flag, num_errors, num_failures, suites):
    # Run the helper (expected to fail), then compare its XML output against
    # the expected suite/case structure.  `suites` is a list of dicts with
    # 'name' and 'cases'; each case dict has 'name', 'classname' and
    # optionally 'error'/'failure' message attributes.
    xml_fhandle, xml_fname = tempfile.mkstemp()
    os.close(xml_fhandle)

    try:
      binary = self._get_helper()
      args = [binary, flag, '--xml_output_file=%s' % xml_fname]
      ret = subprocess.call(args)
      # These flags all make the helper fail, so a non-zero exit is expected.
      self.assertNotEqual(ret, 0)
      xml = ElementTree.parse(xml_fname).getroot()
      logging.info('xml output is:\n%s', ElementTree.tostring(xml))
    finally:
      os.remove(xml_fname)

    self.assertEqual(int(xml.attrib['errors']), num_errors)
    self.assertEqual(int(xml.attrib['failures']), num_failures)
    self.assertLen(xml, len(suites))
    # Sort both sides by name so the comparison is order-independent.
    actual_suites = sorted(
        xml.findall('testsuite'), key=lambda x: x.attrib['name'])
    suites = sorted(suites, key=lambda x: x['name'])
    for actual_suite, expected_suite in zip(actual_suites, suites):
      self.assertEqual(actual_suite.attrib['name'], expected_suite['name'])
      self.assertLen(actual_suite, len(expected_suite['cases']))
      actual_cases = sorted(actual_suite.findall('testcase'),
                            key=lambda x: x.attrib['name'])
      expected_cases = sorted(expected_suite['cases'], key=lambda x: x['name'])
      for actual_case, expected_case in zip(actual_cases, expected_cases):
        self.assertEqual(actual_case.attrib['name'], expected_case['name'])
        self.assertEqual(actual_case.attrib['classname'],
                         expected_case['classname'])
        if 'error' in expected_case:
          actual_error = actual_case.find('error')
          self.assertEqual(actual_error.attrib['message'],
                           expected_case['error'])
        if 'failure' in expected_case:
          actual_failure = actual_case.find('failure')
          self.assertEqual(actual_failure.attrib['message'],
                           expected_case['failure'])
    return xml
  def test_set_up_module_error(self):
    # An exception in setUpModule is reported as a module-level error case.
    self._run_test(
        flag='--set_up_module_error',
        num_errors=1,
        num_failures=0,
        suites=[{'name': '__main__',
                 'cases': [{'name': 'setUpModule',
                            'classname': '__main__',
                            'error': 'setUpModule Errored!'}]}])

  def test_tear_down_module_error(self):
    # tearDownModule errors appear alongside the already-run test cases.
    self._run_test(
        flag='--tear_down_module_error',
        num_errors=1,
        num_failures=0,
        suites=[{'name': 'FailableTest',
                 'cases': [{'name': 'test',
                            'classname': '__main__.FailableTest'}]},
                {'name': '__main__',
                 'cases': [{'name': 'tearDownModule',
                            'classname': '__main__',
                            'error': 'tearDownModule Errored!'}]}])

  def test_set_up_class_error(self):
    # A setUpClass error replaces the class's test cases entirely.
    self._run_test(
        flag='--set_up_class_error',
        num_errors=1,
        num_failures=0,
        suites=[{'name': 'FailableTest',
                 'cases': [{'name': 'setUpClass',
                            'classname': '__main__.FailableTest',
                            'error': 'setUpClass Errored!'}]}])

  def test_tear_down_class_error(self):
    # tearDownClass errors are reported next to the class's test cases.
    self._run_test(
        flag='--tear_down_class_error',
        num_errors=1,
        num_failures=0,
        suites=[{'name': 'FailableTest',
                 'cases': [{'name': 'test',
                            'classname': '__main__.FailableTest'},
                           {'name': 'tearDownClass',
                            'classname': '__main__.FailableTest',
                            'error': 'tearDownClass Errored!'}]}])

  def test_set_up_error(self):
    # A per-test setUp error is attributed to the test case itself.
    self._run_test(
        flag='--set_up_error',
        num_errors=1,
        num_failures=0,
        suites=[{'name': 'FailableTest',
                 'cases': [{'name': 'test',
                            'classname': '__main__.FailableTest',
                            'error': 'setUp Errored!'}]}])

  def test_tear_down_error(self):
    # A per-test tearDown error is attributed to the test case itself.
    self._run_test(
        flag='--tear_down_error',
        num_errors=1,
        num_failures=0,
        suites=[{'name': 'FailableTest',
                 'cases': [{'name': 'test',
                            'classname': '__main__.FailableTest',
                            'error': 'tearDown Errored!'}]}])

  def test_test_error(self):
    # An error raised from the test body itself.
    self._run_test(
        flag='--test_error',
        num_errors=1,
        num_failures=0,
        suites=[{'name': 'FailableTest',
                 'cases': [{'name': 'test',
                            'classname': '__main__.FailableTest',
                            'error': 'test Errored!'}]}])
  def test_set_up_failure(self):
    if six.PY2:
      # A failure in setUp() produces an error (not a failure), which is
      # inconsistent with the Python unittest documentation. In Python
      # 2.7, the bug appears to be in unittest.TestCase.run() method.
      # Although it correctly checks for a SkipTest exception, it does
      # not check for a failureException.
      self._run_test(
          flag='--set_up_fail',
          num_errors=1,
          num_failures=0,
          suites=[{'name': 'FailableTest',
                   'cases': [{'name': 'test',
                              'classname': '__main__.FailableTest',
                              'error': 'setUp Failed!'}]}])
    else:
      # On Python 3 the setUp failure is correctly reported as a failure.
      self._run_test(
          flag='--set_up_fail',
          num_errors=0,
          num_failures=1,
          suites=[{'name': 'FailableTest',
                   'cases': [{'name': 'test',
                              'classname': '__main__.FailableTest',
                              'failure': 'setUp Failed!'}]}])

  def test_tear_down_failure(self):
    if six.PY2:
      # See comment in test_set_up_failure().
      self._run_test(
          flag='--tear_down_fail',
          num_errors=1,
          num_failures=0,
          suites=[{'name': 'FailableTest',
                   'cases': [{'name': 'test',
                              'classname': '__main__.FailableTest',
                              'error': 'tearDown Failed!'}]}])
    else:
      self._run_test(
          flag='--tear_down_fail',
          num_errors=0,
          num_failures=1,
          suites=[{'name': 'FailableTest',
                   'cases': [{'name': 'test',
                              'classname': '__main__.FailableTest',
                              'failure': 'tearDown Failed!'}]}])

  def test_test_fail(self):
    # A plain assertion failure inside the test body.
    self._run_test(
        flag='--test_fail',
        num_errors=0,
        num_failures=1,
        suites=[{'name': 'FailableTest',
                 'cases': [{'name': 'test',
                            'classname': '__main__.FailableTest',
                            'failure': 'test Failed!'}]}])
  def test_test_randomization_seed_logging(self):
    # We expect the resulting XML to start as follows:
    # <testsuites ...>
    #  <properties>
    #    <property name="test_randomize_ordering_seed" value="17" />
    #  ...
    #
    # which we validate here.
    out = self._run_test_and_get_xml('--test_randomize_ordering_seed=17')
    expected_attrib = {'name': 'test_randomize_ordering_seed', 'value': '17'}
    property_attributes = [
        prop.attrib for prop in out.findall('./properties/property')]
    self.assertIn(expected_attrib, property_attributes)


if __name__ == '__main__':
  absltest.main()
| 33.360352 | 173 | 0.630652 |
b9cd338c7357dcf06528eb38e83a7ebc2fe42dcc | 1,657 | py | Python | steamctl/commands/hlmaster/__init__.py | rossengeorgiev/steamctl | c33680cf33ccf4c060e44eb6415a8b8c2cf8c672 | [
"MIT"
] | 138 | 2019-07-19T19:29:10.000Z | 2022-03-28T08:30:55.000Z | steamctl/commands/hlmaster/__init__.py | PixelSymbols/steamctl | 4d6b19a921a4db521ed6e0d05a1dae2458441a9a | [
"MIT"
] | 35 | 2019-06-23T23:10:19.000Z | 2022-03-13T04:28:13.000Z | steamctl/commands/hlmaster/__init__.py | PixelSymbols/steamctl | 4d6b19a921a4db521ed6e0d05a1dae2458441a9a | [
"MIT"
] | 9 | 2019-08-15T13:17:38.000Z | 2022-03-15T23:48:47.000Z |
from steamctl.argparser import register_command
@register_command('hlmaster', help='Query master server and server information')
def setup_arg_parser(cp):
def print_help(*args, **kwargs):
cp.print_help()
cp.set_defaults(_cmd_func=print_help)
sub_cp = cp.add_subparsers(metavar='<subcommand>',
dest='subcommand',
title='List of sub-commands',
description='',
)
scp_query = sub_cp.add_parser("query", help="Query HL Master for servers")
scp_query.add_argument('filter', type=str)
scp_query.add_argument('--ip-only', action='store_true', help='Show short info about each server')
scp_query.add_argument('-n', '--num-servers', default=20, type=int, help="Number of result to return (Default: 20)")
scp_query.add_argument('-m', '--master', default=None, type=str, help="Master server (default: hl2master.steampowered.com:27011)")
scp_query.set_defaults(_cmd_func=__name__ + '.cmds:cmd_hlmaster_query')
scp_info = sub_cp.add_parser("info", help="Query info from a goldsrc or source server")
scp_info.add_argument('server', type=str)
scp_info.add_argument('-i', '--info', action='store_true', help='Show server info')
scp_info.add_argument('-r', '--rules', action='store_true', help='Show server rules')
scp_info.add_argument('-p', '--players', action='store_true', help='Show player list')
scp_info.add_argument('-s', '--short', action='store_true', help='Print server info in short form')
scp_info.set_defaults(_cmd_func=__name__ + '.cmds:cmd_hlmaster_info')
| 53.451613 | 134 | 0.665057 |
3365d4394ff61c185b993ed45296385dd6cfccf8 | 695 | py | Python | graves_n_civil_registry/deceased.py | victorbrittoferreira/civil_n_graves_registry | 50ad7c72d1d39b9973af69615f85965520a4cb0c | [
"MIT"
] | 2 | 2021-11-10T03:13:56.000Z | 2021-11-21T20:17:37.000Z | graves_n_civil_registry/deceased.py | victorbrittoferreira/civil_n_graves_registry | 50ad7c72d1d39b9973af69615f85965520a4cb0c | [
"MIT"
] | null | null | null | graves_n_civil_registry/deceased.py | victorbrittoferreira/civil_n_graves_registry | 50ad7c72d1d39b9973af69615f85965520a4cb0c | [
"MIT"
] | null | null | null | import datetime
from obituary import Obituary
class Deceased:
def __init__(self , obituary, buried : bool ) -> None:
self.obituary = obituary
self.buried = buried
self.dead_status = []
def dead_status_update(self, updating):
#now = datetime.datetime.today()
updating_dated = (updating, datetime.datetime.today())
self.dead_status.append(updating_dated)
def status_records(self):
for update in self.dead_status:
print(update)
# def getName(self):
# return self.name
#
# def getAge(self):
# return self.age
#deceased_test = Deceased(obituary, True)
#print(deceased_test.buried) | 22.419355 | 62 | 0.640288 |
1c73fc22984d9cc31c7d3a5c14d251bfa694892e | 766 | py | Python | libs/pytorch_geometric/torch_geometric/transforms/remove_training_classes.py | MoritzSchueler96/TUM_ADLCV_Deep_Metric_Learning | 365d1609de044ba8498d3add808274f6f2d02d2b | [
"MIT"
] | null | null | null | libs/pytorch_geometric/torch_geometric/transforms/remove_training_classes.py | MoritzSchueler96/TUM_ADLCV_Deep_Metric_Learning | 365d1609de044ba8498d3add808274f6f2d02d2b | [
"MIT"
] | null | null | null | libs/pytorch_geometric/torch_geometric/transforms/remove_training_classes.py | MoritzSchueler96/TUM_ADLCV_Deep_Metric_Learning | 365d1609de044ba8498d3add808274f6f2d02d2b | [
"MIT"
] | null | null | null | from typing import List
from torch_geometric.data import Data
from torch_geometric.transforms import BaseTransform
class RemoveTrainingClasses(BaseTransform):
    r"""Removes classes from the node-level training set as given by
    :obj:`data.train_mask`, *e.g.*, in order to get a zero-shot label scenario.

    Args:
        classes (List[int]): The classes to remove from the training set.
    """
    def __init__(self, classes: List[int]):
        self.classes = classes

    def __call__(self, data: Data) -> Data:
        # Clone so the original mask tensor is left untouched.
        data.train_mask = data.train_mask.clone()
        for i in self.classes:
            data.train_mask[data.y == i] = False
        return data

    def __repr__(self) -> str:
        # Bug fix: the 'f' prefix was inside the quotes, so the literal
        # string "f{self.__class__.__name__}({self.classes})" was returned
        # instead of the formatted representation.
        return f'{self.__class__.__name__}({self.classes})'
badabc24b5e6446084e22497cc3a410c18221ce5 | 2,914 | py | Python | demos/power_based_salinet_components_sensetivity_multiprocess.py | AlexanderHoogsteyn/PhaseIdentification | 0ef49456e16dbc5481c324b1ae7340518e1511b6 | [
"MIT"
] | 1 | 2020-11-10T09:18:36.000Z | 2020-11-10T09:18:36.000Z | demos/power_based_salinet_components_sensetivity_multiprocess.py | AlexanderHoogsteyn/PhaseIdentification | 0ef49456e16dbc5481c324b1ae7340518e1511b6 | [
"MIT"
] | null | null | null | demos/power_based_salinet_components_sensetivity_multiprocess.py | AlexanderHoogsteyn/PhaseIdentification | 0ef49456e16dbc5481c324b1ae7340518e1511b6 | [
"MIT"
] | null | null | null | import sys
from os.path import dirname
sys.path.append(dirname("../src/"))
from PhaseIdentification.powerBasedPhaseIdentification import *
from PhaseIdentification.common import *
import matplotlib.pyplot as plt
import multiprocessing
"""
##################################################
DEMO 3
Influence of voltage assist ratio on accuracy of load based methods
For multiple feeders
This can be improved by making sure an additional 10 of the missing devices is added instead of all new devices
#################################################
"""
def worker(feeder_id):
    """Run the salient-components sensitivity sweep for a single feeder.

    For every accuracy class (the last two use the ``s=True`` variant of
    ``ErrorClass``) and every salient-component count, a fresh ``Feeder`` is
    built and the fixed Xu load-correlation phase identification is run; the
    resulting identification accuracy is recorded.

    Returns:
        numpy.ndarray of shape (len(acc_class_range), len(sal_treshold_range))
        with one accuracy value per (accuracy class, component count) pair.
    """
    include_three_phase = True
    length = 24 * 20  # 20 days of hourly data
    salient_components = 1
    # Dead locals from the original (`accuracy = 0.1`, `sal_treshold = 10`)
    # were removed: both were immediately overwritten by the loops below.
    acc_class_range = np.array([0.1, 0.2, 0.5, 1.0, 0.2, 0.5])
    sal_treshold_range = list(range(2, length, 5))
    s_range = [False, False, False, False, True, True]
    scores = []
    for i, accuracy in enumerate(acc_class_range):
        row = []
        for nb_salient_components in sal_treshold_range:
            feeder = Feeder(feederID=feeder_id,
                            include_three_phase=include_three_phase)
            phase_identification = PartialPhaseIdentification(
                feeder, ErrorClass(accuracy, s=s_range[i]))
            phase_identification.load_correlation_xu_fixed(
                nb_salient_components=nb_salient_components,
                salient_components=salient_components, length=length)
            row.append(phase_identification.accuracy())
        scores.append(row)
    return np.array(scores)
if __name__ == '__main__':
    included_feeders = ["86315_785383", "65028_84566", "1076069_1274129",
                        "1351982_1596442", "65025_80035", "1076069_1274125"]
    cases = ["Case A", "Case B", "Case C", "Case D", "Case E", "Case F"]
    sal_treshold_range = list(range(2, 24 * 20, 5))
    acc_class_range = np.array([0.1, 0.2, 0.5, 1.0, 0.2, 0.5])
    reps = 2
    tot_scores = np.zeros([len(acc_class_range), len(sal_treshold_range)])
    # Bug fix: the original created a new multiprocessing.Pool(40) on every
    # feeder iteration and never closed any of them (worker-process leak).
    # One pool is now shared across all feeders and closed by the context
    # manager; the unused `jobs = []` local was removed.
    with multiprocessing.Pool(40) as pool:
        for case, feeder_id in enumerate(included_feeders):
            data = [feeder_id] * reps
            scores = pool.map(worker, data)
            for rep_scores in scores:
                tot_scores = tot_scores + rep_scores
    # Average over repetitions and over the number of cases/feeders.
    tot_scores = tot_scores / reps / len(cases)
    plt.figure(figsize=(8, 6), dpi=80)
    y = sal_treshold_range
    x = ["Class 0.1", "Class 0.2", "Class 0.5", "Class 1.0", "Class 0.2s", "Class 0.5s"]
    for i, c in enumerate(x):
        plt.plot(y, tot_scores[i] * 100, label=c)
    # Decorations
    plt.rc('font', size=14)
    plt.xticks(fontsize=12)
    plt.ylim([25, 105])
    plt.xlabel("Number of salient components", fontsize=20)
    plt.ylabel("Accuracy (%)", fontsize=20)
    plt.yticks(fontsize=12)
    plt.legend()
    plt.show()
    # plt.savefig("salient_components_sensetivity_delta_average")
794e0b91c7d4e89e92ab1c32d213aca185c2f386 | 142 | py | Python | zeus/api/schemas/token.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
] | 221 | 2017-07-03T17:29:21.000Z | 2021-12-07T19:56:59.000Z | zeus/api/schemas/token.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
] | 298 | 2017-07-04T18:08:14.000Z | 2022-03-03T22:24:51.000Z | zeus/api/schemas/token.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
] | 24 | 2017-07-15T13:46:45.000Z | 2020-08-16T16:14:45.000Z | from marshmallow import Schema, fields
class TokenSchema(Schema):
    """Marshmallow schema for serializing API tokens.

    Both fields are ``dump_only``: they appear in serialized output but are
    never accepted on load/deserialization.
    """
    id = fields.UUID(dump_only=True)
    key = fields.Str(dump_only=True)
| 20.285714 | 38 | 0.739437 |
e16370c2119222124777abe3b5eb244d34d375f3 | 147 | py | Python | jarviscli/plugins/gui.py | Ismail8Ahmed/Jarvis | f3f45c5f1b208f3897929a3d366ae73bee09fba5 | [
"MIT"
] | null | null | null | jarviscli/plugins/gui.py | Ismail8Ahmed/Jarvis | f3f45c5f1b208f3897929a3d366ae73bee09fba5 | [
"MIT"
] | null | null | null | jarviscli/plugins/gui.py | Ismail8Ahmed/Jarvis | f3f45c5f1b208f3897929a3d366ae73bee09fba5 | [
"MIT"
] | null | null | null | from guizero import App, Text, PushButton
@plugin("helloworld")
def helloworld(jarvis, s):
    """Repeats what you type."""
    # NOTE(review): the `plugin` decorator is not imported in this file; it
    # is presumably injected/registered by the Jarvis plugin loader — confirm.
    # The guizero imports at the top of the file are unused by this function.
    jarvis.say(s)
| 18.375 | 41 | 0.666667 |
93838491b43a7a7233d45433e737652e51808830 | 13,612 | py | Python | amptk/process_illumina_raw.py | nextgenusfs/amptk | 067365ed7baae5b0df0d4309680469792db161e3 | [
"BSD-2-Clause"
] | 30 | 2017-02-09T20:43:43.000Z | 2022-02-01T04:28:59.000Z | amptk/process_illumina_raw.py | nextgenusfs/amptk | 067365ed7baae5b0df0d4309680469792db161e3 | [
"BSD-2-Clause"
] | 75 | 2017-02-04T19:12:05.000Z | 2022-03-01T19:56:36.000Z | amptk/process_illumina_raw.py | nextgenusfs/amptk | 067365ed7baae5b0df0d4309680469792db161e3 | [
"BSD-2-Clause"
] | 9 | 2017-05-12T13:39:10.000Z | 2021-11-16T19:09:08.000Z | #!/usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import os
import argparse
import shutil
import multiprocessing
import glob
import itertools
import re
from natsort import natsorted
from amptk import amptklib
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
    """Argparse help formatter that widens the option column to 48 chars."""

    def __init__(self, prog):
        super().__init__(prog, max_help_position=48)
class col(object):
    # ANSI terminal escape sequences used to colorize console output.
    GRN = '\033[92m'   # bright green
    END = '\033[0m'    # reset all attributes
    WARN = '\033[93m'  # bright yellow (warnings)
def processReadsPE(input, args=False):
    """Demultiplex, merge, and length-trim one chunk of paired-end reads.

    Reads the chunk's R1/R2/R3 (index) FASTQ files from the shared temp
    directory, writes a ``<chunk>.demux.fq`` result and a ``<chunk>.stats``
    file with per-chunk counters for the parent process to aggregate.
    """
    chunk = os.path.basename(input)
    # Per-chunk file names, all inside the module-global tmpdir.
    r1 = os.path.join(tmpdir, chunk + '_R1.fq')
    r2 = os.path.join(tmpdir, chunk + '_R2.fq')
    r3 = os.path.join(tmpdir, chunk + '_R3.fq')
    r1_trimmed = os.path.join(tmpdir, chunk + '_R1.trimmed.fq')
    r2_trimmed = os.path.join(tmpdir, chunk + '_R2.trimmed.fq')
    merged = os.path.join(tmpdir, chunk + '.merged.fq')
    demuxed = os.path.join(tmpdir, chunk + '.demux.fq')
    # Assign reads to samples via the index reads; strip primers.
    total, bc_found, fwd_found, rev_found = amptklib.DemuxIllumina(
        r1, r2, r3, Barcodes, args.barcode_mismatch, FwdPrimer, RevPrimer,
        args.primer_mismatch, r1_trimmed, r2_trimmed,
        trim_primers=args.no_primer_trim)
    # Merge overlapping pairs (optionally rescuing unmerged forward reads).
    amptklib.MergeReadsSimple(r1_trimmed, r2_trimmed, '.', merged,
                              args.min_len, usearch, args.rescue_forward,
                              args.merge_method)
    merged_count = amptklib.countfastq(merged)
    # Lossless trim/pad to the target length; short reads are dropped.
    amptklib.losslessTrim(merged, FwdPrimer, RevPrimer, args.primer_mismatch,
                          args.trim_len, args.pad, args.min_len, demuxed)
    final_count = amptklib.countfastq(demuxed)
    dropped_short = merged_count - final_count
    # Persist counters; the parent sums all *.stats files afterwards.
    with open(os.path.join(tmpdir, chunk + '.stats'), 'w') as counts:
        counts.write("%i,%i,%i,%i,%i,%i\n" % (
            total, bc_found, fwd_found, rev_found, dropped_short, final_count))
def safe_run(*args, **kwargs):
    """Call processReadsPE(), catching and reporting any exception.

    Used as the multiprocessing worker entry point so that one failing
    chunk does not abort the whole pool.
    """
    try:
        processReadsPE(*args, **kwargs)
    except Exception as e:
        # Bug fix: the original message referenced a non-existent run().
        print("error: %s processReadsPE(*%r, **%r)" % (e, args, kwargs))
def main(args):
    """Demultiplex raw Illumina PE reads (R1/R2 + index) into one FASTQ.

    Parses CLI arguments, loads barcodes/primers from a mapping file or a
    barcode FASTA, demultiplexes (optionally across multiple CPUs), merges
    and trims reads, then concatenates, re-indexes and gzips the final
    ``<out>.demux.fq.gz`` and writes a generic mapping file.
    """
    global FwdPrimer, RevPrimer, Barcodes, tmpdir, usearch
    parser = argparse.ArgumentParser(prog='amptk-process_illumina_raw.py',
        usage="%(prog)s [options] -i file.fastq\n%(prog)s -h for help menu",
        description='''Script finds barcodes, strips forward and reverse primers, relabels, and then trim/pads reads to a set length''',
        epilog="""Written by Jon Palmer (2015) nextgenusfs@gmail.com""",
        formatter_class=MyFormatter)
    parser.add_argument('-f', '--forward', dest='fastq', required=True, help='Illumina FASTQ R1 reads')
    parser.add_argument('-r', '--reverse', required=True, help='Illumina FASTQ R2 reads')
    parser.add_argument('-i', '--index', nargs='+', required=True, help='Illumina FASTQ index reads')
    parser.add_argument('-m', '--mapping_file', help='QIIME-like mapping file')
    parser.add_argument('--read_length', type=int, help='Read length, i.e. 2 x 300 bp = 300')
    parser.add_argument('-o', '--out', dest="out", default='illumina_out', help='Base name for output')
    parser.add_argument('--fwd_primer', dest="F_primer", default='515FB', help='Forward Primer')
    parser.add_argument('--rev_primer', dest="R_primer", default='806RB', help='Reverse Primer')
    parser.add_argument('--primer_mismatch', default=2, type=int, help='Number of mis-matches in primer')
    parser.add_argument('--barcode_mismatch', default=0, type=int, help='Number of mis-matches in barcode')
    parser.add_argument('--barcode_fasta', help='FASTA file containing Barcodes (Names & Sequences)')
    parser.add_argument('--rescue_forward', default='on', choices=['on', 'off'], help='Rescue Not-merged forward reads')
    parser.add_argument('--barcode_rev_comp', action='store_true', help='Reverse complement barcode sequences')
    parser.add_argument('--min_len', default=100, type=int, help='Minimum read length to keep')
    parser.add_argument('-l', '--trim_len', default=300, type=int, help='Trim length for reads')
    parser.add_argument('-p', '--pad', default='off', choices=['on', 'off'], help='Pad with Ns to a set length')
    parser.add_argument('--no-primer-trim', dest='no_primer_trim', action='store_false', help='Do not trim primers')
    parser.add_argument('--cpus', type=int, help="Number of CPUs. Default: auto")
    parser.add_argument('-u', '--usearch', dest="usearch", default='usearch9', help='USEARCH9 EXE')
    parser.add_argument('--cleanup', action='store_true', help='remove intermediate files')
    parser.add_argument('--merge_method', default='vsearch', choices=['usearch', 'vsearch'], help='Software to use for PE read merging')
    args = parser.parse_args(args)

    args.out = re.sub(r'\W+', '', args.out)

    log_name = args.out + '.amptk-demux.log'
    if os.path.isfile(log_name):
        os.remove(log_name)
    amptklib.setupLogging(log_name)
    # Fix: removed the unused `FNULL = open(os.devnull, 'w')` (the handle was
    # never used and never closed — a file-descriptor leak).
    cmd_args = " ".join(sys.argv) + '\n'
    amptklib.log.debug(cmd_args)
    print("-------------------------------------------------------")

    # initialize script, log system info and usearch version
    amptklib.SystemInfo()
    # get version of amptk
    usearch = args.usearch
    amptklib.versionDependencyChecks(usearch, method=args.merge_method)

    # get number of CPUs to use
    if not args.cpus:
        cpus = multiprocessing.cpu_count()
    else:
        cpus = args.cpus

    # create tmpdir
    tmpdir = args.out.split('.')[0] + '_' + str(os.getpid())
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)

    # parse a mapping file or a barcode fasta file, primers, etc get setup
    # dealing with Barcodes, get ion barcodes or parse the barcode_fasta argument
    barcode_file = args.out + ".barcodes_used.fa"
    if os.path.isfile(barcode_file):
        os.remove(barcode_file)

    # check if mapping file passed, use this if present, otherwise use command line arguments
    SampleData = {}
    Barcodes = {}
    RevBarcodes = {}
    FwdPrimer = ''
    RevPrimer = ''
    if args.mapping_file:
        if not os.path.isfile(args.mapping_file):
            amptklib.log.error("Mapping file not found: %s" % args.mapping_file)
            sys.exit(1)
        SampleData, Barcodes, RevBarcodes, FwdPrimer, RevPrimer = amptklib.parseMappingFileNEW(args.mapping_file)
    else:  # no mapping file, so create dictionaries from barcode fasta files
        if not args.barcode_fasta:
            amptklib.log.error("You did not specify a --barcode_fasta or --mapping_file, one is required")
            sys.exit(1)
        else:
            shutil.copyfile(args.barcode_fasta, barcode_file)
            Barcodes = amptklib.fasta2barcodes(barcode_file, False)

    if FwdPrimer == '' or RevPrimer == '':
        # parse primers here so doesn't conflict with mapping primers
        # look up primer db otherwise default to entry
        if args.F_primer in amptklib.primer_db:
            FwdPrimer = amptklib.primer_db.get(args.F_primer)
            amptklib.log.info("{:} fwd primer found in AMPtk primer db, setting to: {:}".format(args.F_primer, FwdPrimer))
        else:
            FwdPrimer = args.F_primer
            amptklib.log.info("{:} fwd primer not found in AMPtk primer db, assuming it is actual primer sequence.".format(args.F_primer))
        if args.R_primer in amptklib.primer_db:
            RevPrimer = amptklib.primer_db.get(args.R_primer)
            amptklib.log.info("{:} rev primer found in AMPtk primer db, setting to: {:}".format(args.R_primer, RevPrimer))
        else:
            RevPrimer = args.R_primer
            amptklib.log.info("{:} rev primer not found in AMPtk primer db, assuming it is actual primer sequence.".format(args.R_primer))
        # if still no primers set, then exit
        if FwdPrimer == '' or RevPrimer == '':
            amptklib.log.error("Please provide primer sequences via --fwd_primer and --rev_primer")
            sys.exit(1)

    # if barcodes_rev_comp passed then reverse complement the keys in mapdict
    if args.barcode_rev_comp:
        amptklib.log.info("Reverse complementing barcode sequences")
        backupDict = Barcodes
        Barcodes = {}
        for k, v in list(backupDict.items()):
            RCkey = amptklib.RevComp(v)
            Barcodes[k] = RCkey

    amptklib.log.info("Loading %i samples from mapping file" % len(Barcodes))
    amptklib.log.info('FwdPrimer: {:} RevPrimer: {:}'.format(FwdPrimer, RevPrimer))
    amptklib.log.info('Dropping reads less than {:} bp and setting lossless trimming to {:} bp.'.format(args.min_len, args.trim_len))

    # rename reads according to indexes
    if not amptklib.PEandIndexCheck(args.fastq, args.reverse, args.index[0]):  # check they are all same length
        amptklib.log.error("FASTQ input malformed, read numbers do not match")
        sys.exit(1)
    amptklib.log.info("Loading FASTQ Records")
    NumSeqs = amptklib.countfastq(args.fastq)
    if cpus > 1:
        amptklib.log.info("Splitting FASTQ files over {:} cpus".format(cpus))
        amptklib.split_fastqPEandI(args.fastq, args.reverse, args.index[0], NumSeqs, tmpdir, cpus * 2)
        file_list = []
        for file in os.listdir(tmpdir):
            if file.endswith('.fq'):
                filepart = os.path.join(tmpdir, file.split('_R')[0])
                if not filepart in file_list:
                    file_list.append(filepart)
        amptklib.log.info("Mapping indexes to reads and renaming PE reads")
        amptklib.runMultiProgress(safe_run, file_list, cpus, args=args)
    else:
        amptklib.log.info("Mapping indexes to reads and renaming PE reads")
        shutil.copyfile(args.fastq, os.path.join(tmpdir, 'chunk_R1.fq'))
        shutil.copyfile(args.reverse, os.path.join(tmpdir, 'chunk_R2.fq'))
        shutil.copyfile(args.index[0], os.path.join(tmpdir, 'chunk_R3.fq'))
        processReadsPE(os.path.join(tmpdir, 'chunk'), args=args)
    print("-------------------------------------------------------")

    # Now concatenate all of the demuxed files together
    amptklib.log.info("Concatenating Demuxed Files")
    tmpDemux = os.path.join(tmpdir, args.out + '.demux.fq')
    with open(tmpDemux, 'wb') as outfile:
        for filename in glob.glob(os.path.join(tmpdir, '*.demux.fq')):
            if filename == tmpDemux:
                continue
            # Bug fix: the source files must be opened in *binary* mode;
            # copying a text-mode stream into a 'wb' destination raises
            # TypeError in Python 3 (str written to a bytes file).
            with open(filename, 'rb') as readfile:
                shutil.copyfileobj(readfile, outfile)

    # parse the stats
    finalstats = [0, 0, 0, 0, 0, 0]
    for file in os.listdir(tmpdir):
        if file.endswith('.stats'):
            with open(os.path.join(tmpdir, file), 'r') as statsfile:
                line = statsfile.readline()
                line = line.replace('\n', '')
                newstats = line.split(',')
                newstats = [int(i) for i in newstats]
                for x, num in enumerate(newstats):
                    finalstats[x] += num

    # finally reindex output
    # last thing is to re-number of reads as it is possible they could have same name from multitprocessor split
    Demux = args.out + '.demux.fq'
    amptklib.fastqreindex(tmpDemux, Demux)
    amptklib.SafeRemove(tmpDemux)

    # output stats of the run
    amptklib.log.info('{0:,}'.format(finalstats[0]) + ' total reads')
    amptklib.log.info('{0:,}'.format(finalstats[0] - finalstats[1]) + ' discarded no index match')
    amptklib.log.info('{0:,}'.format(finalstats[2]) + ' Fwd Primer found, {0:,}'.format(finalstats[3]) + ' Rev Primer found')
    amptklib.log.info('{0:,}'.format(finalstats[4]) + ' discarded too short (< %i bp)' % args.min_len)
    amptklib.log.info('{0:,}'.format(finalstats[5]) + ' valid output reads')

    # now loop through data and find barcoded samples, counting each.....
    BarcodeCount = {}
    # Renamed the file handle from `input` (shadowed the builtin).
    with open(Demux, 'r') as demux_fh:
        header = itertools.islice(demux_fh, 0, None, 4)
        for line in header:
            ID = line.split("=", 1)[-1].split(";")[0]
            if ID not in BarcodeCount:
                BarcodeCount[ID] = 1
            else:
                BarcodeCount[ID] += 1

    # now let's count the barcodes found and count the number of times they are found.
    barcode_counts = "%30s: %s" % ('Sample', 'Count')
    for k, v in natsorted(list(BarcodeCount.items()), key=lambda k_v: k_v[1], reverse=True):
        barcode_counts += "\n%30s: %s" % (k, str(BarcodeCount[k]))
    amptklib.log.info("Found %i barcoded samples\n%s" % (len(BarcodeCount), barcode_counts))

    # create mapping file if one doesn't exist
    genericmapfile = args.out + '.mapping_file.txt'
    amptklib.CreateGenericMappingFile(Barcodes, {}, FwdPrimer, RevPrimer, genericmapfile, BarcodeCount)

    # compress the output to save space
    FinalDemux = Demux + '.gz'
    amptklib.Fzip(Demux, FinalDemux, cpus)
    amptklib.removefile(Demux)
    if args.cleanup:
        amptklib.SafeRemove(tmpdir)

    # get file size
    filesize = os.path.getsize(FinalDemux)
    readablesize = amptklib.convertSize(filesize)
    amptklib.log.info("Output file: %s (%s)" % (FinalDemux, readablesize))
    amptklib.log.info("Mapping file: %s" % genericmapfile)
    print("-------------------------------------------------------")
    if 'darwin' in sys.platform:
        print(col.WARN + "\nExample of next cmd: " + col.END + "amptk cluster -i %s -o out\n" % (FinalDemux))
    else:
        print("\nExample of next cmd: amptk cluster -i %s -o out\n" % (FinalDemux))
if __name__ == "__main__":
    # Bug fix: `args` was not defined at module scope, so running this file
    # directly raised NameError. main() expects the argument vector without
    # the program name (it calls parser.parse_args(args)).
    main(sys.argv[1:])
| 48.269504 | 138 | 0.650088 |
ef182bfe2a233dca5df06b23860b51d66e2f4d06 | 8,167 | bzl | Python | tools/bzl/gwt.bzl | balag91/gerrit | 7c140198e2bc27b220aeb5ea3eecd05d1fee49b6 | [
"Apache-2.0"
] | null | null | null | tools/bzl/gwt.bzl | balag91/gerrit | 7c140198e2bc27b220aeb5ea3eecd05d1fee49b6 | [
"Apache-2.0"
] | null | null | null | tools/bzl/gwt.bzl | balag91/gerrit | 7c140198e2bc27b220aeb5ea3eecd05d1fee49b6 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Port of Buck native gwt_binary() rule. See discussion in context of
# https://github.com/facebook/buck/issues/109
load("//tools/bzl:genrule2.bzl", "genrule2")
load("//tools/bzl:java.bzl", "java_library2")
# NOTE(review): FileType is a long-deprecated legacy Bazel API — confirm the
# Bazel version this file targets before modernizing.
jar_filetype = FileType([".jar"])

# Browsers for which per-user-agent GWT permutations can be built.
BROWSERS = [
    "chrome",
    "firefox",
    "gecko1_8",
    "safari",
    "msie",
    "ie8",
    "ie9",
    "ie10",
    "edge",
]

# Maps a browser name to the GWT user.agent implementation it compiles as.
ALIASES = {
    "chrome": "safari",
    "firefox": "gecko1_8",
    "msie": "ie10",
    "edge": "gecko1_8",
}

# Root GWT module compiled by these rules.
MODULE = "com.google.gerrit.GerritGwtUI"

GWT_COMPILER = "com.google.gwt.dev.Compiler"

GWT_JVM_ARGS = ["-Xmx512m"]

GWT_COMPILER_ARGS = [
    "-XdisableClassMetadata",
]

# Release mode additionally disables cast checking for smaller/faster output.
GWT_COMPILER_ARGS_RELEASE_MODE = GWT_COMPILER_ARGS + [
    "-XdisableCastChecking",
]

GWT_PLUGIN_DEPS_NEVERLINK = [
    "//gerrit-plugin-gwtui:gwtui-api-lib-neverlink",
    "//lib/gwt:user-neverlink",
]

GWT_PLUGIN_DEPS = [
    "//gerrit-plugin-gwtui:gwtui-api-lib",
]

GWT_TRANSITIVE_DEPS = [
    "//lib/gwt:ant",
    "//lib/gwt:colt",
    "//lib/gwt:javax-validation",
    "//lib/gwt:javax-validation_src",
    "//lib/gwt:jsinterop-annotations",
    "//lib/gwt:jsinterop-annotations_src",
    "//lib/gwt:tapestry",
    "//lib/gwt:w3c-css-sac",
    "//lib/ow2:ow2-asm",
    "//lib/ow2:ow2-asm-analysis",
    "//lib/ow2:ow2-asm-commons",
    "//lib/ow2:ow2-asm-tree",
    "//lib/ow2:ow2-asm-util",
]

DEPS = GWT_TRANSITIVE_DEPS + [
    "//gerrit-gwtexpui:CSS",
    "//lib:gwtjsonrpc",
    "//lib/gwt:dev",
    "//lib/jgit/org.eclipse.jgit:jgit-source",
]

# Template for a user-agent-specific wrapper module; the two %s slots are
# (inherited module name, user.agent implementation value).
USER_AGENT_XML = """<module rename-to='gerrit_ui'>
<inherits name='%s'/>
<set-property name='user.agent' value='%s'/>
<set-property name='locale' value='default'/>
</module>
"""
def gwt_module(gwt_xml = None, resources = None, srcs = None, **kwargs):
    """Declares a GWT module: a java_library2 carrying its .gwt.xml resource.

    Bug fix: the previous signature used mutable default arguments
    (resources = [], srcs = []) and extended `resources` in place with
    `+=`, which mutates the caller's list (and fails on frozen top-level
    Starlark lists). A fresh copy is built instead.
    """
    resources = list(resources) if resources else []
    if gwt_xml:
        resources.append(gwt_xml)
    java_library2(
        srcs = srcs if srcs else [],
        resources = resources,
        **kwargs)
def _gwt_user_agent_module(ctx):
    """Generate user agent specific GWT module.

    Returns None when the rule has no user_agent attribute set; otherwise
    returns a struct with the generated module zip and its module name.
    NOTE(review): ctx.new_file/ctx.file_action/ctx.action are legacy Bazel
    APIs — confirm the targeted Bazel version.
    """
    if not ctx.attr.user_agent:
        return None

    ua = ctx.attr.user_agent
    # Resolve the browser name to its GWT user.agent implementation.
    impl = ua
    if ua in ALIASES:
        impl = ALIASES[ua]

    # intermediate artifact: user agent specific GWT xml file
    gwt_user_agent_xml = ctx.new_file(ctx.label.name + "_gwt.xml")
    ctx.file_action(output = gwt_user_agent_xml,
                    content = USER_AGENT_XML % (MODULE, impl))

    # intermediate artifact: user agent specific zip with GWT module
    gwt_user_agent_zip = ctx.new_file(ctx.label.name + "_gwt.zip")
    gwt = '%s_%s.gwt.xml' % (MODULE.replace('.', '/'), ua)
    dir = gwt_user_agent_zip.path + ".dir"
    # Shell pipeline: stage the xml at its package path, then zip it up.
    cmd = " && ".join([
        "p=$PWD",
        "mkdir -p %s" % dir,
        "cd %s" % dir,
        "mkdir -p $(dirname %s)" % gwt,
        "cp $p/%s %s" % (gwt_user_agent_xml.path, gwt),
        "$p/%s cC $p/%s $(find . | sed 's|^./||')" % (ctx.executable._zip.path, gwt_user_agent_zip.path)
    ])
    ctx.action(
        inputs = [gwt_user_agent_xml] + ctx.files._zip,
        outputs = [gwt_user_agent_zip],
        command = cmd,
        mnemonic = "GenerateUserAgentGWTModule")
    return struct(
        zip = gwt_user_agent_zip,
        module = MODULE + '_' + ua
    )
def _gwt_binary_impl(ctx):
    """Rule implementation: run the GWT compiler and zip its output."""
    module = ctx.attr.module[0]
    output_zip = ctx.outputs.output
    output_dir = output_zip.path + '.gwt_output'
    deploy_dir = output_zip.path + '.gwt_deploy'

    deps = _get_transitive_closure(ctx)

    # Classpath entries for the GWT compiler invocation.
    paths = []
    for dep in deps:
        paths.append(dep.path)

    # If a user agent is configured, compile the generated wrapper module
    # instead of the base module.
    gwt_user_agent_modules = []
    ua = _gwt_user_agent_module(ctx)
    if ua:
        paths.append(ua.zip.path)
        gwt_user_agent_modules.append(ua.zip)
        module = ua.module

    cmd = "external/local_jdk/bin/java %s -Dgwt.normalizeTimestamps=true -cp %s %s -war %s -deploy %s " % (
        " ".join(ctx.attr.jvm_args),
        ":".join(paths),
        GWT_COMPILER,
        output_dir,
        deploy_dir,
    )
    # TODO(davido): clean up command concatenation
    cmd += " ".join([
        "-style %s" % ctx.attr.style,
        "-optimize %s" % ctx.attr.optimize,
        "-strict",
        " ".join(ctx.attr.compiler_args),
        module + "\n",
        "rm -rf %s/gwt-unitCache\n" % output_dir,
        "root=`pwd`\n",
        "cd %s; $root/%s Cc ../%s $(find .)\n" % (
            output_dir,
            ctx.executable._zip.path,
            output_zip.basename,
        )
    ])
    ctx.action(
        inputs = list(deps) + ctx.files._jdk + ctx.files._zip + gwt_user_agent_modules,
        outputs = [output_zip],
        mnemonic = "GwtBinary",
        progress_message = "GWT compiling " + output_zip.short_path,
        # -e: abort the whole command chain on the first failure.
        command = "set -e\n" + cmd,
    )
def _get_transitive_closure(ctx):
    """Collect the transitive runtime/source jars of module_deps and deps.

    NOTE(review): `set()` here is the legacy Bazel depset builtin (removed
    in modern Starlark) — confirm the targeted Bazel version.
    """
    deps = set()
    for dep in ctx.attr.module_deps:
        deps += dep.java.transitive_runtime_deps
        deps += dep.java.transitive_source_jars
    for dep in ctx.attr.deps:
        # Plain file targets (e.g. prebuilt jars) expose `files`, not `java`.
        if hasattr(dep, 'java'):
            deps += dep.java.transitive_runtime_deps
        elif hasattr(dep, 'files'):
            deps += dep.files
    return deps
# Rule producing a single %{name}.zip containing compiled GWT output.
# Port of the Buck native gwt_binary() rule (see file header).
gwt_binary = rule(
    attrs = {
        "user_agent": attr.string(),
        "style": attr.string(default = "OBF"),
        "optimize": attr.string(default = "9"),
        "deps": attr.label_list(allow_files = jar_filetype),
        "module": attr.string_list(default = [MODULE]),
        "module_deps": attr.label_list(allow_files = jar_filetype),
        "compiler_args": attr.string_list(),
        "jvm_args": attr.string_list(),
        # Implicit tool dependencies: the JDK and Bazel's zipper.
        "_jdk": attr.label(
            default = Label("//tools/defaults:jdk"),
        ),
        "_zip": attr.label(
            default = Label("@bazel_tools//tools/zip:zipper"),
            cfg = "host",
            executable = True,
            single_file = True,
        ),
    },
    outputs = {
        "output": "%{name}.zip",
    },
    implementation = _gwt_binary_impl,
)
def gwt_genrule(suffix = ""):
    """Defines ui_opt/ui_dbg gwt_binary targets plus a combined ui_optdbg zip.

    The combined zip contains the optimized output with the debug bootstrap
    script renamed to dbg_gerrit_ui.nocache.js alongside it.
    """
    dbg = 'ui_dbg' + suffix
    opt = 'ui_opt' + suffix
    module_dep = ':ui_module' + suffix
    # Release builds ("_r" suffix) use the more aggressive compiler flags.
    args = GWT_COMPILER_ARGS_RELEASE_MODE if suffix == "_r" else GWT_COMPILER_ARGS

    genrule2(
        name = 'ui_optdbg' + suffix,
        srcs = [
            ':' + dbg,
            ':' + opt,
        ],
        cmd = 'cd $$TMP;' +
              'unzip -q $$ROOT/$(location :%s);' % dbg +
              'mv' +
              ' gerrit_ui/gerrit_ui.nocache.js' +
              ' gerrit_ui/dbg_gerrit_ui.nocache.js;' +
              'unzip -qo $$ROOT/$(location :%s);' % opt +
              'mkdir -p $$(dirname $@);' +
              'zip -qrD $$ROOT/$@ .',
        outs = ['ui_optdbg' + suffix + '.zip'],
        visibility = ['//visibility:public'],
    )

    gwt_binary(
        name = opt,
        module = [MODULE],
        module_deps = [module_dep],
        deps = DEPS,
        compiler_args = args,
        jvm_args = GWT_JVM_ARGS,
    )

    gwt_binary(
        name = dbg,
        style = 'PRETTY',
        optimize = "0",
        module_deps = [module_dep],
        deps = DEPS,
        compiler_args = GWT_COMPILER_ARGS,
        jvm_args = GWT_JVM_ARGS,
    )
def gen_ui_module(name, suffix = ""):
    """Declares the GWT UI module library from src/main/java sources."""
    gwt_module(
        name = name + suffix,
        srcs = native.glob(['src/main/java/**/*.java']),
        gwt_xml = 'src/main/java/%s.gwt.xml' % MODULE.replace('.', '/'),
        # All non-Java files are resources; the .gwt.xml is passed separately
        # via gwt_xml, so exclude it here to avoid duplication.
        resources = native.glob(
            ['src/main/java/**/*'],
            exclude = ['src/main/java/**/*.java'] +
                      ['src/main/java/%s.gwt.xml' % MODULE.replace('.', '/')]),
        deps = [
            '//gerrit-gwtui-common:diffy_logo',
            '//gerrit-gwtui-common:client',
            '//gerrit-gwtexpui:CSS',
            '//lib/codemirror:codemirror' + suffix,
            '//lib/gwt:user',
        ],
        visibility = ['//visibility:public'],
    )
def gwt_user_agent_permutations():
    """Declares one debug-style gwt_binary target per supported browser."""
    for browser in BROWSERS:
        gwt_binary(
            name = "ui_" + browser,
            user_agent = browser,
            style = 'PRETTY',
            optimize = "0",
            module = [MODULE],
            module_deps = [':ui_module'],
            deps = DEPS,
            compiler_args = GWT_COMPILER_ARGS,
            jvm_args = GWT_JVM_ARGS,
        )
| 26.777049 | 105 | 0.61271 |
34f9755ce0ef06c4fb11053944eb0622094e7a6f | 4,000 | py | Python | manila/api/v2/share_instances.py | kpawar89/manila | d487c2db728cedf8357b9f4acbc0a45c21c3a83e | [
"Apache-2.0"
] | 159 | 2015-01-02T09:35:15.000Z | 2022-01-04T11:51:34.000Z | manila/api/v2/share_instances.py | kpawar89/manila | d487c2db728cedf8357b9f4acbc0a45c21c3a83e | [
"Apache-2.0"
] | 6 | 2021-02-11T16:09:43.000Z | 2022-03-15T09:56:25.000Z | manila/api/v2/share_instances.py | kpawar89/manila | d487c2db728cedf8357b9f4acbc0a45c21c3a83e | [
"Apache-2.0"
] | 128 | 2015-01-05T22:52:28.000Z | 2021-12-29T14:00:58.000Z | # Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
from manila.api.views import share_instance as instance_view
from manila import db
from manila import exception
from manila import share
class ShareInstancesController(wsgi.Controller, wsgi.AdminActionsMixin):
    """The share instances API controller for the OpenStack API."""

    resource_name = 'share_instance'
    _view_builder_class = instance_view.ViewBuilder

    def __init__(self):
        self.share_api = share.API()
        super(ShareInstancesController, self).__init__()

    def _get(self, *args, **kwargs):
        # Lookup hook used by wsgi.AdminActionsMixin.
        return db.share_instance_get(*args, **kwargs)

    def _update(self, *args, **kwargs):
        # Persistence hook used by wsgi.AdminActionsMixin (reset-status).
        db.share_instance_update(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        # Deletion hook used by wsgi.AdminActionsMixin (force-delete).
        return self.share_api.delete_instance(*args, **kwargs)

    # Legacy ("os-"-prefixed) and current action names for the same
    # operations, selected by microversion.
    @wsgi.Controller.api_version('2.3', '2.6')
    @wsgi.action('os-reset_status')
    def instance_reset_status_legacy(self, req, id, body):
        return self._reset_status(req, id, body)

    @wsgi.Controller.api_version('2.7')
    @wsgi.action('reset_status')
    def instance_reset_status(self, req, id, body):
        return self._reset_status(req, id, body)

    @wsgi.Controller.api_version('2.3', '2.6')
    @wsgi.action('os-force_delete')
    def instance_force_delete_legacy(self, req, id, body):
        return self._force_delete(req, id, body)

    @wsgi.Controller.api_version('2.7')
    @wsgi.action('force_delete')
    def instance_force_delete(self, req, id, body):
        return self._force_delete(req, id, body)

    @wsgi.Controller.api_version("2.3", "2.34")  # noqa
    @wsgi.Controller.authorize
    def index(self, req):  # pylint: disable=function-redefined
        """List all share instances (microversions 2.3-2.34)."""
        context = req.environ['manila.context']
        # Export-location filters are only supported from 2.35 on; drop them.
        req.GET.pop('export_location_id', None)
        req.GET.pop('export_location_path', None)
        instances = db.share_instances_get_all(context)
        return self._view_builder.detail_list(req, instances)

    @wsgi.Controller.api_version("2.35")  # noqa
    @wsgi.Controller.authorize
    def index(self, req):  # pylint: disable=function-redefined  # noqa F811
        """List share instances with export-location filtering (>= 2.35)."""
        context = req.environ['manila.context']
        filters = {}
        filters.update(req.GET)
        common.remove_invalid_options(
            context, filters, ('export_location_id', 'export_location_path'))
        instances = db.share_instances_get_all(context, filters)
        return self._view_builder.detail_list(req, instances)

    @wsgi.Controller.api_version("2.3")
    @wsgi.Controller.authorize
    def show(self, req, id):
        """Return details of a single share instance, or 404 if missing."""
        context = req.environ['manila.context']
        try:
            instance = db.share_instance_get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        return self._view_builder.detail(req, instance)

    @wsgi.Controller.api_version("2.3")
    @wsgi.Controller.authorize('index')
    def get_share_instances(self, req, share_id):
        """List the instances of one share, or 404 if the share is gone."""
        context = req.environ['manila.context']
        try:
            # Fix: renamed the local from `share`, which shadowed the
            # module-level `from manila import share` import.
            share_ref = self.share_api.get(context, share_id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        view = instance_view.ViewBuilder()
        return view.detail_list(req, share_ref.instances)
def create_resource():
    """Build the WSGI resource wrapping the share-instances controller."""
    controller = ShareInstancesController()
    return wsgi.Resource(controller)
| 34.782609 | 78 | 0.6885 |
dbbb0ae1c7144a6683ff233b0993886c90b7fe6b | 6,104 | py | Python | auth/scopes.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 1 | 2021-05-30T01:54:21.000Z | 2021-05-30T01:54:21.000Z | auth/scopes.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 20 | 2019-12-26T17:32:34.000Z | 2022-03-21T22:18:06.000Z | auth/scopes.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 1 | 2020-05-31T16:28:40.000Z | 2020-05-31T16:28:40.000Z | from collections import namedtuple
import features
import re
Scope = namedtuple('scope', ['scope', 'icon', 'dangerous', 'title', 'description'])
READ_REPO = Scope(scope='repo:read',
icon='fa-hdd-o',
dangerous=False,
title='View all visible repositories',
description=('This application will be able to view and pull all repositories '
'visible to the granting user or robot account'))
WRITE_REPO = Scope(scope='repo:write',
icon='fa-hdd-o',
dangerous=False,
title='Read/Write to any accessible repositories',
description=('This application will be able to view, push and pull to all '
'repositories to which the granting user or robot account has '
'write access'))
ADMIN_REPO = Scope(scope='repo:admin',
icon='fa-hdd-o',
dangerous=False,
title='Administer Repositories',
description=('This application will have administrator access to all '
'repositories to which the granting user or robot account has '
'access'))
CREATE_REPO = Scope(scope='repo:create',
icon='fa-plus',
dangerous=False,
title='Create Repositories',
description=('This application will be able to create repositories in to any '
'namespaces that the granting user or robot account is allowed '
'to create repositories'))
READ_USER = Scope(scope= 'user:read',
icon='fa-user',
dangerous=False,
title='Read User Information',
description=('This application will be able to read user information such as '
'username and email address.'))
ADMIN_USER = Scope(scope= 'user:admin',
icon='fa-gear',
dangerous=True,
title='Administer User',
description=('This application will be able to administer your account '
'including creating robots and granting them permissions '
'to your repositories. You should have absolute trust in the '
'requesting application before granting this permission.'))
ORG_ADMIN = Scope(scope='org:admin',
icon='fa-gear',
dangerous=True,
title='Administer Organization',
description=('This application will be able to administer your organizations '
'including creating robots, creating teams, adjusting team '
'membership, and changing billing settings. You should have '
'absolute trust in the requesting application before granting this '
'permission.'))
DIRECT_LOGIN = Scope(scope='direct_user_login',
icon='fa-exclamation-triangle',
dangerous=True,
title='Full Access',
description=('This scope should not be available to OAuth applications. '
'Never approve a request for this scope!'))
SUPERUSER = Scope(scope='super:user',
icon='fa-street-view',
dangerous=True,
title='Super User Access',
description=('This application will be able to administer your installation '
'including managing users, managing organizations and other '
'features found in the superuser panel. You should have '
'absolute trust in the requesting application before granting this '
'permission.'))
ALL_SCOPES = {scope.scope: scope for scope in (READ_REPO, WRITE_REPO, ADMIN_REPO, CREATE_REPO,
READ_USER, ORG_ADMIN, SUPERUSER, ADMIN_USER)}
IMPLIED_SCOPES = {
ADMIN_REPO: {ADMIN_REPO, WRITE_REPO, READ_REPO},
WRITE_REPO: {WRITE_REPO, READ_REPO},
READ_REPO: {READ_REPO},
CREATE_REPO: {CREATE_REPO},
READ_USER: {READ_USER},
ORG_ADMIN: {ORG_ADMIN},
SUPERUSER: {SUPERUSER},
ADMIN_USER: {ADMIN_USER},
None: set(),
}
def app_scopes(app_config):
    """Return the scopes available to applications under `app_config`.

    The superuser scope is exposed only when FEATURE_SUPER_USERS is enabled.
    Always returns a fresh dict; ALL_SCOPES itself is never mutated.
    """
    superusers_enabled = app_config.get('FEATURE_SUPER_USERS', False)
    return {
        name: scope
        for name, scope in ALL_SCOPES.items()
        if superusers_enabled or name != SUPERUSER.scope
    }
def scopes_from_scope_string(scopes):
  """Parse a scope string into the set of matching Scope objects.

  Per RFC 6749 section 3.3 scopes should be space separated, but commas
  are also accepted for backwards compatibility with existing callers.
  Returns the empty set if *any* listed scope is unrecognized, so a
  partially-invalid request grants nothing.
  """
  if not scopes:
    scopes = ''

  scope_set = {ALL_SCOPES.get(scope, None) for scope in re.split(' |,', scopes)}
  # An unknown scope maps to None; idiomatic `None not in` replaces the
  # original `not None in` (same truth table, clearer precedence).
  return scope_set if None not in scope_set else set()
def validate_scope_string(scopes):
  """Return True iff the string parses to at least one recognized scope."""
  return bool(scopes_from_scope_string(scopes))
def is_subset_string(full_string, expected_string):
  """Return True if every scope in ``expected_string`` is granted, directly
  or by implication, by the scopes in ``full_string``.
  """
  granted = scopes_from_scope_string(full_string)
  if not granted:
    return False

  # Expand each granted scope to everything it implies (e.g. repo admin
  # implies repo write and read).
  implied = set()
  for scope in granted:
    implied |= IMPLIED_SCOPES[scope]

  return scopes_from_scope_string(expected_string) <= implied
def get_scope_information(scopes_string):
  """Return display metadata (title, description, icon, danger flag) for
  each recognized scope named in the given scope string.
  """
  return [
    {
      'title': scope.title,
      'scope': scope.scope,
      'description': scope.description,
      'icon': scope.icon,
      'dangerous': scope.dangerous,
    }
    for scope in scopes_from_scope_string(scopes_string)
  ]
| 41.52381 | 99 | 0.591088 |
c151de5fe443434b44e383c56c6f1575518ac534 | 6,103 | py | Python | main.py | MoeZilla/NSFW-USERBOT | 7e91c99bb86f076d59d501a3888d3c4019ae5729 | [
"MIT"
] | 4 | 2021-07-10T12:46:46.000Z | 2021-12-05T08:33:12.000Z | main.py | MoeZilla/NSFW-USERBOT | 7e91c99bb86f076d59d501a3888d3c4019ae5729 | [
"MIT"
] | null | null | null | main.py | MoeZilla/NSFW-USERBOT | 7e91c99bb86f076d59d501a3888d3c4019ae5729 | [
"MIT"
] | 2 | 2021-12-05T08:33:13.000Z | 2022-02-24T18:27:06.000Z | import os
from pyrogram import Client, filters
import rapidjson as json
import requests
import time
StartTime = time.time()
API_ID = os.environ.get("API_ID", None)
API_HASH = os.environ.get("API_HASH", None)
SESSION = os.environ.get("SESSION", None)
PREFIX = os.environ.get("PREFIX", None)
app = Client(
session_name=SESSION,
api_id=API_ID,
api_hash=API_HASH,
)
REPOLINK = """ Source code: [Github](https://github.com/Moezilla/vc-userbot)
License: [ GPL-3.0 License](https://github.com/moezilla/vc-userbot/blob/master/LICENSE.md)"""
@app.on_message(filters.command("repo", PREFIX))
async def repo(_, message):
    # Reply with the bot's source-code and license links (markdown text).
    await message.reply_text(REPOLINK)
def _nekos_url(endpoint):
    """Call https://nekos.life/api/v2/img/<endpoint> and return the media URL.

    The API responds with JSON shaped like {"url": "<direct link>"}; this
    returns that link (or None if the key is absent).
    """
    response = requests.get("https://nekos.life/api/v2/img/" + endpoint).json()
    return response.get("url")


# BUG FIX: every handler below originally did `x = url.get("<name>")`, but
# `url` was never defined anywhere in the module, so every command raised a
# NameError at runtime. The intended behaviour — extracting the "url" field
# from the API's JSON response — is now handled by _nekos_url(). Reply types
# (photo vs video) and endpoint names are preserved exactly.


@app.on_message(filters.command("smug", PREFIX))
def smug(_, message):
    message.reply_video(_nekos_url("smug"))


@app.on_message(filters.command("solog", PREFIX))
def solog(_, message):
    message.reply_video(_nekos_url("solog"))


@app.on_message(filters.command("neko", PREFIX))
def neko(_, message):
    message.reply_photo(_nekos_url("neko"))


@app.on_message(filters.command("feet", PREFIX))
def feet(_, message):
    message.reply_photo(_nekos_url("feet"))


@app.on_message(filters.command("yuri", PREFIX))
def yuri(_, message):
    message.reply_photo(_nekos_url("yuri"))


@app.on_message(filters.command("trap", PREFIX))
def trap(_, message):
    message.reply_photo(_nekos_url("trap"))


@app.on_message(filters.command("futanari", PREFIX))
def futanari(_, message):
    message.reply_photo(_nekos_url("futanari"))


@app.on_message(filters.command("hololewd", PREFIX))
def hololewd(_, message):
    message.reply_photo(_nekos_url("hololewd"))


@app.on_message(filters.command("lewdkemo", PREFIX))
def lewdkemo(_, message):
    message.reply_photo(_nekos_url("lewdkemo"))


# NOTE(review): this second `solog` shadows the first at module level, but
# both handlers stay registered with pyrogram (kept as in the original).
@app.on_message(filters.command("sologif", PREFIX))
def solog(_, message):
    message.reply_video(_nekos_url("solog"))


@app.on_message(filters.command("feetgif", PREFIX))
def feetg(_, message):
    message.reply_video(_nekos_url("feetg"))


@app.on_message(filters.command("cumgif", PREFIX))
def cum(_, message):
    message.reply_video(_nekos_url("cum"))


@app.on_message(filters.command("erokemo", PREFIX))
def erokemo(_, message):
    message.reply_photo(_nekos_url("erokemo"))


@app.on_message(filters.command("les", PREFIX))
def les(_, message):
    message.reply_video(_nekos_url("les"))


@app.on_message(filters.command("wallpaper", PREFIX))
def wallpaper(_, message):
    message.reply_photo(_nekos_url("wallpaper"))


@app.on_message(filters.command("lewdk", PREFIX))
def lewdk(_, message):
    message.reply_photo(_nekos_url("lewdk"))


@app.on_message(filters.command("ngif", PREFIX))
def ngif(_, message):
    message.reply_video(_nekos_url("ngif"))


@app.on_message(filters.command("tickle", PREFIX))
def tickle(_, message):
    message.reply_video(_nekos_url("tickle"))


@app.on_message(filters.command("lewd", PREFIX))
def lewd(_, message):
    message.reply_photo(_nekos_url("lewd"))


@app.on_message(filters.command("feed", PREFIX))
def feed(_, message):
    message.reply_video(_nekos_url("feed"))


@app.on_message(filters.command("eroyuri", PREFIX))
def eroyuri(_, message):
    message.reply_photo(_nekos_url("eroyuri"))


@app.on_message(filters.command("eron", PREFIX))
def eron(_, message):
    message.reply_photo(_nekos_url("eron"))


@app.on_message(filters.command("cum", PREFIX))
def cumjpg(_, message):
    message.reply_photo(_nekos_url("cum_jpg"))


@app.on_message(filters.command("bjgif", PREFIX))
def bj(_, message):
    message.reply_video(_nekos_url("bj"))


@app.on_message(filters.command("blowjob", PREFIX))
def blowjob(_, message):
    message.reply_photo(_nekos_url("blowjob"))


@app.on_message(filters.command("nekogif", PREFIX))
def nekogif(_, message):
    message.reply_video(_nekos_url("nsfw_neko_gif"))
| 32.462766 | 93 | 0.690972 |
f77be3d10e8dd22f6055cf226ece5749f04386d5 | 2,716 | py | Python | tests/unit/test_r.py | xhochy/repo2docker | 67412908b7e5b2cff04789876739e382d1e164ae | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_r.py | xhochy/repo2docker | 67412908b7e5b2cff04789876739e382d1e164ae | [
"BSD-3-Clause"
] | 2 | 2019-02-13T20:59:41.000Z | 2020-02-12T12:54:15.000Z | tests/unit/test_r.py | xhochy/repo2docker | 67412908b7e5b2cff04789876739e382d1e164ae | [
"BSD-3-Clause"
] | null | null | null | from datetime import date
import pytest
from repo2docker import buildpacks
def test_unsupported_version(tmpdir):
tmpdir.chdir()
with open("runtime.txt", "w") as f:
f.write("r-3.8-2019-01-01")
r = buildpacks.RBuildPack()
with pytest.raises(ValueError) as excinfo:
# access the property to trigger the exception
_ = r.r_version
# check the version is mentioned in the exception
assert "'3.8'" in str(excinfo.value)
@pytest.mark.parametrize(
"runtime_version, expected", [("", "3.6"), ("3.6", "3.6"), ("3.5.1", "3.5")]
)
def test_version_specification(tmpdir, runtime_version, expected):
tmpdir.chdir()
with open("runtime.txt", "w") as f:
if runtime_version:
runtime_version += "-"
f.write(f"r-{runtime_version}2019-01-01")
r = buildpacks.RBuildPack()
assert r.r_version.startswith(expected)
def test_version_completion(tmpdir):
tmpdir.chdir()
with open("runtime.txt", "w") as f:
f.write(f"r-3.6-2019-01-01")
r = buildpacks.RBuildPack()
assert r.r_version == "3.6.1-3bionic"
@pytest.mark.parametrize(
"runtime, expected",
[
("r-2019-01-01", (2019, 1, 1)),
("r-3.6.1-2019-01-01", (2019, 1, 1)),
("r-3.5-2019-01-01", (2019, 1, 1)),
],
)
def test_mran_date(tmpdir, runtime, expected):
tmpdir.chdir()
with open("runtime.txt", "w") as f:
f.write(runtime)
r = buildpacks.RBuildPack()
assert r.checkpoint_date == date(*expected)
def test_install_from_base(tmpdir):
# check that for R==3.4 we install from ubuntu
tmpdir.chdir()
with open("runtime.txt", "w") as f:
f.write("r-3.4-2019-01-02")
r = buildpacks.RBuildPack()
assert "r-base" in r.get_packages()
def test_install_from_ppa(tmpdir):
# check that for R>3.4 we don't install r-base from Ubuntu
tmpdir.chdir()
with open("runtime.txt", "w") as f:
f.write("r-3.5-2019-01-02")
r = buildpacks.RBuildPack()
assert "r-base" not in r.get_packages()
def test_custom_ppa(tmpdir):
tmpdir.chdir()
with open("runtime.txt", "w") as f:
f.write("r-3.5-2019-01-02")
r = buildpacks.RBuildPack()
scripts = r.get_build_scripts()
# check that at least one of the build scripts adds this new PPA
for user, script in scripts:
if "https://cloud.r-project.org/bin/linux/ubuntu bionic-cran35/" in script:
break
else:
assert False, "Should have added a new PPA"
# check that we install the right package
for user, script in scripts:
if "r-base=3.5" in script:
break
else:
assert False, "Should have installed base R"
| 24.690909 | 83 | 0.618189 |
7528c247515242b65b86949407dc37010241f279 | 819 | py | Python | setup.py | codeswhite/stargen | 440721e9d54cb1eb830f7ece1dc6b8df731fbae8 | [
"MIT"
] | null | null | null | setup.py | codeswhite/stargen | 440721e9d54cb1eb830f7ece1dc6b8df731fbae8 | [
"MIT"
] | 2 | 2021-01-14T13:00:41.000Z | 2021-01-14T13:26:15.000Z | setup.py | codeswhite/stargen | 440721e9d54cb1eb830f7ece1dc6b8df731fbae8 | [
"MIT"
] | 1 | 2020-09-28T18:16:21.000Z | 2020-09-28T18:16:21.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="stargen",
version="0.8.6",
description="Framework for wordlist generation, combination and expansion",
url="https://github.com/codeswhite/stargen",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.8',
install_requires=[
'interutils',
],
entry_points={
'console_scripts': [
'stargen = stargen:main',
],
},
author="Max G",
author_email="max3227@gmail.com",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages()
)
| 26.419355 | 79 | 0.628816 |
29c72ace7e915ad9601368a1620c242256b76367 | 12,511 | py | Python | packages/python/plotly/plotly/validators/_funnelarea.py | TitouenDCL/plotly.py | 0c98391f575dab0e3f08ede907045cc72b3d40a4 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | packages/python/plotly/plotly/validators/_funnelarea.py | TitouenDCL/plotly.py | 0c98391f575dab0e3f08ede907045cc72b3d40a4 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | packages/python/plotly/plotly/validators/_funnelarea.py | TitouenDCL/plotly.py | 0c98391f575dab0e3f08ede907045cc72b3d40a4 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class FunnelareaValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="funnelarea", parent_name="", **kwargs):
super(FunnelareaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Funnelarea"),
data_docs=kwargs.pop(
"data_docs",
"""
aspectratio
Sets the ratio between height and width
baseratio
Sets the ratio between bottom length and
maximum top length.
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
dlabel
Sets the label step. See `label0` for more
info.
domain
:class:`plotly.graph_objects.funnelarea.Domain`
instance or dict with compatible properties
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.funnelarea.Hoverla
bel` instance or dict with compatible
properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for
several points, "xother" will be added to those
with different x positions from the first
point. An underscore before or after
"(x|y)other" will add a space on that side,
only when this field is shown. Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price:
%{y:$.2f}". https://github.com/d3/d3-format/tre
e/v1.4.5#d3-format for details on the
formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on
the date formatting syntax. The variables
available in `hovertemplate` are the ones
emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
variables `label`, `color`, `value`, `text` and
`percent`. Anything contained in tag `<extra>`
is displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for hovertemplate .
hovertext
Sets hover text elements associated with each
sector. If a single string, the same string
appears for all data points. If an array of
string, the items are mapped in order of this
trace's sectors. To be seen, trace `hoverinfo`
must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
insidetextfont
Sets the font used for `textinfo` lying inside
the sector.
label0
Alternate to `labels`. Builds a numeric set of
labels. Use with `dlabel` where `label0` is the
starting label and `dlabel` the step.
labels
Sets the sector labels. If `labels` entries are
duplicated, we sum associated `values` or
simply count occurrences if `values` is not
provided. For other array attributes (including
color) we use the first non-empty entry among
all occurrences of the label.
labelssrc
Sets the source reference on Chart Studio Cloud
for labels .
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.funnelarea.Legendg
rouptitle` instance or dict with compatible
properties
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
marker
:class:`plotly.graph_objects.funnelarea.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the trace.
scalegroup
If there are multiple funnelareas that should
be sized according to their totals, link them
by providing a non-empty group id here shared
by every trace in the same group.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnelarea.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each sector.
If trace `textinfo` contains a "text" flag,
these elements will be seen on the chart. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on
the graph.
textposition
Specifies the location of the `textinfo`.
textpositionsrc
Sets the source reference on Chart Studio Cloud
for textposition .
textsrc
Sets the source reference on Chart Studio Cloud
for text .
texttemplate
Template string used for rendering the
information text that appear on points. Note
that this will override `textinfo`. Variables
are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-fo
rmat/tree/v1.4.5#d3-format for details on the
formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on
the date formatting syntax. Every attributes
that can be specified per-point (the ones that
are `arrayOk: true`) are available. variables
`label`, `color`, `value`, `text` and
`percent`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud
for texttemplate .
title
:class:`plotly.graph_objects.funnelarea.Title`
instance or dict with compatible properties
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
values
Sets the values of the sectors. If omitted, we
count occurrences of each label.
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
""",
),
**kwargs
)
| 48.492248 | 75 | 0.546799 |
f38f2b0df1ea0a447150c075643cca82a66e2f1f | 26,177 | py | Python | corehq/apps/app_manager/tests/test_report_config.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/app_manager/tests/test_report_config.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | 34 | 2020-12-11T18:51:17.000Z | 2022-02-21T10:13:26.000Z | corehq/apps/app_manager/tests/test_report_config.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | null | null | null | import os
from collections import OrderedDict
from xml.etree import cElementTree as ElementTree
from django.test import SimpleTestCase, TestCase
import mock
from casexml.apps.phone.tests.utils import (
call_fixture_generator,
create_restore_user,
)
from corehq.apps.app_manager.const import MOBILE_UCR_VERSION_2
from corehq.apps.app_manager.fixtures import report_fixture_generator
from corehq.apps.app_manager.fixtures.mobile_ucr import (
ReportFixturesProviderV1,
)
from corehq.apps.app_manager.models import (
Application,
GraphConfiguration,
GraphSeries,
MobileSelectFilter,
Module,
ReportAppConfig,
ReportModule,
_filter_by_user_id,
_get_auto_filter_function,
)
from corehq.apps.app_manager.tests.mocks.mobile_ucr import (
mock_report_configuration_get,
mock_report_configurations,
mock_report_data,
)
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.reports_core.filters import Choice
from corehq.apps.userreports.models import ReportConfiguration, ReportMeta
from corehq.apps.userreports.reports.filters.choice_providers import (
ChoiceProvider,
)
from corehq.apps.userreports.reports.filters.specs import (
ChoiceListFilterSpec,
DynamicChoiceListFilterSpec,
FilterChoice,
)
from corehq.apps.userreports.reports.specs import (
FieldColumn,
GraphDisplayColumn,
MultibarChartSpec,
)
from corehq.apps.userreports.tests.utils import (
get_sample_report_config,
mock_datasource_config,
)
from corehq.apps.users.dbaccessors.all_commcare_users import delete_all_users
from corehq.toggles import (
ADD_ROW_INDEX_TO_MOBILE_UCRS,
MOBILE_UCR,
NAMESPACE_DOMAIN,
)
from corehq.util.test_utils import flag_enabled
class ReportAppConfigTest(SimpleTestCase):
    """Unit tests for ReportAppConfig uuid generation and wrapping."""

    def test_new_uuid(self):
        # A freshly created config gets a non-empty string uuid.
        config = ReportAppConfig(report_id='report_id')
        self.assertTrue(config.uuid)
        self.assertIsInstance(config.uuid, str)

    def test_different_uuids(self):
        # Two configs for the same report must not share a uuid.
        first = ReportAppConfig(report_id='report_id')
        second = ReportAppConfig(report_id='report_id')
        self.assertNotEqual(first.uuid, second.uuid)

    def test_existing_uuid(self):
        # Wrapping a document that already carries a uuid preserves it.
        existing_uuid = 'existing_uuid'
        wrapped = ReportAppConfig.wrap({
            "report_id": "report_id",
            "uuid": existing_uuid,
        })
        self.assertEqual(existing_uuid, wrapped.uuid)
def MAKE_REPORT_CONFIG(domain, report_id, columns=None):
    """Build a sample chart ReportConfiguration for tests.

    By default the report has a single "color" field column; pass
    ``columns`` to override. Note a falsy ``columns`` value (e.g. ``[]``)
    also falls back to the default because of the ``or``.
    """
    columns = columns or [
        FieldColumn(
            type='field',
            aggregation="simple",
            column_id="color_94ec39e6",
            display="color",
            field="color_94ec39e6"
        ).to_json(),
    ]
    return ReportConfiguration(
        _id=report_id,
        title="Entry Report",
        aggregation_columns=["color_94ec39e6"],
        config_id="516c494736e95b023cc7845b557de0f5",
        domain=domain,
        report_meta=ReportMeta(builder_report_type="chart", created_by_builder=True),
        columns=columns,
        # One multibar chart of count-by-color.
        configured_charts=[
            MultibarChartSpec(type='multibar', chart_id="7451243209119342931", x_axis_column="color_94ec39e6",
                              y_axis_columns=[GraphDisplayColumn(column_id="count", display="count")]).to_json()
        ],
        # Two filters: a dynamic owner-name choice list and a static
        # fruit choice list.
        filters=[
            DynamicChoiceListFilterSpec(
                type='dynamic_choice_list',
                display="owner name",
                field="computed_owner_name_40cc88a0",
                slug="computed_owner_name_40cc88a0_1"
            ).to_json(),
            ChoiceListFilterSpec(
                type='choice_list',
                display="fav color",
                field="fav_fruit_abc123",
                slug="fav_fruit_abc123_1",
                choices=[
                    FilterChoice(value='a', display='apple'),
                    FilterChoice(value='b', display='banana'),
                    FilterChoice(value='c', display='clementine'),
                ]
            ).to_json()
        ],
    )
class ReportFiltersSuiteTest(TestCase, TestXmlMixin):
    # Expected-XML fixture files for TestXmlMixin live under data/mobile_ucr.
    file_path = 'data', 'mobile_ucr'
    root = os.path.dirname(__file__)
@staticmethod
def make_report_config(domain, report_id):
class MockChoiceProvider(ChoiceProvider):
def query(self, query_context):
pass
def get_choices_for_known_values(self, values, user):
_map = {'cory': 'Cory Zue', 'ctsims': 'Clayton Sims', 'daniel': 'Daniel Roberts'}
return [Choice(value, _map.get(value, value)) for value in values]
report_configuration = MAKE_REPORT_CONFIG(domain, report_id)
ui_filter = report_configuration.get_ui_filter('computed_owner_name_40cc88a0_1')
ui_filter.choice_provider = MockChoiceProvider(None, None)
return report_configuration
    @classmethod
    def setUpClass(cls):
        """Build a two-report app plus case/survey modules, generate its
        suite XML and its report fixture once for all tests in this class.

        The order here matters: feature flags must be set before the app
        is built, and the report data/configuration mocks must be active
        while the fixture generator runs.
        """
        super(ReportFiltersSuiteTest, cls).setUpClass()
        delete_all_users()
        cls.report_id = '7b97e8b53d00d43ca126b10093215a9d'
        cls.report_config_mobile_id = 'a98c812873986df34fd1b4ceb45e6164ae9cc664'
        cls.domain = 'report-filter-test-domain'
        create_domain(cls.domain)
        cls.user = create_restore_user(
            domain=cls.domain,
            username='ralph',
        )
        # Enable mobile UCR (and row indexing) for the test domain before
        # the app/suite is generated.
        MOBILE_UCR.set(cls.domain, True, NAMESPACE_DOMAIN)
        ADD_ROW_INDEX_TO_MOBILE_UCRS.set(cls.domain, True, NAMESPACE_DOMAIN)
        report_configuration = cls.make_report_config(cls.domain, cls.report_id)
        # also make a report with a hidden column
        cls.hidden_column_report_id = 'bd2a43018ad9463682165c1bc16347ac'
        cls.hidden_column_mobile_id = '45152061d8dc4d2a8d987a0568abe1ae'
        report_configuration_with_hidden_column = MAKE_REPORT_CONFIG(
            cls.domain,
            cls.hidden_column_report_id,
            columns=[
                FieldColumn(
                    type='field',
                    aggregation="simple",
                    column_id="color_94ec39e6",
                    display="color",
                    field="color_94ec39e6"
                ).to_json(),
                FieldColumn(
                    type='field',
                    aggregation="simple",
                    column_id="hidden_color_94ec39e6",
                    display="color",
                    field="color_94ec39e6",
                    visible=False,
                ).to_json(),
            ]
        )
        cls.report_configs_by_id = {
            cls.report_id: report_configuration,
            cls.hidden_column_report_id: report_configuration_with_hidden_column
        }
        # Report module carrying both report configs; the first one gets
        # two mobile-select filters and a bar-graph configuration.
        cls.app = Application.new_app(cls.domain, "Report Filter Test App")
        report_module = cls.app.add_module(ReportModule.new_module("Report Module", 'en'))
        report_module.report_context_tile = True
        report_module.report_configs.append(
            ReportAppConfig(
                report_id=cls.report_id,
                header={},
                description="",
                complete_graph_configs={
                    '7451243209119342931': GraphConfiguration(
                        graph_type="bar",
                        series=[GraphSeries(
                            config={},
                            locale_specific_config={},
                            data_path="",
                            x_function="",
                            y_function="",
                        )],
                    )
                },
                filters=OrderedDict([
                    ('fav_fruit_abc123_1', MobileSelectFilter()),
                    ('computed_owner_name_40cc88a0_1', MobileSelectFilter()),
                ]),
                uuid=cls.report_config_mobile_id,
            )
        )
        report_module.report_configs.append(
            ReportAppConfig(
                report_id=cls.hidden_column_report_id,
                header={},
                description="",
                complete_graph_configs={},
                filters={},
                uuid=cls.hidden_column_mobile_id,
            )
        )
        # Case and survey modules exist to exercise the report-context
        # tile in non-report modules.
        case_module = cls.app.add_module(Module.new_module("Case Module", 'en'))
        case_module.case_type = "fish"
        case_module.report_context_tile = True
        case_form = case_module.new_form("Update Fish", None)
        case_form.requires = "case"
        case_form.xmlns = "http://openrosa.org/formdesigner/2423EFB5-2E8C-4B8F-9DA0-23FFFD4391AF"
        survey_module = cls.app.add_module(Module.new_module("Survey Module", 'en'))
        survey_module.report_context_tile = True
        survey_form = survey_module.new_form("Survey", None)
        survey_form.xmlns = "http://openrosa.org/formdesigner/2423EFB5-2E8C-4B8F-9DA0-23FFFD4391AE"
        with mock_report_configurations(cls.report_configs_by_id):
            cls.suite = cls.app.create_suite()
        cls.data = [
            {'color_94ec39e6': 'red', 'count': 2, 'computed_owner_name_40cc88a0': 'cory', 'fav_fruit_abc123': 'c'},
            {'color_94ec39e6': 'black', 'count': 1, 'computed_owner_name_40cc88a0': 'ctsims', 'fav_fruit_abc123': 'b'},
            {'color_94ec39e6': 'red', 'count': 3, 'computed_owner_name_40cc88a0': 'daniel', 'fav_fruit_abc123': 'b'},
        ]
        # Generate the V1 report fixture for the restore user with all
        # report data/config lookups mocked out.
        with mock_report_data(cls.data):
            with mock_report_configuration_get(cls.report_configs_by_id):
                with mock.patch('corehq.apps.app_manager.fixtures.mobile_ucr.get_apps_in_domain',
                                lambda domain, include_remote: [cls.app]):
                    with mock_datasource_config():
                        fixtures = call_fixture_generator(report_fixture_generator, cls.user)
                        fixture = [f for f in fixtures if f.attrib.get('id') == ReportFixturesProviderV1.id][0]
                        cls.fixture = ElementTree.tostring(fixture, encoding='utf-8')
    def test_filter_entry(self):
        # Entry for the filtered report: one session datum per mobile-select
        # filter, then the autoselected report id datum. The second entry
        # (hidden-column report) has no filter datums.
        self.assertXmlPartialEqual("""
        <partial>
          <entry>
            <command id="reports.a98c812873986df34fd1b4ceb45e6164ae9cc664">
              <text>
                <locale id="cchq.reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.name"/>
              </text>
            </command>
            <instance id="commcare-reports:index" src="jr://fixture/commcare-reports:index"/>
            <instance id="commcaresession" src="jr://instance/session"/>
            <instance id="reports" src="jr://fixture/commcare:reports"/>
            <session>
              <datum autoselect="true" detail-persistent="report_context_tile" id="tile_holder" nodeset="instance('commcare-reports:index')/report_index/reports" value="./@last_update"/>
              <datum id="report_filter_a98c812873986df34fd1b4ceb45e6164ae9cc664_fav_fruit_abc123_1" nodeset="instance('reports')/reports/report[@id='a98c812873986df34fd1b4ceb45e6164ae9cc664']/filters/filter[@field='fav_fruit_abc123_1']/option" value="./@value" detail-select="reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.filter.fav_fruit_abc123_1" />
              <datum id="report_filter_a98c812873986df34fd1b4ceb45e6164ae9cc664_computed_owner_name_40cc88a0_1" nodeset="instance('reports')/reports/report[@id='a98c812873986df34fd1b4ceb45e6164ae9cc664']/filters/filter[@field='computed_owner_name_40cc88a0_1']/option" value="./@value" detail-select="reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.filter.computed_owner_name_40cc88a0_1"/>
              <datum id="report_id_a98c812873986df34fd1b4ceb45e6164ae9cc664" nodeset="instance('reports')/reports/report[@id='a98c812873986df34fd1b4ceb45e6164ae9cc664']" value="./@id" detail-select="reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.select" detail-confirm="reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.summary" autoselect="true"/>
            </session>
          </entry>
        </partial>
        """, self.suite, "entry[1]")
        self.assertXmlPartialEqual("""
        <partial>
          <entry>
            <command id="reports.45152061d8dc4d2a8d987a0568abe1ae">
              <text>
                <locale id="cchq.reports.45152061d8dc4d2a8d987a0568abe1ae.name"/>
              </text>
            </command>
            <instance id="commcare-reports:index" src="jr://fixture/commcare-reports:index"/>
            <instance id="reports" src="jr://fixture/commcare:reports"/>
            <session>
              <datum autoselect="true" detail-persistent="report_context_tile" id="tile_holder" nodeset="instance('commcare-reports:index')/report_index/reports" value="./@last_update"/>
              <datum autoselect="true" detail-confirm="reports.45152061d8dc4d2a8d987a0568abe1ae.summary" detail-select="reports.45152061d8dc4d2a8d987a0568abe1ae.select" id="report_id_45152061d8dc4d2a8d987a0568abe1ae" nodeset="instance('reports')/reports/report[@id='45152061d8dc4d2a8d987a0568abe1ae']" value="./@id"/>
            </session>
          </entry>
        </partial>
        """, self.suite, "entry[2]")
def test_filter_detail(self):
self.assertXmlPartialEqual("""
<partial>
<detail id="reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.filter.computed_owner_name_40cc88a0_1">
<title>
<text>owner name</text>
</title>
<field>
<header>
<text>owner name</text>
</header>
<template>
<text>
<xpath function="."/>
</text>
</template>
</field>
</detail>
</partial>
""", self.suite, "detail[@id='reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.filter.computed_owner_name_40cc88a0_1']")
def test_data_detail(self):
self.assertXmlPartialEqual("""
<partial>
<detail nodeset="rows/row[column[@id='fav_fruit_abc123']=instance('commcaresession')/session/data/report_filter_a98c812873986df34fd1b4ceb45e6164ae9cc664_fav_fruit_abc123_1][column[@id='computed_owner_name_40cc88a0']=instance('commcaresession')/session/data/report_filter_a98c812873986df34fd1b4ceb45e6164ae9cc664_computed_owner_name_40cc88a0_1]" id="reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.data">
<title>
<text>
<locale id="cchq.report_data_table"/>
</text>
</title>
<field>
<header width="0">
<text/>
</header>
<template width="0">
<text/>
</template>
<sort direction="ascending" order="1" type="int">
<text>
<xpath function="column[@id='row_index']"/>
</text>
</sort>
</field>
<field>
<header>
<text>
<locale id="cchq.reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.headers.color_94ec39e6"/>
</text>
</header>
<template>
<text>
<xpath function="column[@id='color_94ec39e6']"/>
</text>
</template>
</field>
</detail>
</partial>
""", self.suite, "detail/detail[@id='reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.data']")
def test_graph(self):
self.assertXmlPartialEqual("""
<partial>
<template form="graph">
<graph type="bar">
<series nodeset="instance('reports')/reports/report[@id='a98c812873986df34fd1b4ceb45e6164ae9cc664']/rows/row[@is_total_row='False'][column[@id='fav_fruit_abc123']=instance('commcaresession')/session/data/report_filter_a98c812873986df34fd1b4ceb45e6164ae9cc664_fav_fruit_abc123_1][column[@id='computed_owner_name_40cc88a0']=instance('commcaresession')/session/data/report_filter_a98c812873986df34fd1b4ceb45e6164ae9cc664_computed_owner_name_40cc88a0_1]">
<configuration/>
<x function="column[@id='color_94ec39e6']"/>
<y function="column[@id='count']"/>
</series>
<configuration/>
</graph>
</template>
</partial>
""", self.suite, "detail[@id='reports.a98c812873986df34fd1b4ceb45e6164ae9cc664.summary']/detail/field/template[@form='graph']")
def test_fixture_rows(self):
self.assertXmlPartialEqual("""
<partial>
<rows>
<row index="0" is_total_row="False">
<column id="row_index">0</column>
<column id="color_94ec39e6">red</column>
<column id="computed_owner_name_40cc88a0">cory</column>
<column id="count">2</column>
<column id="fav_fruit_abc123">c</column>
</row>
<row index="1" is_total_row="False">
<column id="row_index">1</column>
<column id="color_94ec39e6">black</column>
<column id="computed_owner_name_40cc88a0">ctsims</column>
<column id="count">1</column>
<column id="fav_fruit_abc123">b</column>
</row>
<row index="2" is_total_row="False">
<column id="row_index">2</column>
<column id="color_94ec39e6">red</column>
<column id="computed_owner_name_40cc88a0">daniel</column>
<column id="count">3</column>
<column id="fav_fruit_abc123">b</column>
</row>
</rows>
</partial>
""", self.fixture, "reports/report[@id='a98c812873986df34fd1b4ceb45e6164ae9cc664']/rows")
def test_fixture_filters(self):
self.assertXmlPartialEqual("""
<partial>
<filters>
<filter field="fav_fruit_abc123_1">
<option value="b">banana</option>
<option value="c">clementine</option>
</filter>
<filter field="computed_owner_name_40cc88a0_1">
<option value="ctsims">Clayton Sims</option>
<option value="cory">Cory Zue</option>
<option value="daniel">Daniel Roberts</option>
</filter>
</filters>
</partial>
""", self.fixture, "reports/report[@id='a98c812873986df34fd1b4ceb45e6164ae9cc664']/filters")
def test_hidden_columns_data_detail(self):
self.assertXmlPartialEqual("""
<partial>
<detail id="reports.45152061d8dc4d2a8d987a0568abe1ae.data" nodeset="rows/row">
<title>
<text>
<locale id="cchq.report_data_table"/>
</text>
</title>
<field>
<header width="0">
<text/>
</header>
<template width="0">
<text/>
</template>
<sort direction="ascending" order="1" type="int">
<text>
<xpath function="column[@id='row_index']"/>
</text>
</sort>
</field>
<field>
<header>
<text>
<locale id="cchq.reports.45152061d8dc4d2a8d987a0568abe1ae.headers.color_94ec39e6"/>
</text>
</header>
<template>
<text>
<xpath function="column[@id='color_94ec39e6']"/>
</text>
</template>
</field>
</detail>
</partial>
""", self.suite, "detail/detail[@id='reports.45152061d8dc4d2a8d987a0568abe1ae.data']")
def test_liveness_fixture(self):
self.assertXmlPartialEqual("""
<partial>
<detail id="report_context_tile">
<title>
<text/>
</title>
<field>
<style horz-align="left" font-size="small">
<grid grid-height="1" grid-width="12" grid-x="0" grid-y="0"/>
</style>
<header>
<text/>
</header>
<template>
<text>
<xpath function="concat($message, ' ', format-date(date(instance('commcare-reports:index')/report_index/reports/@last_update), '%e/%n/%Y'))">
<variable name="message">
<locale id="cchq.reports_last_updated_on"/>
</variable>
</xpath>
</text>
</template>
</field>
</detail>
</partial>
""", self.suite, "detail[@id='report_context_tile']")
# Entry for form from case module
self.assertXmlPartialEqual("""
<partial>
<entry>
<form>http://openrosa.org/formdesigner/2423EFB5-2E8C-4B8F-9DA0-23FFFD4391AF</form>
<session>
<datum id="tile_holder" nodeset="instance('commcare-reports:index')/report_index/reports" value="./@last_update" detail-persistent="report_context_tile" autoselect="true"/>
<datum id="case_id" nodeset="instance('casedb')/casedb/case[@case_type='fish'][@status='open']" value="./@case_id" detail-select="m1_case_short" detail-confirm="m1_case_long"/>
</session>
<command id="m1-f0">
<text>
<locale id="forms.m1f0"/>
</text>
</command>
<instance id="casedb" src="jr://instance/casedb"/>
<instance id="commcare-reports:index" src="jr://fixture/commcare-reports:index"/>
</entry>
</partial>
""", self.suite, "entry[3]")
# Entry for form from survey module
self.assertXmlPartialEqual("""
<partial>
<entry>
<form>http://openrosa.org/formdesigner/2423EFB5-2E8C-4B8F-9DA0-23FFFD4391AE</form>
<session>
<datum id="tile_holder" nodeset="instance('commcare-reports:index')/report_index/reports" value="./@last_update" detail-persistent="report_context_tile" autoselect="true"/>
</session>
<command id="m2-f0">
<text>
<locale id="forms.m2f0"/>
</text>
</command>
<instance id="commcare-reports:index" src="jr://fixture/commcare-reports:index"/>
</entry>
</partial>
""", self.suite, "entry[4]")
class TestReportAutoFilters(SimpleTestCase):
def test_get_filter_function(self):
fn = _get_auto_filter_function('user_id')
self.assertEqual(fn, _filter_by_user_id)
@flag_enabled('MOBILE_UCR')
class TestReportConfigInstances(TestCase, TestXmlMixin):
file_path = ('data',)
domain = 'test_report_config_instances'
def test_autogenerate_instance_declaration(self):
app = self._make_app("Untitled Application")
report_app_config = self._make_report_app_config("my_report")
module = self._add_report_module(app, report_app_config)
form = self._add_form_with_report_reference(app, report_app_config)
expected_declaration = ("""<instance id="commcare-reports:{}" src="jr://fixture/commcare-reports:{}"/>"""
.format(report_app_config.report_slug, report_app_config.uuid))
self.assertIn(expected_declaration, self._render_form(app, form))
def test_disallow_duplicate_slugs(self):
app = self._make_app("Untitled Application")
report_app_config1 = self._make_report_app_config("duplicate")
module1 = self._add_report_module(app, report_app_config1)
report_app_config2 = self._make_report_app_config("duplicate")
module2 = self._add_report_module(app, report_app_config2)
errors = module2.validate_for_build()
self.assertEqual('report config id duplicated', errors[0]['type'])
def test_allow_duplicates_on_different_apps(self):
app1 = self._make_app("Untitled Application")
report_app_config1 = self._make_report_app_config("duplicate")
module1 = self._add_report_module(app1, report_app_config1)
app2 = self._make_app("Untitled Application")
report_app_config2 = self._make_report_app_config("duplicate")
module2 = self._add_report_module(app2, report_app_config2)
errors = module2.validate_for_build()
self.assertEqual([], errors)
def _make_app(self, app_name):
app = Application.new_app(self.domain, app_name)
app.mobile_ucr_restore_version = MOBILE_UCR_VERSION_2
app.save()
self.addCleanup(app.delete)
return app
def _make_report_app_config(self, report_slug):
report = get_sample_report_config()
report.domain = self.domain
report.save()
self.addCleanup(report.delete)
report_app_config = ReportAppConfig(
report_id=report._id,
report_slug=report_slug,
)
report_app_config._report = report
return report_app_config
def _add_report_module(self, app, report_app_config):
report_module = app.add_module(ReportModule.new_module('Reports', None))
report_module.report_configs = [report_app_config]
app.save()
return report_module
def _add_form_with_report_reference(self, app, report_app_config):
other_module = app.add_module(Module.new_module('m0', None))
form = other_module.new_form('f0', None)
report_reference = "instance('commcare-reports:{}')/rows/row[0]/@index".format(report_app_config.report_slug)
form.source = self.get_xml('very_simple_form').decode('utf-8')
form.source = form.source.replace(
"""<bind nodeset="/data/question1" type="xsd:string"/>""",
"""<bind nodeset="/data/question1" type="xsd:string" calculate="{}"/>""".format(report_reference),
)
app.save()
return form
def _render_form(self, app, form):
with mock.patch('corehq.apps.app_manager.suite_xml.features.mobile_ucr.get_apps_in_domain', lambda d: [app]):
return form.render_xform().decode('utf-8')
| 42.564228 | 465 | 0.609428 |
c4c0099babc89d65a186de6b33dece415e74b6fb | 1,108 | py | Python | RDSV/build_sets.py | JeffT13/rd-diarization | 5fcf70f1a2b3f15de3cc66e3686844c6eacb4f15 | [
"Apache-2.0"
] | 3 | 2021-03-31T03:38:54.000Z | 2021-05-05T05:46:18.000Z | RDSV/build_sets.py | JeffT13/rd-diarization | 5fcf70f1a2b3f15de3cc66e3686844c6eacb4f15 | [
"Apache-2.0"
] | null | null | null | RDSV/build_sets.py | JeffT13/rd-diarization | 5fcf70f1a2b3f15de3cc66e3686844c6eacb4f15 | [
"Apache-2.0"
] | null | null | null | import os, sys, json, random
from math import floor
from param import *
cases = os.listdir(audio_path)
r_set = []
d_set = []
t_set = []
for d in train_dockets:
dock = [a for a in cases if a[:2]==d]
if seed is not None:
random.Random(seed).shuffle(dock)
else:
random.Random().shuffle(dock)
r_set.append(dock[:r_count])
d_set.append(dock[r_count:(r_count+d_count)])
r_set = [item for sublist in r_set for item in sublist]
d_set = [item for sublist in d_set for item in sublist]
split = floor(t_lim/len(test_dockets))
for t in test_dockets:
dock = [a for a in cases if a[:2]==t]
if seed is not None:
random.Random(seed).shuffle(dock)
else:
random.Random().shuffle(dock)
if t_lim is not None and split<len(dock):
t_set.append(dock[:split])
else:
t_set.append(dock)
t_set = [item for sublist in t_set for item in sublist]
set_dict = {'r':r_set, 'd':d_set, 't':t_set}
with open(set_path, 'w') as setfile:
json.dump(set_dict, setfile)
if verbose:
print('Case Set Dict Saved')
| 24.086957 | 55 | 0.631769 |
2918a733ab0ad0ecfadfadd2fee94ff41e1dc61e | 11,650 | py | Python | mongo_connector/doc_managers/mongo_doc_manager.py | vurankar/mongo-connector | 202aa28743855643fddd77d3e66bf1a640df3ed6 | [
"Apache-2.0"
] | 2 | 2018-09-20T15:52:48.000Z | 2021-04-25T07:20:38.000Z | mongo_connector/doc_managers/mongo_doc_manager.py | vurankar/mongo-connector | 202aa28743855643fddd77d3e66bf1a640df3ed6 | [
"Apache-2.0"
] | 13 | 2017-08-07T04:36:25.000Z | 2021-02-08T17:37:27.000Z | mongo_connector/doc_managers/mongo_doc_manager.py | vurankar/mongo-connector | 202aa28743855643fddd77d3e66bf1a640df3ed6 | [
"Apache-2.0"
] | 4 | 2018-10-22T17:30:46.000Z | 2020-07-07T21:24:48.000Z | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for MongoDB, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import logging
import pymongo
from bson import SON
from gridfs import GridFS
from mongo_connector import errors, constants
from mongo_connector.util import exception_wrapper
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
wrap_exceptions = exception_wrapper({
pymongo.errors.ConnectionFailure: errors.ConnectionFailed,
pymongo.errors.OperationFailure: errors.OperationFailed})
LOG = logging.getLogger(__name__)
__version__ = constants.__version__
"""MongoDB DocManager version information
This is packaged with mongo-connector so it shares the same version.
Downstream DocManager implementations should add their package __version__
string here, for example:
__version__ = '0.1.0'
"""
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to doc's is so that
multiple updates to the same doc reflect the most up to date version as
opposed to multiple, slightly different versions of a doc.
We are using MongoDB native fields for _id and ns, but we also store
them as fields in the document, due to compatibility issues.
"""
def __init__(self, url, **kwargs):
""" Verify URL and establish a connection.
"""
try:
self.mongo = pymongo.MongoClient(
url, **kwargs.get('clientOptions', {}))
except pymongo.errors.InvalidURI:
raise errors.ConnectionFailed("Invalid URI for MongoDB")
except pymongo.errors.ConnectionFailure:
raise errors.ConnectionFailed("Failed to connect to MongoDB")
self.chunk_size = kwargs.get('chunk_size', constants.DEFAULT_MAX_BULK)
self.use_single_meta_collection = kwargs.get(
'use_single_meta_collection',
False)
self.meta_collection_name = kwargs.get(
'meta_collection_name',
constants.DEFAULT_META_COLLECTION_NAME)
self.meta_collection_cap_size = kwargs.get(
'meta_collection_cap_size',
constants.DEFAULT_META_COLLECTION_CAP_SIZE)
# The '_id' field has to be unique, so if we will be writing data from
# different namespaces into single collection, we use a different field
# for storing the document id.
self.id_field = 'doc_id' if self.use_single_meta_collection else '_id'
self.meta_database = self.mongo["__mongo_connector"]
# Create the meta collection as capped if a single meta collection is
# preferred
if self.use_single_meta_collection:
if (self.meta_collection_name not in
self.meta_database.collection_names()):
self.meta_database.create_collection(
self.meta_collection_name,
capped=True,
size=self.meta_collection_cap_size)
meta_collection = self.meta_database[self.meta_collection_name]
meta_collection.create_index(self.id_field)
meta_collection.create_index([('ns', 1), ('_ts', 1)])
def _db_and_collection(self, namespace):
return namespace.split('.', 1)
def _get_meta_collection(self, namespace):
if self.use_single_meta_collection:
return self.meta_collection_name
else:
return namespace
@wrap_exceptions
def _meta_collections(self):
"""Provides the meta collections currently being used
"""
if self.use_single_meta_collection:
yield self.meta_collection_name
else:
for name in self.meta_database.collection_names(
include_system_collections=False):
yield name
def stop(self):
"""Stops any running threads
"""
LOG.info(
"Mongo DocManager Stopped: If you will not target this system "
"again with mongo-connector then you may drop the database "
"__mongo_connector, which holds metadata for Mongo Connector."
)
@wrap_exceptions
def handle_command(self, doc, namespace, timestamp):
db, _ = self._db_and_collection(namespace)
if doc.get('dropDatabase'):
for new_db in self.command_helper.map_db(db):
self.mongo.drop_database(new_db)
if doc.get('renameCollection'):
a = self.command_helper.map_namespace(doc['renameCollection'])
b = self.command_helper.map_namespace(doc['to'])
if a and b:
self.mongo.admin.command(
"renameCollection", a, to=b)
if doc.get('create'):
new_db, coll = self.command_helper.map_collection(
db, doc['create'])
if new_db:
self.mongo[new_db].create_collection(coll)
if doc.get('drop'):
new_db, coll = self.command_helper.map_collection(
db, doc['drop'])
if new_db:
self.mongo[new_db].drop_collection(coll)
@wrap_exceptions
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
db, coll = self._db_and_collection(namespace)
meta_collection_name = self._get_meta_collection(namespace)
self.meta_database[meta_collection_name].replace_one(
{self.id_field: document_id, "ns": namespace},
{self.id_field: document_id,
"_ts": timestamp,
"ns": namespace},
upsert=True)
no_obj_error = "No matching object found"
updated = self.mongo[db].command(
SON([('findAndModify', coll),
('query', {'_id': document_id}),
('update', update_spec),
('new', True)]),
allowable_errors=[no_obj_error])['value']
return updated
@wrap_exceptions
def upsert(self, doc, namespace, timestamp):
"""Update or insert a document into Mongo
"""
database, coll = self._db_and_collection(namespace)
meta_collection_name = self._get_meta_collection(namespace)
self.meta_database[meta_collection_name].replace_one(
{self.id_field: doc['_id'], "ns": namespace},
{self.id_field: doc['_id'],
"_ts": timestamp,
"ns": namespace},
upsert=True)
self.mongo[database][coll].replace_one(
{'_id': doc['_id']},
doc,
upsert=True)
@wrap_exceptions
def bulk_upsert(self, docs, namespace, timestamp):
def iterate_chunks():
dbname, collname = self._db_and_collection(namespace)
collection = self.mongo[dbname][collname]
meta_collection_name = self._get_meta_collection(namespace)
meta_collection = self.meta_database[meta_collection_name]
more_chunks = True
while more_chunks:
bulk = collection.initialize_ordered_bulk_op()
bulk_meta = meta_collection.initialize_ordered_bulk_op()
for i in range(self.chunk_size):
try:
doc = next(docs)
selector = {'_id': doc['_id']}
bulk.find(selector).upsert().replace_one(doc)
meta_selector = {self.id_field: doc['_id']}
bulk_meta.find(meta_selector).upsert().replace_one({
self.id_field: doc['_id'],
'ns': namespace,
'_ts': timestamp
})
except StopIteration:
more_chunks = False
if i > 0:
yield bulk, bulk_meta
break
if more_chunks:
yield bulk, bulk_meta
for bulk_op, meta_bulk_op in iterate_chunks():
try:
bulk_op.execute()
meta_bulk_op.execute()
except pymongo.errors.DuplicateKeyError as e:
LOG.warn('Continuing after DuplicateKeyError: '
+ str(e))
except pymongo.errors.BulkWriteError as bwe:
LOG.error(bwe.details)
raise e
@wrap_exceptions
def remove(self, document_id, namespace, timestamp):
"""Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields.
"""
database, coll = self._db_and_collection(namespace)
meta_collection = self._get_meta_collection(namespace)
doc2 = self.meta_database[meta_collection].find_one_and_delete(
{self.id_field: document_id})
if (doc2 and doc2.get('gridfs_id')):
GridFS(self.mongo[database], coll).delete(doc2['gridfs_id'])
else:
self.mongo[database][coll].delete_one({'_id': document_id})
@wrap_exceptions
def insert_file(self, f, namespace, timestamp):
database, coll = self._db_and_collection(namespace)
id = GridFS(self.mongo[database], coll).put(f, filename=f.filename)
meta_collection = self._get_meta_collection(namespace)
self.meta_database[meta_collection].replace_one(
{self.id_field: f._id, "ns": namespace},
{self.id_field: f._id, '_ts': timestamp,
'ns': namespace, 'gridfs_id': id},
upsert=True)
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query Mongo for documents in a time range.
"""
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(
{'_ts': {'$lte': end_ts, '$gte': start_ts}}):
yield ts_ns_doc
def commit(self):
""" Performs a commit
"""
return
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in Mongo.
"""
def docs_by_ts():
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(limit=-1).sort('_ts', -1):
yield ts_ns_doc
return max(docs_by_ts(), key=lambda x: x["_ts"])
| 38.448845 | 79 | 0.618112 |
6cfe8bda201ab302d00b02de41ebdcb6f2601e62 | 5,675 | py | Python | tests/unit/test_runner.py | magnologan/syntribos | e7da2d1e1e8b0724cff0d5d33e5cdaa01551bad2 | [
"Apache-2.0"
] | 277 | 2015-09-23T22:55:16.000Z | 2020-05-17T18:45:46.000Z | tests/unit/test_runner.py | magnologan/syntribos | e7da2d1e1e8b0724cff0d5d33e5cdaa01551bad2 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_runner.py | magnologan/syntribos | e7da2d1e1e8b0724cff0d5d33e5cdaa01551bad2 | [
"Apache-2.0"
] | 72 | 2016-01-04T18:57:06.000Z | 2020-05-07T14:07:30.000Z | # Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
import syntribos.config
from syntribos.runner import Runner
import syntribos.tests
syntribos.config.register_opts()
class RunnerUnittest(testtools.TestCase):
r = Runner()
common_endings = ["BODY", "HEADERS", "PARAMS", "URL"]
def _compare_tests(self, expected, loaded):
"""Compare list of expected test names with those that were loaded."""
# loaded_test_names = []
loaded_test_names = [x[0] for x in loaded]
self.assertEqual(expected, loaded_test_names)
def test_get_LDAP_tests(self):
"""Check that we get the proper LDAP tests."""
expected = ["LDAP_INJECTION_" + x for x in self.common_endings]
loaded_tests = self.r.get_tests(["LDAP"])
self._compare_tests(expected, loaded_tests)
def test_get_SQL_tests(self):
"""Check that we get the proper SQLi tests."""
expected = ["SQL_INJECTION_" + x for x in self.common_endings]
loaded_tests = self.r.get_tests(["SQL"])
self._compare_tests(expected, loaded_tests)
def test_get_XXE_tests(self):
"""Check that we get the proper XXE tests."""
expected = ["XML_EXTERNAL_ENTITY_BODY"]
loaded_tests = self.r.get_tests(["XML"])
self._compare_tests(expected, loaded_tests)
def test_get_int_overflow_tests(self):
"""Check that we get the proper integer overflow tests."""
expected = ["INTEGER_OVERFLOW_" + x for x in self.common_endings]
loaded_tests = self.r.get_tests(["INTEGER_OVERFLOW"])
self._compare_tests(expected, loaded_tests)
def test_get_buffer_overflow_tests(self):
"""Check that we get the proper buffer overflow tests."""
expected = ["BUFFER_OVERFLOW_" + x for x in self.common_endings]
loaded_tests = self.r.get_tests(["BUFFER_OVERFLOW"])
self._compare_tests(expected, loaded_tests)
def test_get_command_injection_tests(self):
"""Check that we get the proper command injection tests."""
expected = ["COMMAND_INJECTION_" + x for x in self.common_endings]
loaded_tests = self.r.get_tests(["COMMAND_INJECTION"])
self._compare_tests(expected, loaded_tests)
def test_get_string_validation_tests(self):
"""Check that we get the proper string validation tests."""
expected = [
"STRING_VALIDATION_" + x for x in self.common_endings
]
loaded_tests = self.r.get_tests(["STRING_VALIDATION"])
self._compare_tests(expected, loaded_tests)
def test_get_xss_test(self):
"""Check that we get only the XSS_BODY test from get_tests."""
expected = ["XSS_BODY"]
loaded_tests = self.r.get_tests(["XSS"])
self._compare_tests(expected, loaded_tests)
def test_get_ssl_test(self):
"""Check that we get only the SSL test from get_tests."""
expected = ["SSL_ENDPOINT_BODY"]
loaded_tests = self.r.get_tests(["SSL"])
self._compare_tests(expected, loaded_tests)
def test_get_cors_test(self):
"""Check that we get only the CORS_HEADER test from get_tests."""
expected = ["CORS_WILDCARD_HEADERS"]
loaded_tests = self.r.get_tests(["CORS_WILDCARD_HEADERS"])
self._compare_tests(expected, loaded_tests)
def test_get_sql_tests_exclude_header(self):
"""Check that we get the right SQL tests when "HEADER" is excluded."""
expected = [
"SQL_INJECTION_BODY", "SQL_INJECTION_PARAMS", "SQL_INJECTION_URL"]
loaded_tests = self.r.get_tests(["SQL"], ["HEADER"])
self._compare_tests(expected, loaded_tests)
def test_get_sql_tests_exclude_header_url(self):
"""Check that we get the right SQL tests, excluding HEADER/URL."""
expected = [
"SQL_INJECTION_BODY", "SQL_INJECTION_PARAMS"]
loaded_tests = self.r.get_tests(["SQL"], ["HEADER", "URL"])
self._compare_tests(expected, loaded_tests)
def test_get_sql_tests_exclude_header_url_body(self):
"""Check that we get the right SQL tests, excluding HEADER/URL/BODY."""
expected = ["SQL_INJECTION_PARAMS"]
loaded_tests = self.r.get_tests(["SQL"], ["HEADER", "URL", "BODY"])
self._compare_tests(expected, loaded_tests)
def test_get_rce_sql_tests_exclude_url_body(self):
"""Check that we get the right SQL tests, excluding HEADER/URL/BODY."""
expected = [
"SQL_INJECTION_HEADERS", "SQL_INJECTION_PARAMS",
"COMMAND_INJECTION_HEADERS", "COMMAND_INJECTION_PARAMS"]
loaded_tests = self.r.get_tests(["SQL", "COMMAND"], ["URL", "BODY"])
self._compare_tests(expected, loaded_tests)
def test_list_tests(self):
"""Check that we can list tests and exit successfully."""
self.r.list_tests()
def test_run_empty_tests(self):
"""Call Runner.run_given_tests with an empty list for sanity check."""
self.r.run_given_tests([], "", "")
def test_dry_run_empty_tests(self):
"""Call Runner.dry_run with empty list for sanity check."""
self.r.dry_run([], "", "", {})
| 42.037037 | 79 | 0.676828 |
21eb59ddc03c8a173779de776d39070d29471168 | 7,412 | py | Python | tests/components/cloud/test_client.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 3 | 2020-05-05T22:12:27.000Z | 2020-11-25T23:55:34.000Z | tests/components/cloud/test_client.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 5 | 2021-02-08T20:46:31.000Z | 2022-03-12T00:35:09.000Z | tests/components/cloud/test_client.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Test the cloud.iot module."""
from aiohttp import web
import pytest
from homeassistant.components.cloud import DOMAIN
from homeassistant.components.cloud.client import CloudClient
from homeassistant.components.cloud.const import PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from . import mock_cloud, mock_cloud_prefs
from tests.async_mock import AsyncMock, MagicMock, patch
from tests.components.alexa import test_smart_home as test_alexa
@pytest.fixture
def mock_cloud_inst():
"""Mock cloud class."""
return MagicMock(subscription_expired=False)
async def test_handler_alexa(hass):
"""Test handler Alexa."""
hass.states.async_set("switch.test", "on", {"friendly_name": "Test switch"})
hass.states.async_set("switch.test2", "on", {"friendly_name": "Test switch 2"})
await mock_cloud(
hass,
{
"alexa": {
"filter": {"exclude_entities": "switch.test2"},
"entity_config": {
"switch.test": {
"name": "Config name",
"description": "Config description",
"display_categories": "LIGHT",
}
},
}
},
)
mock_cloud_prefs(hass)
cloud = hass.data["cloud"]
resp = await cloud.client.async_alexa_message(
test_alexa.get_new_request("Alexa.Discovery", "Discover")
)
endpoints = resp["event"]["payload"]["endpoints"]
assert len(endpoints) == 1
device = endpoints[0]
assert device["description"] == "Config description via Home Assistant"
assert device["friendlyName"] == "Config name"
assert device["displayCategories"] == ["LIGHT"]
assert device["manufacturerName"] == "Home Assistant"
async def test_handler_alexa_disabled(hass, mock_cloud_fixture):
"""Test handler Alexa when user has disabled it."""
mock_cloud_fixture._prefs[PREF_ENABLE_ALEXA] = False
cloud = hass.data["cloud"]
resp = await cloud.client.async_alexa_message(
test_alexa.get_new_request("Alexa.Discovery", "Discover")
)
assert resp["event"]["header"]["namespace"] == "Alexa"
assert resp["event"]["header"]["name"] == "ErrorResponse"
assert resp["event"]["payload"]["type"] == "BRIDGE_UNREACHABLE"
async def test_handler_google_actions(hass):
"""Test handler Google Actions."""
hass.states.async_set("switch.test", "on", {"friendly_name": "Test switch"})
hass.states.async_set("switch.test2", "on", {"friendly_name": "Test switch 2"})
hass.states.async_set("group.all_locks", "on", {"friendly_name": "Evil locks"})
await mock_cloud(
hass,
{
"google_actions": {
"filter": {"exclude_entities": "switch.test2"},
"entity_config": {
"switch.test": {
"name": "Config name",
"aliases": "Config alias",
"room": "living room",
}
},
}
},
)
mock_cloud_prefs(hass)
cloud = hass.data["cloud"]
reqid = "5711642932632160983"
data = {"requestId": reqid, "inputs": [{"intent": "action.devices.SYNC"}]}
with patch(
"hass_nabucasa.Cloud._decode_claims",
return_value={"cognito:username": "myUserName"},
):
await cloud.client.get_google_config()
resp = await cloud.client.async_google_message(data)
assert resp["requestId"] == reqid
payload = resp["payload"]
assert payload["agentUserId"] == "myUserName"
devices = payload["devices"]
assert len(devices) == 1
device = devices[0]
assert device["id"] == "switch.test"
assert device["name"]["name"] == "Config name"
assert device["name"]["nicknames"] == ["Config name", "Config alias"]
assert device["type"] == "action.devices.types.SWITCH"
assert device["roomHint"] == "living room"
async def test_handler_google_actions_disabled(hass, mock_cloud_fixture):
"""Test handler Google Actions when user has disabled it."""
mock_cloud_fixture._prefs[PREF_ENABLE_GOOGLE] = False
with patch("hass_nabucasa.Cloud.start"):
assert await async_setup_component(hass, "cloud", {})
reqid = "5711642932632160983"
data = {"requestId": reqid, "inputs": [{"intent": "action.devices.SYNC"}]}
cloud = hass.data["cloud"]
resp = await cloud.client.async_google_message(data)
assert resp["requestId"] == reqid
assert resp["payload"]["errorCode"] == "deviceTurnedOff"
async def test_webhook_msg(hass):
"""Test webhook msg."""
with patch("hass_nabucasa.Cloud.start"):
setup = await async_setup_component(hass, "cloud", {"cloud": {}})
assert setup
cloud = hass.data["cloud"]
await cloud.client.prefs.async_initialize()
await cloud.client.prefs.async_update(
cloudhooks={
"hello": {"webhook_id": "mock-webhook-id", "cloudhook_id": "mock-cloud-id"}
}
)
received = []
async def handler(hass, webhook_id, request):
"""Handle a webhook."""
received.append(request)
return web.json_response({"from": "handler"})
hass.components.webhook.async_register("test", "Test", "mock-webhook-id", handler)
response = await cloud.client.async_webhook_message(
{
"cloudhook_id": "mock-cloud-id",
"body": '{"hello": "world"}',
"headers": {"content-type": "application/json"},
"method": "POST",
"query": None,
}
)
assert response == {
"status": 200,
"body": '{"from": "handler"}',
"headers": {"Content-Type": "application/json"},
}
assert len(received) == 1
assert await received[0].json() == {"hello": "world"}
async def test_google_config_expose_entity(hass, mock_cloud_setup, mock_cloud_login):
    """Test Google config exposing entity method uses latest config."""
    cloud_client = hass.data[DOMAIN].client
    state = State("light.kitchen", "on")
    gconf = await cloud_client.get_google_config()
    assert gconf.should_expose(state)
    # Updating prefs after the config object was created must still be
    # reflected by the already-obtained gconf instance.
    await cloud_client.prefs.async_update_google_entity_config(
        entity_id="light.kitchen", should_expose=False
    )
    assert not gconf.should_expose(state)
async def test_google_config_should_2fa(hass, mock_cloud_setup, mock_cloud_login):
    """Test Google config disabling 2FA method uses latest config."""
    cloud_client = hass.data[DOMAIN].client
    gconf = await cloud_client.get_google_config()
    state = State("light.kitchen", "on")
    assert gconf.should_2fa(state)
    # As above: the existing config object must read the updated preference.
    await cloud_client.prefs.async_update_google_entity_config(
        entity_id="light.kitchen", disable_2fa=True
    )
    assert not gconf.should_2fa(state)
async def test_set_username(hass):
    """Test we set username during login."""
    prefs = MagicMock(
        alexa_enabled=False,
        google_enabled=False,
        async_set_username=AsyncMock(return_value=None),
    )
    client = CloudClient(hass, prefs, None, {}, {})
    client.cloud = MagicMock(is_logged_in=True, username="mock-username")
    await client.logged_in()
    # logged_in() must forward the cloud username into the preferences store.
    assert len(prefs.async_set_username.mock_calls) == 1
    assert prefs.async_set_username.mock_calls[0][1][0] == "mock-username"
| 32.226087 | 87 | 0.636805 |
7cb53e804b08a4942950729372d9387d3efadf54 | 15,779 | py | Python | alipay/aop/api/domain/AlipayUserAgreementPageSignModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserAgreementPageSignModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserAgreementPageSignModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AccessParams import AccessParams
from alipay.aop.api.domain.DeviceParams import DeviceParams
from alipay.aop.api.domain.IdentityParams import IdentityParams
from alipay.aop.api.domain.PeriodRuleParams import PeriodRuleParams
from alipay.aop.api.domain.ProdParams import ProdParams
from alipay.aop.api.domain.SpecifiedAsset import SpecifiedAsset
from alipay.aop.api.domain.SpecifiedChannelParam import SpecifiedChannelParam
from alipay.aop.api.domain.SubMerchantParams import SubMerchantParams
from alipay.aop.api.domain.ZmAuthParams import ZmAuthParams
class AlipayUserAgreementPageSignModel(object):
    """Request model for ``alipay.user.agreement.page.sign``.

    The original class repeated the same property/serialization boilerplate
    23 times; it is replaced here by a single field table.  The public
    interface is unchanged: every field is a property backed by a
    ``_<name>`` attribute, assigning a plain dict to a complex field coerces
    it through ``<Type>.from_alipay_dict``, and ``to_alipay_dict`` /
    ``from_alipay_dict`` behave exactly as before.
    """

    # Fields holding plain scalar/dict values, stored as assigned.
    _SIMPLE_FIELDS = (
        'agreement_effect_type',
        'allow_huazhi_degrade',
        'effect_time',
        'external_agreement_no',
        'external_logon_id',
        'merchant_process_url',
        'pass_params',
        'personal_product_code',
        'product_code',
        'promo_params',
        'sign_scene',
        'sign_validity_period',
        'third_party_type',
        'user_age_range',
    )

    # Fields backed by a nested model class.  The lambdas resolve the model
    # class lazily at assignment time (mirroring the original setters) and
    # pass through values that already are instances of the model.
    _COMPLEX_FIELDS = {
        'access_params': lambda v: v if isinstance(v, AccessParams) else AccessParams.from_alipay_dict(v),
        'device_params': lambda v: v if isinstance(v, DeviceParams) else DeviceParams.from_alipay_dict(v),
        'identity_params': lambda v: v if isinstance(v, IdentityParams) else IdentityParams.from_alipay_dict(v),
        'period_rule_params': lambda v: v if isinstance(v, PeriodRuleParams) else PeriodRuleParams.from_alipay_dict(v),
        'prod_params': lambda v: v if isinstance(v, ProdParams) else ProdParams.from_alipay_dict(v),
        'specified_asset': lambda v: v if isinstance(v, SpecifiedAsset) else SpecifiedAsset.from_alipay_dict(v),
        'specified_sort_channel_params': lambda v: v if isinstance(v, SpecifiedChannelParam) else SpecifiedChannelParam.from_alipay_dict(v),
        'sub_merchant': lambda v: v if isinstance(v, SubMerchantParams) else SubMerchantParams.from_alipay_dict(v),
        'zm_auth_params': lambda v: v if isinstance(v, ZmAuthParams) else ZmAuthParams.from_alipay_dict(v),
    }

    _ALL_FIELDS = _SIMPLE_FIELDS + tuple(_COMPLEX_FIELDS)

    def __init__(self):
        # Create every backing attribute, all None until explicitly set.
        for name in self._ALL_FIELDS:
            setattr(self, '_' + name, None)

    def to_alipay_dict(self):
        """Serialize every truthy field; nested models are expanded."""
        params = dict()
        for name in self._ALL_FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayUserAgreementPageSignModel()
        for name in AlipayUserAgreementPageSignModel._ALL_FIELDS:
            if name in d:
                # Assign through the property so complex fields coerce.
                setattr(o, name, d[name])
        return o


def _install_alipay_field(cls, name, convert=None):
    """Attach a ``_<name>``-backed property for *name* on *cls*."""
    attr = '_' + name

    def fget(self):
        return getattr(self, attr)

    def fset(self, value):
        setattr(self, attr, convert(value) if convert is not None else value)

    setattr(cls, name, property(fget, fset))


for _field in AlipayUserAgreementPageSignModel._SIMPLE_FIELDS:
    _install_alipay_field(AlipayUserAgreementPageSignModel, _field)
for _field, _convert in AlipayUserAgreementPageSignModel._COMPLEX_FIELDS.items():
    _install_alipay_field(AlipayUserAgreementPageSignModel, _field, _convert)
del _field, _convert
| 38.769042 | 109 | 0.654541 |
55359e05d27b5a1b86a8b1eb2dbded8d5d684ef1 | 1,581 | py | Python | tests/test_word_rtd.py | hasibaasma/alfpy | c8c0c1300108015746320cede2207ac57e630d3e | [
"MIT"
] | 19 | 2017-02-20T17:42:02.000Z | 2021-12-16T19:07:17.000Z | tests/test_word_rtd.py | eggleader/alfpy | e0782e9551458ef17ab29df8af13fc0f8925e894 | [
"MIT"
] | 3 | 2018-03-12T23:54:27.000Z | 2020-12-09T21:53:19.000Z | tests/test_word_rtd.py | eggleader/alfpy | e0782e9551458ef17ab29df8af13fc0f8925e894 | [
"MIT"
] | 6 | 2016-12-06T09:12:04.000Z | 2021-09-24T14:40:47.000Z | import unittest
from alfpy import word_pattern
from alfpy import word_rtd
from alfpy.utils import distmatrix
from . import utils
class Test(unittest.TestCase, utils.ModulesCommonTest):
    """Unit tests for alfpy.word_rtd (return time distribution distances)."""

    def __init__(self, *args, **kwargs):
        super(Test, self).__init__(*args, **kwargs)
        # Shared peptide fixture plus its 2-mer positional pattern.
        utils.ModulesCommonTest.set_test_data()
        self.pep_2mer_pos = word_pattern.create(
            self.pep_records.seq_list, 2, True)

    def test_calc_rtd(self):
        # Return-time stats computed over the gaps between 'A' positions.
        seq = 'CTACACAACTTTGCGGGTAGCCGGAAACATTGTGAATGCGGTGAACA'
        apos = [i for i, nt in enumerate(seq) if nt == 'A']
        val = word_rtd.calc_rtd(apos)
        exp = (3.3846153846153846, 3.1510306381944679)
        self.assertEqual(val, exp)

    def test_create_vector(self):
        # Two columns per pattern: one for each return-time statistic.
        vec = word_rtd.create_vector(self.pep_records.count, self.pep_2mer_pos)
        exp = (self.pep_records.count, len(self.pep_2mer_pos.pat_list)*2)
        self.assertEqual(vec.shape, exp)

    def test_distance(self):
        vec = word_rtd.create_vector(self.pep_records.count, self.pep_2mer_pos)
        dist = word_rtd.Distance(vec, 'google')
        matrix = distmatrix.create(self.pep_records.id_list, dist)
        # NOTE(review): whitespace inside these expected strings may have
        # been collapsed in transit — verify column alignment against the
        # actual distmatrix.format() output.
        exp = [
            " 4",
            "seq1 0.0000000 0.4892241 0.6034483 0.9310345",
            "seq2 0.4892241 0.0000000 0.3673469 0.8802817",
            "seq3 0.6034483 0.3673469 0.0000000 0.8843537",
            "seq4 0.9310345 0.8802817 0.8843537 0.0000000"
        ]
        self.assertEqual(matrix.format(), "\n".join(exp))
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 34.369565 | 79 | 0.646426 |
defa6f1e58618068f279f3f97f49bbac41760ade | 13,147 | py | Python | src/model.py | jahrmarkt/TowerGame | 7d15b72a2dd7bdd1bb31795129d1261c021e07e4 | [
"MIT"
] | null | null | null | src/model.py | jahrmarkt/TowerGame | 7d15b72a2dd7bdd1bb31795129d1261c021e07e4 | [
"MIT"
] | null | null | null | src/model.py | jahrmarkt/TowerGame | 7d15b72a2dd7bdd1bb31795129d1261c021e07e4 | [
"MIT"
] | null | null | null | from pygame.sprite import *
from pygame.locals import *
from pygame.mixer import *
from math import *
from random import randint
from enum import Enum
from tower import *
from towerData import *
# definitions
class Ground(Sprite):
    """One background tile with a randomly picked grass sprite (grass1-4)."""

    def __init__(self, x, y):
        Sprite.__init__(self)
        self.image = loadSprite("grass" + str(randint(1, 4)))
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
class City(Sprite):
    """Sprite tracking the city's position and remaining health."""

    def __init__(self, x, y):
        Sprite.__init__(self)
        self.image = spriteCity2
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.maxHealth = 200
        self.health = self.maxHealth
class Enemy(Sprite):
    """A single attacker moving straight down (vx=0, vy=speed)."""

    def __init__(self, x, y, enemyType, sprite, speed, damage, health, ):
        Sprite.__init__(self)
        self.image = sprite
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.vx = 0
        self.vy = speed
        # Float position mirrors: rect coordinates are ints, so sub-pixel
        # movement would be lost if stored only on the rect.
        self.x = x
        self.y = y
        self.enemyType = enemyType
        self.health = health
        self.speed = speed
        self.damage = damage
        # Attack bookkeeping; attackTimer counts frames once in position.
        self.onAttackPosition = False
        self.attackTimer = 0
        self.attackTime = 10

    def update(self):
        # Advance by the float velocity, then sync the drawing rect.
        self.x += float(self.vx)
        self.y += float(self.vy)
        self.rect.centerx = self.x
        self.rect.centery = self.y
        if self.onAttackPosition:
            self.attackTimer = self.attackTimer + 1
# Level
# slot = list of spawns
# spawn = (time, column, enemyType)
class LevelSlot(object):
    """One wave: a list of (time, column, enemyType) spawn tuples."""

    def __init__(self, spawns):
        self.spawns = spawns
class Level(object):
    """A level: its city, its ordered wave slots, and the slot duration."""

    def __init__(self):
        self.city = City(100, 100)
        self.slots = []
        self.slotTime = 20
# spawn = (time, column, enemyType)
def makeLevel1():
    """Build the first level: city placement, slot cadence and spawn waves.

    Each spawn is a ``(time, column, enemy_type)`` tuple; the per-wave enemy
    type is appended by a helper so the raw tables stay compact.
    """
    def _with_type(spawns, enemy_type):
        # Tag every (time, column) pair with the wave's enemy type.
        return [spawn + (enemy_type,) for spawn in spawns]

    level1 = Level()
    level1.city = City(232, 640)
    level1.slotTime = 10
    normal = _with_type([(0, 0), (1, 1), (2, 2), (3, 3), (2, 4), (1, 5), (0, 6), (4, 7)], EnemyType.NORMAL)
    big = _with_type([(0, 2), (5, 3), (9, 4)], EnemyType.BIG)
    horse = _with_type([(0, 2), (0, 5)], EnemyType.HORSE)
    ghost = _with_type([(0, 2), (5, 5), (7, 9)], EnemyType.GHOST)
    # Wave order interleaves the easier slots between the heavier ones.
    level1.slots = \
        [LevelSlot(normal)
         , LevelSlot(big)
         , LevelSlot(horse)
         , LevelSlot(normal)
         , LevelSlot(big)
         , LevelSlot(normal)
         , LevelSlot(ghost)
         , LevelSlot(horse)
         ]
    return level1
def spawnEnemies(level, time):
    """Return the enemies whose spawn entries match the current game time.

    *time* is the raw tick counter; it is scaled down by 50 so one
    spawn-table time unit corresponds to 50 ticks.
    """
    time = time / 50
    enemies = []
    # Locate the active level slot and the time offset within it.
    slotIndex = int(time / int(level.slotTime)) % len(level.slots)
    relTime = (time % level.slotTime)
    slot = level.slots[slotIndex]
    # Collect every spawn scheduled exactly at this offset.
    for s in slot.spawns:
        (t, col, enemyType) = s
        (sprite, speed, damage, health) = enemyList[enemyType]
        if t == relTime:
            # NOTE(review): `time / 50` is a float, so `t == relTime` only
            # matches when the caller passes multiples of 50 ticks — confirm.
            # Enemy spawns centered in grid column `col` at the top edge.
            enemies.append(Enemy(col * 64 - 32, 0, enemyType, sprite, speed, damage, health))
    return enemies
class TowerMenuItem(Sprite):
    """Clickable menu entry carrying the tower type it builds."""

    def __init__(self, x, y, image, towerType):
        Sprite.__init__(self)
        self.image = image
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
        # Clickable area is 62x62, slightly inside the 64px menu cell.
        self.rect.size = 62, 62
        self.towerType = towerType
class TowerMenu(object):
    """Fixed sidebar menu (at x=448) listing every entry in towerList."""

    def __init__(self):
        self.items = []
        self.activeItemIndex = 0
        x = 448
        y = 0
        for index, key in enumerate(towerList):
            (spr, _, _, _, _, _, icon) = towerList[key]
            self.items.append(TowerMenuItem(x, y + index * 64, icon, key))

    def draw(self, screen):
        # Outline every entry; the active one is highlighted green.
        for index, it in enumerate(self.items):
            col = (0, 0, 0) if index != self.activeItemIndex else (0, 200, 0)
            pl = [it.rect.topleft, it.rect.topright, it.rect.bottomright, it.rect.bottomleft]
            pygame.draw.lines(screen, col, True, pl, 2)
        s = RenderPlain(self.items)
        s.draw(screen)

    def getTowerKey(self, cursor):
        """Return the tower type under *cursor* (activating it), else None."""
        for index, i in enumerate(self.items):
            if i.rect.collidepoint(cursor):
                self.activeItemIndex = index
                return self.items[self.activeItemIndex].towerType
        return None
class RandomTowerMenu(object):
    """Vertical menu (max 8 slots) whose entries can be added and removed."""

    def __init__(self):
        self.items = []
        self.activeItemIndex = 0
        self.maxLength = 8
        self.x = 448  # topleft of the menu column
        self.y = 0  # topleft

    def addRandomItem(self, input):
        """Append a random entry from *input* (a sequence of tower types)."""
        # NOTE: `input` shadows the builtin of the same name.
        if len(self.items) == self.maxLength:
            return
        index = randint(0, len(input) - 1)
        key = input[index]
        numItems = len(self.items)
        (spr, _, _, _, _, _, icon) = towerList[key]
        self.items.append(TowerMenuItem(self.x, self.y + numItems * 64, icon, key))
        self.activeItemIndex = numItems

    def addItem(self, key):
        """Append an entry for tower type *key* and make it the active one."""
        if len(self.items) == self.maxLength:
            return
        numItems = len(self.items)
        (spr, _, _, _, _, _, icon) = towerList[key]
        self.items.append(TowerMenuItem(self.x, self.y + numItems * 64, icon, key))
        self.activeItemIndex = numItems

    def removeActiveItem(self):
        """Delete the active entry and re-stack the remaining ones."""
        # NOTE(review): activeItemIndex is not clamped afterwards and can
        # point one past the end until the next addItem/getTowerKey —
        # confirm callers tolerate that.
        if (len(self.items) > 0):
            del self.items[self.activeItemIndex]
            # The None-filter is a no-op after `del`; kept for safety.
            self.items = [item for item in self.items if item is not None]
            for index, item in enumerate(self.items):
                item.rect.y = self.y + index * 64

    def draw(self, screen):
        # Outline every entry; the active one is highlighted green.
        for index, it in enumerate(self.items):
            col = (0, 0, 0) if index != self.activeItemIndex else (0, 200, 0)
            pl = [it.rect.topleft, it.rect.topright, it.rect.bottomright, it.rect.bottomleft]
            pygame.draw.lines(screen, col, True, pl, 2)
        s = RenderPlain(self.items)
        s.draw(screen)

    def getTowerKey(self, cursor):
        """Return the tower type under *cursor* (activating it), else None."""
        for index, i in enumerate(self.items):
            if i.rect.collidepoint(cursor):
                self.activeItemIndex = index
                return self.items[self.activeItemIndex].towerType
        return None
# gerichter Graph
# pro Tower eine Liste von Connections die auf ihn zeigen
# nicht umgekehrt
class TowerGraph(object):
    """Directed graph of tower boost links.

    ``connections[i]`` lists the towers pointing AT tower ``i`` (incoming
    edges), one list per registered tower — not the other way around.
    """

    def __init__(self):
        self.connections = []

    def addTower(self):
        """Register a new tower with no incoming edges yet."""
        self.connections.append([])

    def addConnection(self, index, tower):
        """Record that *tower* points at the tower at *index*."""
        self.connections[index].append(tower)

    def removeConnectionOfBoostTower(self, towerIndex):
        """Drop every edge originating from *towerIndex*."""
        for slot, incoming in enumerate(self.connections):
            self.connections[slot] = [src for src in incoming if src != towerIndex]

    def getAllTowers(self, index):
        """Return all towers reachable by walking incoming edges from *index*.

        Only call for non-boost towers — a cycle would recurse forever.
        Order: direct predecessors first, then each one's subtree in turn.
        """
        direct = self.connections[index]
        reached = list(direct)
        for neighbour in direct:
            reached.extend(self.getAllTowers(neighbour))
        return reached
class Input(object):
    """Per-frame snapshot of mouse/keyboard state fed to the game loop."""

    def __init__(self):
        self.cursor = (0, 0)
        self._clear()

    def _clear(self):
        # Everything except the cursor position is transient per frame.
        self.mouseLeftPressed = False
        self.mouseRightPressed = False
        self.mouseLeftReleased = False
        self.returnKey = False
        self.timer = 0
        self.numberPressed = False
        self.numberKey = 0

    def reset(self):
        """Clear all one-shot flags; the cursor position is kept."""
        self._clear()
def setCell(x, y, gridWidth, grid, v):
    """Write value *v* at column *x*, row *y* of the flat row-major *grid*."""
    cell_index = y * gridWidth + x
    grid[cell_index] = v
def getCell(x, y, gridWidth, grid):
    """Read the value at column *x*, row *y* of the flat row-major *grid*."""
    cell_index = y * gridWidth + x
    return grid[cell_index]
# enums
class CellType(Enum):
    """Build state of one grid cell on the playing field."""
    FREE = 1
    OCCUPIED = 2
    FOUNDATION = 3
    CITY = 4
class EnemyType(Enum):
    """Keys into enemyList (sprite, speed, damage, health per type)."""
    NORMAL = 1
    BIG = 2
    HORSE = 3
    GHOST = 4
class GameState(Enum):
    """Coarse state of the game loop."""
    RUN = 1
    RESTART = 2
    PAUSE = 3
    INTRO = 4
# Tower types pre-loaded into the build menu at game start (menu holds 8).
# NOTE(review): TowerType.ROCKY appears twice — looks unintentional since
# the menu caps at 8 entries; confirm whether another type was meant.
allTowerTypes = [TowerType.ROCKY, TowerType.FIRE, TowerType.ROCKY, TowerType.HIGH, TowerType.HIGHBLUE,
                 TowerType.ROCKYBOOST, TowerType.YELLOW, TowerType.LIBRARY]
class Model(object):
    """Top-level game state: level, entities, grid, and UI menus."""

    def __init__(self, screenWidth, screenHeight):
        startLevel = makeLevel1()
        self.level = startLevel
        # Tower build menu, pre-filled with every available tower type.
        self.towerMenu = RandomTowerMenu()
        for a in allTowerTypes:
            self.towerMenu.addItem(a)
        # Entities
        self.towers = []
        self.projectiles = []
        self.enemies = []
        self.effects = []
        self.city = startLevel.city
        # Gameplay tuning / counters
        self.towerGraph = TowerGraph()
        self.spawnrate = 60
        self.towerSpawnRate = 1600
        self.towerSpawnTimer = 0
        self.enemiesCreated = 0
        self.enemiesKilled = 0
        self.boostRange = 150  # presumably max pixel distance for boost links — TODO confirm
        # UI / interaction state
        self.gameState = GameState.RUN
        self.towerListKey = self.towerMenu.getTowerKey((450, 1))  # preselect top menu entry
        self.activeTower = None
        self.activeTowerIndex = None
        self.pressOnBoost = False  # for picking boost target
        self.lastPressPosition = None
        self.mouseLeftDown = False
        self.screenWidth = screenWidth
        self.screenHeight = screenHeight
        # Playing field: gridWidth x gridHeight cells of gridSize pixels.
        self.gridWidth = 7
        self.gridHeight = 11
        self.gridSize = 64
        self.groundSprites = []
        self.foundationImage = spriteFoundation3
        # 32px grass tiles covering the whole 448x704 background.
        for x in range(14):
            for y in range(22):
                self.groundSprites.append(Ground(x * 32, y * 32))
        self.grid = []
        for x in range(self.gridWidth * self.gridHeight):
            self.grid.append(CellType.FREE)
        # Mark the 3x2 city footprint at the bottom of the grid.
        setCell(2, 9, self.gridWidth, self.grid, CellType.CITY)
        setCell(3, 9, self.gridWidth, self.grid, CellType.CITY)
        setCell(4, 9, self.gridWidth, self.grid, CellType.CITY)
        setCell(2, 10, self.gridWidth, self.grid, CellType.CITY)
        setCell(3, 10, self.gridWidth, self.grid, CellType.CITY)
        setCell(4, 10, self.gridWidth, self.grid, CellType.CITY)
        # Starting buildable foundation cells just above the city.
        setCell(2, 7, self.gridWidth, self.grid, CellType.FOUNDATION)
        setCell(3, 6, self.gridWidth, self.grid, CellType.FOUNDATION)
        setCell(4, 7, self.gridWidth, self.grid, CellType.FOUNDATION)
# assets
# Sprites
def loadSprite(n):
    """Load sprite ``assets/sprites/<n>.gif`` with per-pixel alpha."""
    filename = "assets/sprites/" + n + ".gif"
    return pygame.image.load(filename).convert_alpha()
def loadSprite2(n):
    """Load sprite ``assets/sprites/<n>.png`` with per-pixel alpha."""
    filename = "assets/sprites/" + n + ".png"
    return pygame.image.load(filename).convert_alpha()
# Static sprites; spriteCity2 is scaled to span three 64px grid cells.
spriteAttack = loadSprite("attack1")
spriteCity = loadSprite("city1")
spriteCity2 = pygame.transform.scale(loadSprite2("city1"), (3 * 64, 128))
spriteFoundation = loadSprite("foundation1")
spriteFoundation2 = loadSprite("foundation2")
spriteFoundation3 = loadSprite("foundation3")
spriteGrass1 = loadSprite("grass1")
spriteGrass2 = loadSprite("grass2")
spriteGrass3 = loadSprite("grass3")
spriteGrass4 = loadSprite("grass4")
# Animations
def loadAnimation(n, num):
    """Load the frames ``assets/sprites/animations/<n>1.png`` .. ``<n><num>.png``.

    Returns the frames, in order, as a list of per-pixel-alpha surfaces.
    (The original implementation's loop variable shadowed the parameter
    ``n``; the comprehension below avoids that.)
    """
    return [
        pygame.image.load("assets/sprites/animations/" + n + str(index) + ".png").convert_alpha()
        for index in range(1, num + 1)
    ]
# Frame lists for the boost and explosion animations.
animBoost1 = loadAnimation("boost", 6)
animBoost2 = loadAnimation("boosta", 1)
animBoost3 = loadAnimation("boostb", 2)
animBoost4 = loadAnimation("boostc", 15)
animExplosion1 = loadAnimation("explosion", 7)
# Sounds
def loadSound(n):
    """Load sound effect ``assets/sounds/<n>.wav``."""
    return pygame.mixer.Sound("assets/sounds/" + n + ".wav")
# Sound effects (the original "# Enemy" label here was misplaced).
soundHit = loadSound("hit1")
soundCoin = loadSound("coin1")
soundTower = loadSound("setTower")
soundShoot = loadSound("shoot1")
soundExplosion1 = loadSound("explosion1")
soundHit2 = loadSound("hit2")
soundSelect1 = loadSound("select1")
soundPowerUp = loadSound("powerup2")
# Enemy sprites
spriteEnemy = loadSprite("enemy2")
spriteEnemyBig = loadSprite("enemybig")
spriteEnemyGhost = loadSprite("enemyghost")
spriteEnemyHorse = loadSprite("enemyhorse")
# Per-enemy-type stats: (sprite, speed, damage, health)
enemyList = \
    {EnemyType.NORMAL: (spriteEnemy, 0.8, 1, 5),
     EnemyType.BIG: (spriteEnemyBig, 0.1, 4, 15),
     EnemyType.HORSE: (spriteEnemyHorse, 0.6, 2, 3),
     EnemyType.GHOST: (spriteEnemyGhost, 0.3, 2, 3),
     }
| 28.09188 | 102 | 0.610481 |
973dd05c60b3c57005ca4bf508478e6066ae102b | 28,573 | py | Python | ClinicalTransformerNER/src/transformer_ner/model.py | brajagopalcse/SDoH_NLPend2end | 28743f45b3e6880624a48db4eeb6bdf3e3f4e27a | [
"MIT"
] | 5 | 2021-09-03T13:07:36.000Z | 2022-01-06T03:22:09.000Z | ClinicalTransformerNER/src/transformer_ner/model.py | brajagopalcse/SDoH_NLPend2end | 28743f45b3e6880624a48db4eeb6bdf3e3f4e27a | [
"MIT"
] | 4 | 2021-09-03T01:31:04.000Z | 2022-01-13T15:11:09.000Z | ClinicalTransformerNER/src/transformer_ner/model.py | brajagopalcse/SDoH_NLPend2end | 28743f45b3e6880624a48db4eeb6bdf3e3f4e27a | [
"MIT"
] | 1 | 2022-01-06T16:59:46.000Z | 2022-01-06T16:59:46.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
We tried to implement a common class BertLikeNER for BERT, ROBERTA, ALBERT, DISTILBERT
to share the common forward() function;
However, such implementation will dramatically influence the model converge process.
The current implementation has repeated code but will guarantee the performance for each model.
"""
from transformers import (BertConfig, BertModel, BertPreTrainedModel,
RobertaModel, RobertaConfig, PreTrainedModel,
XLNetModel, XLNetPreTrainedModel, XLNetConfig,
AlbertModel, AlbertConfig,
DistilBertConfig, DistilBertModel,
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BartConfig, BartModel, ElectraForTokenClassification,
ElectraModel, XLNetForTokenClassification, AlbertPreTrainedModel,
RobertaForTokenClassification, LongformerForTokenClassification, LongformerModel,
DebertaModel, DebertaPreTrainedModel)
from torch import nn
import torch
class Transformer_CRF(nn.Module):
    """Linear-chain CRF applied on top of per-token emission scores.

    ``forward`` returns the Viterbi best path, its score, and the loss:
    the mean of (log partition function - gold-path score), i.e. the
    negative log-likelihood.
    """

    def __init__(self, num_labels, start_label_id):
        super().__init__()
        self.num_labels = num_labels
        self.start_label_id = start_label_id
        # Learned transition scores; the recursions below broadcast so that
        # (presumably) transitions[i, j] scores moving from label j to i.
        self.transitions = nn.Parameter(torch.randn(self.num_labels, self.num_labels), requires_grad=True)
        # Zero-filled, non-trainable templates: expanded per batch inside
        # the recursions and re-zeroed by reset_layers() after each forward.
        self.log_alpha = nn.Parameter(torch.zeros(1, 1, 1), requires_grad=False)
        self.score = nn.Parameter(torch.zeros(1, 1), requires_grad=False)
        self.log_delta = nn.Parameter(torch.zeros(1, 1, 1), requires_grad=False)
        self.psi = nn.Parameter(torch.zeros(1, 1, 1), requires_grad=False)
        self.path = nn.Parameter(torch.zeros(1, 1, dtype=torch.long), requires_grad=False)

    @staticmethod
    def log_sum_exp_batch(log_Tensor, axis=-1):
        """Numerically stable log-sum-exp over *axis*."""
        # shape (batch_size,n,m)
        return torch.max(log_Tensor, axis)[0] + \
            torch.log(torch.exp(log_Tensor - torch.max(log_Tensor, axis)[0].view(log_Tensor.shape[0], -1, 1)).sum(axis))

    def reset_layers(self):
        # Zero the reusable template parameters in place for the next batch.
        self.log_alpha = self.log_alpha.fill_(0.)
        self.score = self.score.fill_(0.)
        self.log_delta = self.log_delta.fill_(0.)
        self.psi = self.psi.fill_(0.)
        self.path = self.path.fill_(0)

    def forward(self, feats, label_ids):
        """Compute NLL loss and Viterbi decode in one pass.

        feats: emission scores (batch, seq, num_labels);
        label_ids: gold labels (batch, seq).
        """
        forward_score = self._forward_alg(feats)
        max_logLL_allz_allx, path, gold_score = self._crf_decode(feats, label_ids)
        # NLL: log partition minus gold-path score, averaged over the batch.
        loss = torch.mean(forward_score - gold_score)
        self.reset_layers()
        return path, max_logLL_allz_allx, loss

    def _forward_alg(self, feats):
        """alpha-recursion or forward recursion; to compute the partition function"""
        # feats -> (batch size, num_labels)
        seq_size = feats.shape[1]
        batch_size = feats.shape[0]
        # Only the designated start label gets non-negligible initial mass.
        log_alpha = self.log_alpha.expand(batch_size, 1, self.num_labels).clone().fill_(-10000.)
        log_alpha[:, 0, self.start_label_id] = 0
        for t in range(1, seq_size):
            log_alpha = (self.log_sum_exp_batch(self.transitions + log_alpha, axis=-1) + feats[:, t]).unsqueeze(1)
        return self.log_sum_exp_batch(log_alpha)

    def _crf_decode(self, feats, label_ids):
        """Viterbi decode plus scoring of the provided gold label sequence."""
        seq_size = feats.shape[1]
        batch_size = feats.shape[0]
        batch_transitions = self.transitions.expand(batch_size, self.num_labels, self.num_labels)
        batch_transitions = batch_transitions.flatten(1)
        score = self.score.expand(batch_size, 1)
        log_delta = self.log_delta.expand(batch_size, 1, self.num_labels).clone().fill_(-10000.)
        log_delta[:, 0, self.start_label_id] = 0
        psi = self.psi.expand(batch_size, seq_size, self.num_labels).clone()
        for t in range(1, seq_size):
            # Gold-path score: transition (prev -> current) + emission of
            # the gold label at step t.
            score = score + \
                batch_transitions.gather(-1, (label_ids[:, t] * self.num_labels + label_ids[:, t-1]).view(-1, 1)) + \
                feats[:, t].gather(-1, label_ids[:, t].view(-1, 1)).view(-1, 1)
            # Viterbi: best predecessor per label recorded in psi.
            log_delta, psi[:, t] = torch.max(self.transitions + log_delta, -1)
            log_delta = (log_delta + feats[:, t]).unsqueeze(1)
        # trace back
        path = self.path.expand(batch_size, seq_size).clone()
        # NOTE(review): squeeze() drops the batch dim when batch_size == 1;
        # broadcasting still assigns, but confirm shapes for batch size 1.
        max_logLL_allz_allx, path[:, -1] = torch.max(log_delta.squeeze(), -1)
        for t in range(seq_size-2, -1, -1):
            path[:, t] = psi[:, t+1].gather(-1, path[:, t+1].view(-1, 1)).squeeze()
        return max_logLL_allz_allx, path, score
class BertLikeNerModel(PreTrainedModel):
    """Generic transformer-backed NER head selectable by ``model_type``.

    Original note: not fit for the current training; but can be integrated
    into a new APP.
    """
    # Map model_type -> (config class, pretrained archive list, prefix).
    # NOTE(review): the 'bert' entry reuses ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST;
    # this looks like a copy-paste slip -- confirm whether the BERT archive
    # list was intended here.
    CONF_REF = {
        'bert': (BertConfig, ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, 'bert'),
        'roberta': (RobertaConfig, ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, 'roberta'),
        'albert': (AlbertConfig, ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, 'albert')
    }
    def __init__(self, config, model_type):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.model_type = model_type
        self.num_labels = config.num_labels
        self.model = None
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.__prepare_model_instance(config)
        self.init_weights()
    def __prepare_model_instance(self, config):
        # Instantiate the concrete encoder matching model_type.
        self.config_class, self.pretrained_model_archive_map, self.base_model_prefix = self.CONF_REF[self.model_type]
        if self.model_type == "bert":
            self.model = BertModel(config)
        elif self.model_type == 'roberta':
            self.model = RobertaModel(config)
        elif self.model_type == 'albert':
            self.model = AlbertModel(config)
    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, label_ids=None):
        """Return (logits, active_logits, loss); CRF path returns the
        decoded label sequence in place of raw logits."""
        outputs = self.model(input_ids,
                             attention_mask=attention_mask,
                             token_type_ids=token_type_ids,
                             position_ids=position_ids,
                             head_mask=head_mask)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            # loss_fct = nn.CrossEntropyLoss()  # CrossEntropyLoss applies log_softmax internally
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class BertNerModel(BertPreTrainedModel):
    """
    BERT token-classification (NER) model with an optional CRF head.

    model architecture:
        (bert): BertModel
        (dropout): Dropout(p=0.1, inplace=False)
        (classifier): Linear(in_features=768, out_features=12, bias=True)
        (loss_fct): CrossEntropyLoss()
        (crf_layer): Transformer_CRF()
    """
    def __init__(self, config):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, label_ids=None):
        """Return (logits, active_logits, loss); with CRF enabled the first
        element is the decoded label path, not raw logits."""
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            # loss_fct = nn.CrossEntropyLoss()
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class RobertaNerModel(BertPreTrainedModel):
    """RoBERTa token-classification (NER) model with an optional CRF head."""
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
    base_model_prefix = "roberta"
    def __init__(self, config):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, label_ids=None):
        """
        :return: raw logits without any softmax or log_softmax transformation
        quote for reason (https://discuss.pytorch.org/t/logsoftmax-vs-softmax/21386/7):
          You should pass raw logits to nn.CrossEntropyLoss, since the function itself applies F.log_softmax and nn.NLLLoss() on the input.
          If you pass log probabilities (from nn.LogSoftmax) or probabilities (from nn.Softmax()) your loss function won't work as intended.
        From the pytorch CrossEntropyLoss doc:
          The input is expected to contain raw, unnormalized scores for each class.
        If apply CRF, we cannot use CrossEntropyLoss but instead using NLLLoss ()
        """
        outputs = self.roberta(input_ids,
                               attention_mask=attention_mask,
                               token_type_ids=token_type_ids,
                               position_ids=position_ids,
                               head_mask=head_mask)
        seq_outputs = outputs[0]
        seq_outputs = self.dropout(seq_outputs)
        logits = self.classifier(seq_outputs)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class LongformerNerModel(LongformerForTokenClassification):
    """Longformer token-classification (NER) model with an optional CRF head."""
    def __init__(self, config):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.longformer = LongformerModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def forward(self,
                input_ids=None,
                attention_mask=None,
                global_attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                inputs_embeds=None,
                label_ids=None,
                output_attentions=None,
                output_hidden_states=None):
        """Return (logits, active_logits, loss); with CRF enabled the first
        element is the decoded label path, not raw logits."""
        outputs = self.longformer(input_ids=input_ids,
                                  attention_mask=attention_mask,
                                  global_attention_mask=global_attention_mask,
                                  token_type_ids=token_type_ids,
                                  position_ids=position_ids,
                                  inputs_embeds=inputs_embeds,
                                  output_attentions=output_attentions,
                                  output_hidden_states=output_hidden_states)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            # loss_fct = nn.CrossEntropyLoss()
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class AlbertNerModel(AlbertPreTrainedModel):
    """ALBERT token-classification (NER) model with an optional CRF head."""
    # config_class = AlbertConfig
    # pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
    # base_model_prefix = 'albert'
    def __init__(self, config):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.albert = AlbertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, label_ids=None):
        """Return (logits, active_logits, loss); with CRF enabled the first
        element is the decoded label path, not raw logits."""
        outputs = self.albert(input_ids,
                              attention_mask=attention_mask,
                              token_type_ids=token_type_ids,
                              position_ids=position_ids,
                              head_mask=head_mask)
        seq_outputs = outputs[0]
        seq_outputs = self.dropout(seq_outputs)
        logits = self.classifier(seq_outputs)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class DistilBertNerModel(BertPreTrainedModel):
    """DistilBERT token-classification (NER) model with an optional CRF head.

    Note: ``token_type_ids`` and ``position_ids`` are accepted for signature
    parity with the other models but are not forwarded -- DistilBertModel is
    only called with ``attention_mask`` and ``head_mask`` below.
    """
    config_class = DistilBertConfig
    pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST
    base_model_prefix = 'distilbert'
    def __init__(self, config):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.distilbert = DistilBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, label_ids=None):
        """Return (logits, active_logits, loss); with CRF enabled the first
        element is the decoded label path, not raw logits."""
        outputs = self.distilbert(input_ids,
                                  attention_mask=attention_mask,
                                  head_mask=head_mask)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class XLNetNerModel(XLNetForTokenClassification):
    """XLNet token-classification (NER) model. CRF is not supported."""
    def __init__(self, config):
        """Build encoder + dropout + linear classifier.

        Args:
            config: XLNet config; must define ``num_labels``, ``d_model``,
                ``dropout`` and ``use_crf``.

        Raises:
            Warning: if ``config.use_crf`` is set -- CRF decoding is not
                implemented for XLNet.
        """
        super().__init__(config)
        self.num_labels = config.num_labels
        self.xlnet = XLNetModel(config)
        self.classifier = nn.Linear(config.d_model, self.num_labels)
        self.dropout = nn.Dropout(config.dropout)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        # Bug fix: always define the attribute. The original only assigned
        # ``crf_layer`` inside the ``use_crf`` branch *after* the raise, so in
        # the common non-CRF case the instance never got the attribute.
        # self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        self.crf_layer = None
        if self.use_crf:
            raise Warning("Not support CRF for XLNet for now.")
        self.init_weights()
    def forward(self,
                input_ids=None,
                attention_mask=None,
                mems=None,
                perm_mask=None,
                target_mapping=None,
                token_type_ids=None,
                input_mask=None,
                head_mask=None,
                inputs_embeds=None,
                use_cache=True,
                label_ids=None,
                output_attentions=None,
                output_hidden_states=None,
                ):
        """Return (logits, active_logits, loss).

        The CRF branch is kept for symmetry with the sibling models but is
        unreachable: __init__ raises when ``use_crf`` is set.
        """
        outputs = self.xlnet(input_ids=input_ids,
                             attention_mask=attention_mask,
                             mems=mems,
                             perm_mask=perm_mask,
                             target_mapping=target_mapping,
                             token_type_ids=token_type_ids,
                             input_mask=input_mask,
                             head_mask=head_mask,
                             inputs_embeds=inputs_embeds)
        seq_outputs = outputs[0]
        seq_outputs = self.dropout(seq_outputs)
        logits = self.classifier(seq_outputs)
        if self.use_crf:
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class BartNerModel(PreTrainedModel):
    """
    According to https://arxiv.org/pdf/1910.13461.pdf section 3.2,
    the token classification tasks use the top decoder hidden state.
    We will adopt their implementation only using the decoder (dco) for
    classification; we do provide the option to concat encoder output with
    decoder output.
    """
    config_class = BartConfig
    base_model_prefix = "bart"
    pretrained_model_archive_map = {"bart-large": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/pytorch_model.bin"}
    def __init__(self, config, output_concat=False):
        """Build BART encoder-decoder + dropout + classifier (+ optional CRF).

        Args:
            config: BartConfig; must define ``num_labels``, ``d_model``,
                ``dropout`` and ``use_crf``.
            output_concat: when True, classify over the concatenation of
                decoder and encoder outputs instead of the decoder alone.
        """
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bart = BartModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.output_concat = output_concat
        # Bug fix: with output_concat the classifier consumes the feature-axis
        # concatenation of decoder and encoder outputs (2 * d_model); the
        # original always sized the Linear at d_model, which fails at runtime
        # when output_concat=True.
        classifier_in = config.d_model * (2 if output_concat else 1)
        self.classifier = nn.Linear(classifier_in, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def _init_weights(self, module):
        """Weight init matching fairseq's init_bert_params."""
        std = self.config.init_std
        # called init_bert_params in fairseq
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
    def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, encoder_outputs=None, decoder_attention_mask=None, decoder_cached_states=None, label_ids=None):
        """Return (logits, active_logits, loss); with CRF enabled the first
        element is the decoded label path, not raw logits."""
        # dco = decoder output; eco = encoder output
        dco, eco = self.bart(input_ids,
                             attention_mask=attention_mask,
                             decoder_input_ids=decoder_input_ids,
                             encoder_outputs=encoder_outputs,
                             decoder_attention_mask=decoder_attention_mask,
                             decoder_cached_states=decoder_cached_states
                             )
        if self.output_concat:
            sequence_output = torch.cat((dco, eco), 2)
        else:
            sequence_output = dco
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            # loss_fct = nn.CrossEntropyLoss()
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class ElectraNerModel(ElectraForTokenClassification):
    """
    ELECTRA token-classification (NER) model with an optional CRF head.

    model architecture:
        (electra): ElectraModel
        (dropout): Dropout(p=0.1, inplace=False)
        (classifier): Linear(in_features=768, out_features=12, bias=True)
        (loss_fct): CrossEntropyLoss()
        (crf_layer): Transformer_CRF()
    """
    def __init__(self, config):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.electra = ElectraModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                label_ids=None,
                output_attentions=None,
                output_hidden_states=None):
        """Return (logits, active_logits, loss); with CRF enabled the first
        element is the decoded label path, not raw logits."""
        outputs = self.electra(input_ids,
                               attention_mask=attention_mask,
                               token_type_ids=token_type_ids,
                               position_ids=position_ids,
                               inputs_embeds=inputs_embeds,
                               head_mask=head_mask)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
class DeBertaNerModel(DebertaPreTrainedModel):
    """DeBERTa token-classification (NER) model with an optional CRF head."""
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        """Build encoder + dropout + linear classifier (+ optional CRF)."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deberta = DebertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fct = nn.CrossEntropyLoss()
        self.use_crf = config.use_crf
        if self.use_crf:
            self.crf_layer = Transformer_CRF(num_labels=config.num_labels, start_label_id=config.label2idx['CLS'])
        else:
            self.crf_layer = None
        self.init_weights()
    def forward(
            self,
            input_ids=None,
            attention_mask=None,
            token_type_ids=None,
            position_ids=None,
            inputs_embeds=None,
            label_ids=None,
            output_attentions=None,
            output_hidden_states=None,
            return_dict=None
    ):
        """Return (logits, active_logits, loss); with CRF enabled the first
        element is the decoded label path, not raw logits."""
        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if self.use_crf:
            # CRF returns (decoded path, best-path score, NLL loss).
            logits, active_logits, loss = self.crf_layer(logits, label_ids)
        else:
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active_idx = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_idx]
                active_labels = label_ids.view(-1)[active_idx]
            else:
                active_logits = logits.view(-1, self.num_labels)
                active_labels = label_ids.view(-1)
            loss = self.loss_fct(active_logits, active_labels)
        return logits, active_logits, loss
| 42.33037 | 173 | 0.615476 |
ca6386853500efe64b8aff0fa3e8b15fae1acc55 | 409 | py | Python | roastery/coffee/migrations/0002_bean_description.py | kylehorton33/roastery | be0df40459ab3da7e72a5d52f094e7d1d51e63d0 | [
"MIT"
] | null | null | null | roastery/coffee/migrations/0002_bean_description.py | kylehorton33/roastery | be0df40459ab3da7e72a5d52f094e7d1d51e63d0 | [
"MIT"
] | null | null | null | roastery/coffee/migrations/0002_bean_description.py | kylehorton33/roastery | be0df40459ab3da7e72a5d52f094e7d1d51e63d0 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.13 on 2021-10-26 22:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coffee', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bean',
name='description',
field=models.TextField(blank=True, verbose_name='Description of Bean'),
),
]
| 21.526316 | 83 | 0.603912 |
45329f1467fbc13a4797f7eee2e4410dd5f63d67 | 114 | py | Python | src/bioregistry/align/__main__.py | egonw/bioregistry | 5070e0310a4e9f695d9089b302e0a2421c155d02 | [
"MIT"
] | 2 | 2020-12-23T15:23:51.000Z | 2021-02-13T17:50:34.000Z | src/bioregistry/align/__main__.py | egonw/bioregistry | 5070e0310a4e9f695d9089b302e0a2421c155d02 | [
"MIT"
] | 19 | 2020-12-27T14:12:38.000Z | 2021-03-29T20:20:42.000Z | src/bioregistry/align/__main__.py | egonw/bioregistry | 5070e0310a4e9f695d9089b302e0a2421c155d02 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""CLI for alignment."""
from .cli import align
if __name__ == "__main__":
align()
| 12.666667 | 26 | 0.578947 |
825a7064531e5306f008c3a25c8546bc9324a9c5 | 33,590 | py | Python | veles/backends.py | AkshayJainG/veles | 21106f41a8e7e7e74453cd16a5059a0e6b1c315e | [
"Apache-2.0"
] | 1,007 | 2015-07-20T12:01:41.000Z | 2022-03-30T23:08:35.000Z | veles/backends.py | AkshayJainG/veles | 21106f41a8e7e7e74453cd16a5059a0e6b1c315e | [
"Apache-2.0"
] | 52 | 2015-07-21T10:26:24.000Z | 2019-01-24T05:46:43.000Z | veles/backends.py | AkshayJainG/veles | 21106f41a8e7e7e74453cd16a5059a0e6b1c315e | [
"Apache-2.0"
] | 235 | 2015-07-20T09:42:42.000Z | 2021-12-06T18:12:26.000Z | # -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Mar 21, 2013
OpenCL base classes.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import argparse
import cuda4py as cu
import cuda4py.blas as cublas
import gc
from importlib import import_module
import json
import numpy
import opencl4py as cl
import os
from psutil import virtual_memory
from six import add_metaclass
import sys
from threading import current_thread
from veles.cmdline import CommandLineArgumentsRegistry
from veles.compat import from_none
from veles.config import root
from veles.distributable import Pickleable
from veles.logger import Logger
import veles.opencl_types as opencl_types
import veles.external.prettytable as prettytable
PYVER = sys.version_info[0]
class DeviceInfo(Logger):
    """Info about device.

    Attributes:
        desc: Description of the device.
        memsize: "available" size of the memory on the device.
        memalign: best alignment for device buffers.
        version: OpenCL version.
        rating: in [0, 1] interval (1 - fastest, 0.5 - 50% slower than fastest,
                0 - unrated).
        device_info: contains block sizes for different kernel types.
    """
    def __init__(self, **kwargs):
        """Populate the info fields from keyword arguments (all required
        except rating/device_info which start empty)."""
        super(DeviceInfo, self).__init__()
        self.desc = kwargs["desc"]
        self.memsize = kwargs["memsize"]
        self.memalign = kwargs["memalign"]
        self.version = kwargs["version"]
        self.device_type = kwargs["device_type"]
        self.max_work_group_size = kwargs["max_work_group_size"]
        self.max_work_item_sizes = kwargs["max_work_item_sizes"]
        self.local_memsize = kwargs["local_memsize"]
        self.rating = {}
        self.device_info = {}
    def get_kernel_bs_vo(self, **kwargs):
        """Gets optimal block size and vector_opt
        flag for matrix multiplication.

        Parameters:
            dtype: numeric data type as string (float or double).
            kernel: hint for the name of the kernel for which the optimal
                    block sizes will be returned:
                    conv: convolutional forward propagation,
                    deconv: convolutional back propagation,
                    all other: simple matrix multiplication.
            precision: precision level for summation (0, 1, 2)
                       (defaults to root.common.engine.precision_level).

        Returns:
            BLOCK_SIZE, VECTOR_OPT
        """
        dtype = kwargs["dtype"]
        if not isinstance(dtype, str):
            dtype = opencl_types.numpy_dtype_to_opencl(dtype)
        krnnme = kwargs.get("kernel", "matrix_multiplication")
        precision = kwargs.get("precision", root.common.engine.precision_level)
        krninfo = self.device_info.get(krnnme)
        if krninfo is None:
            # Benchmark for other kernel types is not implemented,
            # so only debug level here
            self.debug(
                "Kernel \"%s\" was not found, "
                "rolling back to block size for matrix_multiplication",
                krnnme)
            krnnme = "matrix_multiplication"
            krninfo = self.device_info.get(krnnme)
            if krninfo is None:
                # No benchmark data at all: fall back to a safe default.
                bs = 8
                self.warning(
                    "krnnme = %s was not found, "
                    "will use block size %d", krnnme, bs)
                return bs, False
        typeinfo = krninfo.get(dtype)
        if typeinfo is None:
            bs = 8
            self.warning(
                "dtype = %s was not found with krnnme = %s, "
                "will use block size %d", dtype, krnnme, bs)
            return bs, False
        bs_dt = typeinfo.get(str(precision))
        # Degrade the precision level until a benchmarked entry is found.
        while bs_dt is None and precision > 0:
            precision -= 1
            bs_dt = typeinfo.get(str(precision))
        if bs_dt is None:
            bs = 8
            self.warning(
                "precision = 0 was not found with krnnme = %s and dtype = %s, "
                "will use block size %d", krnnme, dtype, bs)
            return bs, False
        return bs_dt[0], bs_dt[1]
    def get_max_block_size(self, dtype, vector_opt):
        """Largest square block size fitting the device's work-group and
        local-memory limits; rounded down to a multiple of 4 if vector_opt."""
        itemsize = {"float": 4, "double": 8}[dtype]
        sz = int(numpy.sqrt(self.max_work_group_size))
        sh = self.max_work_item_sizes
        bs = min(sz, sh[0], sh[1])
        # Two bs*bs tiles (A and B) must fit into local memory.
        while bs * bs * 2 * itemsize > self.local_memsize:
            bs -= 1
        if vector_opt:  # round down to 4
            bs >>= 2
            bs <<= 2
        return bs
    @property
    def is_cpu(self):
        # True when the underlying OpenCL device is a CPU device.
        return self.device_type == cl.CL_DEVICE_TYPE_CPU
class DeviceNotFoundError(Exception):
    """Raised when the requested compute device cannot be located."""
class BackendRegistry(CommandLineArgumentsRegistry):
    """Metaclass that registers every backend class by its BACKEND name."""
    # Maps BACKEND string -> backend class.
    backends = {}
    def __init__(cls, name, bases, clsdict):
        super(BackendRegistry, cls).__init__(name, bases, clsdict)
        try:
            BackendRegistry.backends[clsdict["BACKEND"]] = cls
        except KeyError:
            # Every backend class must declare a BACKEND class attribute.
            raise from_none(KeyError("%s does not define BACKEND" % cls))
        assert "PRIORITY" in clsdict, "%s does not define PRIORITY" % cls
    @staticmethod
    def backends_as_str():
        """Human-readable list of registered backends for help/error texts."""
        return ", ".join("\"%s\" for %s" % (k, v.__name__) for k, v in sorted(
            BackendRegistry.backends.items()))
@add_metaclass(CommandLineArgumentsRegistry)
class Device(Pickleable):
    """Base device class.

    Attributes:
        _pid_: process id.
    """
    def __new__(cls, *args, **kwargs):
        # Dispatch construction to the backend class selected by the
        # "backend" kwarg, the VELES_BACKEND env var, or the config default.
        assert issubclass(cls, Device)
        backend = kwargs.get(
            "backend", os.getenv("VELES_BACKEND", root.common.engine.backend))
        cls = BackendRegistry.backends[backend]
        if cls.__new__ != Device.__new__:
            # The backend overrides __new__ (e.g. "auto"): let it decide.
            return cls.__new__(cls, *args, **kwargs)
        return object.__new__(cls)
    def init_unpickled(self):
        super(Device, self).init_unpickled()
        self._pid_ = os.getpid()
        self._thread_pool_detach_callbacks_ = {}
        self._temp_buffer_size = 0
        self._temp_buffer_ = None
    def __del__(self):
        # Copy the dict: thread_pool_detach mutates it while we iterate.
        for pool in dict(self._thread_pool_detach_callbacks_):
            self.thread_pool_detach(pool)
    @property
    def backend_name(self):
        """Returns name of the backend.
        """
        return type(self).BACKEND
    @property
    def id(self):
        # Base class has no device identifier; backends override this.
        return None
    @property
    def pid(self):
        """Process ID.
        """
        return self._pid_
    @property
    def blas(self):
        """Returns BLAS instance (None in the base class).
        """
        return None
    @property
    def is_async(self):
        # Whether the backend executes asynchronously (class constant).
        return type(self).ASYNC
    def sync(self):
        """Synchronizes the device execution queue.
        """
        pass
    def is_attached(self, thread_pool):
        """True if thread_pool_attach() was called for this pool."""
        return thread_pool in self._thread_pool_detach_callbacks_
    def assign_backend_methods(self, obj, backend_methods):
        """Scans backends class hierarchy and assigns found methods to obj.

        For every ``suffix`` in backend_methods, looks up
        ``obj.<BACKEND>_<suffix>`` walking this device's MRO and binds the
        first match to ``obj._backend_<suffix>_``.

        Raises:
            AttributeError: if no backend in the hierarchy provides
                an implementation for a suffix.
        """
        for suffix in backend_methods:
            # Scan class hierarchy for backend method
            checked = []  # this is just for exception message
            for cls in type(self).mro():
                backend = getattr(cls, "BACKEND", None)
                if backend is None:
                    continue
                checked.append(cls)
                backend_method = getattr(obj, backend + "_" + suffix, None)
                if backend_method is not None:
                    break
            else:
                raise AttributeError(
                    "No implementation of %s with backends %s found in %s " %
                    (suffix, checked, type(obj)))
            setattr(obj, "_backend_" + suffix + "_", backend_method)
    def thread_pool_attach(self, thread_pool):
        """Register thread enter/exit callbacks and a shutdown hook on
        thread_pool; no-op (with a warning) when already attached."""
        if thread_pool in self._thread_pool_detach_callbacks_:
            self.warning("Already is_attached to %s", thread_pool)
            return
        self._register_thread_pool_callbacks(thread_pool)
        def detach():
            self.thread_pool_detach(thread_pool)
        self._thread_pool_detach_callbacks_[thread_pool] = detach
        thread_pool.register_on_shutdown(detach)
    def thread_pool_detach(self, thread_pool):
        """Undo thread_pool_attach(); no-op (with a warning) when not
        attached."""
        if thread_pool not in self._thread_pool_detach_callbacks_:
            self.warning("Unable to detach from %s: not attached", thread_pool)
            return
        thread_pool.unregister_on_shutdown(
            self._thread_pool_detach_callbacks_[thread_pool])
        del self._thread_pool_detach_callbacks_[thread_pool]
        self._unregister_thread_pool_callbacks(thread_pool)
    def _register_thread_pool_callbacks(self, pool):
        """Registers callbacks for the thread pool.
        """
        # Important! Save the bound method to variable to avoid dead weak refs
        # See http://stackoverflow.com/questions/19443440/weak-reference-to-python-class-method # nopep8
        self._on_thread_enter_ = self._on_thread_enter
        self._on_thread_exit_ = self._on_thread_exit
        pool.register_on_thread_enter(self._on_thread_enter_)
        pool.register_on_thread_exit(self._on_thread_exit_)
    def _unregister_thread_pool_callbacks(self, pool):
        # Mirror of _register_thread_pool_callbacks.
        pool.unregister_on_thread_enter(self._on_thread_enter)
        pool.unregister_on_thread_exit(self._on_thread_exit)
    @staticmethod
    def iterparse(text):
        """Yield device specs from "<backend>:<devs>[x<count>]" strings,
        delegating per-backend parsing to the backend class; repeats the
        whole device list <count> times when the "x" multiplier is given.

        Raises:
            ValueError: if no backend prefix is present.
        """
        bpos = text.find(':')
        if bpos < 1:
            raise ValueError("Invalid devices definition: %s" % text)
        backend, devs = text[:bpos], text[bpos + 1:]
        multidevs = devs.split('x')
        for _ in range(1 if len(multidevs) == 1 else int(multidevs[1])):
            for d in BackendRegistry.backends[backend].iterparse(multidevs[0]):
                yield d
    def _on_thread_enter(self):
        """Called justed after the new thread has been created
        in the thread pool.
        """
        pass
    def _on_thread_exit(self):
        """Called just before the thread will be terminated.
        """
        pass
    @property
    def exists(self):
        """Returns True if device is ready for use.
        """
        return False
    @staticmethod
    def arg_completer(prefix, **kwargs):
        """argcomplete hook: list "<platform>:<device>" paths for the -d
        option, narrowing by an optional platform index prefix."""
        def format_device(plf, dev):
            return "%s - %s on %s" % (dev.path, dev.name.strip(), plf.name)
        if prefix.strip() == "":
            platforms = cl.Platforms().platforms
            if len(platforms) == 1 and len(platforms[0].devices) == 1:
                return ["0:0"]
            result = []
            for platform in platforms:
                for device in platform:
                    result.append(format_device(platform, device))
            return result
        parsed = [p for p in prefix.split(':') if p.strip() != ""]
        platform = cl.Platforms().platforms[int(parsed[0].strip())]
        if len(parsed) == 1:
            if len(platform.devices) == 1:
                return [platform.devices[0].path]
            result = []
            for device in platform:
                result.append(format_device(platform, device))
            return result
    @staticmethod
    def init_parser(**kwargs):
        """Add the -d/--device and -a/--backend options to an (optionally
        supplied) argparse parser and return it."""
        parser = kwargs.get("parser", argparse.ArgumentParser())
        def set_backend(name):
            # Validates the name and stores it in the global config as a
            # side effect of argparse's type conversion.
            if name not in BackendRegistry.backends:
                raise ValueError(
                    "Insupported backend name: %s. Choose any from %s." %
                    (name, BackendRegistry.backends_as_str()))
            root.common.engine.backend = name
        parser.add_argument(
            "-d", "--device", type=str, default="",
            help="Device ID to use. E.g. 0:1 for OpenCL or 1 for CUDA.") \
            .completer = Device.arg_completer
        parser.add_argument(
            "-a", "--backend", type=set_backend, default="auto",
            help="Acceleration backend to use. Currently supported values are "
                 "%s." % BackendRegistry.backends_as_str())
        return parser
    @staticmethod
    def parse_device(**kwargs):
        """Return the --device value parsed from the class command line."""
        parser = Device.init_parser(**kwargs)
        args, _ = parser.parse_known_args(Device.class_argv)
        return args.device
    def _alloc_temp_buffer(self, size):
        """Allocates device-specific temporary buffer.
        """
        raise NotImplementedError()
    def request_temp_buffer(self, size):
        """Tries to allocate the device specific temporary buffer
        of max(size, previous max size of this function call) bytes,
        then frees it.
        """
        # Keep the size even.
        if size & 1:
            size += 1
        if size > self._temp_buffer_size:
            # Try to allocate the memory on the device
            buf = self._alloc_temp_buffer(size)
            del buf
            self._temp_buffer_size = size
    def get_temp_buffer(self):
        """Returns allocated device-specific temporary buffer
        (lazily allocated at the last requested size).
        """
        if self._temp_buffer_ is None:
            self._temp_buffer_ = self._alloc_temp_buffer(
                self._temp_buffer_size)
        return self._temp_buffer_
@add_metaclass(BackendRegistry)
class AutoDevice(Device):
    """
    Overrides __new__() to automatically select the best available Device type.
    """
    BACKEND = "auto"
    PRIORITY = 0

    def __new__(cls, *args, **kwargs):
        # Try candidate backends from the highest PRIORITY down and construct
        # the first one whose runtime reports itself available.
        for backend_cls in sorted(BackendRegistry.backends.values(),
                                  key=lambda b: b.PRIORITY, reverse=True):
            if backend_cls.available():
                # Fixes: (1) don't shadow the ``cls`` parameter with the loop
                # variable; (2) call object.__new__ without extra positional
                # arguments -- on Python 3 it raises TypeError when given any,
                # and Device.__new__ already calls it bare for consistency.
                return object.__new__(backend_cls)
        assert False, "Impossible because numpy backend is always available"

    @staticmethod
    def available():
        # "auto" is a selector, never a concrete backend itself.
        return False
@add_metaclass(BackendRegistry)
class OpenCLDevice(Device):
    """OpenCL device class.
    Attributes:
        device_info: DeviceInfo object.
        context_: OpenCL context handle.
        queue_: OpenCL device queue.
        pid_: process id.
    """
    BACKEND = "ocl"
    PRIORITY = 20
    DEVICE_INFOS_JSON = "device_infos.json"
    ASYNC = True
    skip = cl.skip
    # Allow this class to be created manually
    def __new__(cls, *args):
        return object.__new__(cls, *args)
    def __init__(self):
        super(OpenCLDevice, self).__init__()
        self._blas = None
        self._id = None
        # Workaround for NVIDIA
        # (fixes incorrect behaviour with OpenCL binaries)
        if os.getenv("CUDA_CACHE_DISABLE") is None:
            os.putenv("CUDA_CACHE_DISABLE", "1")
        # Workaround for AMD
        # (fixes segmentation fault when accessed over ssh with X and
        # no X is running or when accessing locally and integrated
        # video device is used instead of AMD one)
        d = os.getenv("DISPLAY")
        if d is not None and d != os.getenv("COMPUTE"):
            os.unsetenv("DISPLAY")
        # Set 64-bit mode for AMD OpenCL by default
        if os.getenv("GPU_FORCE_64BIT_PTR") is None:
            os.putenv("GPU_FORCE_64BIT_PTR", "1")
        # Get the device
        res = self._get_some_device()
        # Restore DISPLAY to enable drawing
        if d is not None:
            os.putenv("DISPLAY", d)
        if not res:
            return
        self._fill_device_info_performance_values()
        # Log a table of the selected configuration: per-dtype rating and the
        # tuned (BLOCK_SIZE, VECTOR_OPT) kernel parameters.
        log_configs = "Selected the following OpenCL configuration:\n"
        table = prettytable.PrettyTable("device", " dtype", "rating",
                                        "BLOCK_SIZE", "VECTOR_OPT", "version")
        table.align["device"] = "l"
        table.align[" dtype"] = "l"
        table.align["BLOCK_SIZE"] = "l"
        table.align["VECTOR_OPT"] = "l"
        for dtype in sorted(opencl_types.dtypes.keys()):
            rating = self.device_info.rating.get(dtype)
            if rating is None:
                rating = ""
            else:
                rating = "%.3f" % rating
            bs_vo = self.device_info.get_kernel_bs_vo(dtype=dtype)
            table.add_row(self.device_info.desc, dtype, rating,
                          bs_vo[0], bs_vo[1], self.device_info.version)
        self.info(log_configs + str(table))
    @property
    def id(self):
        return self._id
    @property
    def blas(self):
        return self._blas
    @blas.setter
    def blas(self, value):
        # blas may only be assigned once.
        assert self._blas is None, "blas property is not None already"
        self._blas = value
    @property
    def exists(self):
        return self.queue_ is not None
    def init_unpickled(self):
        # The OpenCL queue handle cannot be pickled; reset it on unpickle.
        super(OpenCLDevice, self).init_unpickled()
        self.queue_ = None
    @property
    def max_group_size(self):
        return self.queue_.device.max_work_group_size
    @staticmethod
    def iterparse(text):
        # Parse "<platform>:<dev>" or "<platform>:<min>-<max>" and yield one
        # (backend, "platform:device") pair per device in the range.
        oclpnums, ocldevnum = text.split(':')
        oclpnum = int(oclpnums)
        ocldevarr = ocldevnum.split('-')
        ocldevmin = int(ocldevarr[0])
        if len(ocldevarr) > 1:
            ocldevmax = int(ocldevarr[1])
        else:
            ocldevmax = ocldevmin
        for dev in range(ocldevmin, ocldevmax + 1):
            yield OpenCLDevice.BACKEND, "%d:%d" % (oclpnum, dev)
    def assign_backend_methods(self, obj, backend_methods):
        super(OpenCLDevice, self).assign_backend_methods(obj, backend_methods)
        obj.skip_args = cl.skip
    def compute_ratings(self, device_infos):
        # Rating of a device/dtype = fastest measured matrix-multiplication
        # time over all devices divided by this device's time (1.0 = fastest).
        devdt = {}
        min_dt = {}
        for desc, device_info in sorted(device_infos.items()):
            krninfo = device_info.get("matrix_multiplication")
            if krninfo is None:
                continue
            devdt[desc] = {}
            for dtype, typeinfo in krninfo.items():
                bs_vo_dt = typeinfo.get("0")
                if bs_vo_dt is None or len(bs_vo_dt) < 3:
                    continue
                devdt[desc][dtype] = bs_vo_dt[2]
                min_dt[dtype] = min(min_dt.get(dtype, 1.0e30), bs_vo_dt[2])
        table = prettytable.PrettyTable("device", " dtype", "rating")
        table.align["device"] = "l"
        table.align[" dtype"] = "l"
        rating = {}
        for desc, dtypedt in sorted(devdt.items()):
            rating[desc] = {}
            for dtype, dt in sorted(dtypedt.items()):
                rating[desc][dtype] = min_dt[dtype] / dt
                table.add_row(desc, dtype, "%.3f" % rating[desc][dtype])
        self.debug("Device ratings:\n%s", str(table))
        if self.device_info.desc in rating:
            self.device_info.rating = rating[self.device_info.desc]
    def sync(self):
        self.queue_.flush()
        self.queue_.finish()
    @staticmethod
    def available():
        try:
            return len(cl.Platforms().platforms) > 0
        except:
            return False
    def _get_some_device(self, **kwargs):
        """Gets some device from the available OpenCL devices.
        Returns True if any device was selected, otherwise, False.
        """
        device = self.parse_device(**kwargs)
        try:
            platforms = cl.Platforms()
        except cl.CLRuntimeError:
            platforms = None
        if platforms is None or len(platforms.platforms) == 0:
            raise DeviceNotFoundError("No OpenCL devices were found")
        self._id = device
        if device == "":
            # No explicit id: let the OpenCL layer pick a context.
            context = platforms.create_some_context()
        else:
            # Explicit "<platform>:<dev>[,<dev>...]" specification.
            platfnum, devnums = device.split(':')
            try:
                platform = platforms.platforms[int(platfnum)]
            except IndexError:
                raise from_none(
                    DeviceNotFoundError("Device %s was not found." % device))
            context = platform.create_context(
                [platform.devices[int(devnum)]
                 for devnum in devnums.split(',')])
        if "NVIDIA" in context.platform.name:
            # Poison fork()/Popen so any later attempt fails loudly instead
            # of corrupting the NVIDIA OpenCL runtime state.
            def fail(*args, **kwargs):
                raise RuntimeError("fork() breaks NVIDIA OpenCL")
            os.fork = fail
            import subprocess
            subprocess.Popen = fail
        device = context.devices[0]
        desc = "%s/%s" % (device.vendor.strip(), device.name.strip())
        self.queue_ = context.create_queue(device)
        self.device_info = DeviceInfo(
            desc=desc, memsize=device.memsize,
            memalign=device.memalign, version=device.version,
            device_type=device.type,
            max_work_group_size=self.queue_.device.max_work_group_size,
            max_work_item_sizes=self.queue_.device.max_work_item_sizes,
            local_memsize=self.queue_.device.local_memsize)
        return True
    def _fill_device_info_performance_values(self):
        # Load cached benchmark results (device_infos.json) from each
        # configured directory, re-benchmark this device if needed, persist
        # the merged results, and derive the per-dtype ratings.
        device_infos = {}
        found_any = False
        for devdir in root.common.engine.device_dirs:
            try:
                os.makedirs(devdir, 0o755)
            except OSError:
                pass
            device_infos_fnme = os.path.join(devdir,
                                             OpenCLDevice.DEVICE_INFOS_JSON)
            try:
                with open(device_infos_fnme, "r") as fin:
                    device_infos.update(json.load(fin))
                found_any = True
            except IOError:
                pass
            except ValueError as e:
                self.warning("Failed to load %s: %s", device_infos_fnme, e)
        if not found_any:
            self.warning("Did not find %s in any of the configured paths: %s",
                         OpenCLDevice.DEVICE_INFOS_JSON,
                         root.common.engine.device_dirs)
        if ((self.device_info.desc not in device_infos and
             root.common.engine.test_unknown_device) or
                (self.device_info.desc in device_infos and
                 root.common.engine.test_known_device)):
            self.warning("%s, will perform a "
                         "quick test now.", "Forced device retest"
                         if self.device_info.desc in device_infos
                         else "Device has not been analyzed yet")
            self._find_optimal_bs_vo(device_infos)
            found_any = False
            for devdir in root.common.engine.device_dirs:
                device_infos_fnme = os.path.join(
                    devdir, OpenCLDevice.DEVICE_INFOS_JSON)
                try:
                    with open(device_infos_fnme, "w") as fout:
                        json.dump(device_infos, fout, indent=2, sort_keys=True)
                    found_any = True
                except IOError:
                    pass
            if not found_any:
                self.warning("Unable to save the analysis results to any of "
                             "the configured paths: %s",
                             root.common.engine.device_dirs)
        self.compute_ratings(device_infos)
        if self.device_info.desc in device_infos:
            self.device_info.device_info = device_infos[self.device_info.desc]
    def _find_optimal_bs_vo(self, device_infos):
        # Grid-search block_size / vector_opt for the matrix multiplication
        # benchmark and record the fastest combination per (dtype, precision).
        device_info = device_infos.get(self.device_info.desc, {})
        krnnme = "matrix_multiplication"
        if krnnme not in device_info:
            device_info[krnnme] = {}
        # FIXME(v.markovtsev): disable R0401 locally when pylint issue is fixed
        # https://bitbucket.org/logilab/pylint/issue/61
        # pylint: disable=R0401
        dummy = import_module("veles.dummy")
        opencl_units = import_module("veles.accelerated_units")
        benchmark = opencl_units.DeviceBenchmark
        for dtype in root.common.engine.test_precision_types:
            if dtype not in device_info[krnnme]:
                device_info[krnnme][dtype] = {}
            # json wants strings
            for precision_level in (
                    str(p) for p in root.common.engine.test_precision_levels):
                min_dt = 1.0e30
                for vector_opt in (False, True):
                    max_block_size = self.device_info.get_max_block_size(
                        dtype, vector_opt)
                    min_block_size = 8
                    if int(vector_opt):
                        # Round down to a multiple of 4 and step by 4 when
                        # vector optimization is on (a no-op for 8 itself).
                        min_block_size >>= 2
                        min_block_size <<= 2
                        bs_inc = 4
                    else:
                        bs_inc = 1
                    for block_size in range(min_block_size, max_block_size + 1,
                                            bs_inc):
                        self.info(
                            "Testing %s dtype=%s precision_level=%s "
                            "block_size=%d vector_opt=%s",
                            krnnme, dtype, precision_level,
                            block_size, vector_opt)
                        try:
                            with dummy.DummyWorkflow() as wf:
                                u = benchmark(
                                    wf, size=3001, repeats=3,
                                    dtype=dtype,
                                    precision_level=precision_level,
                                    block_size=block_size,
                                    vector_opt=vector_opt,
                                    return_time=True, dry_run_first=True)
                                u.initialize(self)
                                dt = u.run()
                        except cl.CLRuntimeError as e:
                            self.exception("Failed to evaluate block size %d",
                                           block_size)
                            if e.code == -5:  # CL_OUT_OF_RESOURCES
                                break
                            else:
                                continue
                        finally:
                            gc.collect()
                        if dt < min_dt:
                            min_dt = dt
                            device_info[krnnme][dtype][precision_level] = (
                                block_size, bool(int(vector_opt)), min_dt)
        device_infos[self.device_info.desc] = device_info
    def _alloc_temp_buffer(self, size):
        # Allocate the buffer
        buf = self.queue_.context.create_buffer(
            cl.CL_MEM_READ_WRITE, size=size)
        self.sync()
        # Move it to device
        # NOTE(review): the self-copy presumably forces the runtime to commit
        # the allocation to device memory -- confirm against the cl wrapper.
        self.queue_.copy_buffer(buf, buf, 0, size >> 1, size >> 1)
        self.sync()
        return buf
@add_metaclass(BackendRegistry)
class CUDADevice(Device):
    """CUDA device class.
    Attributes:
        _context_: CUDA context handle.
        _blas_: dictionary of thread-id => CUBLAS instances.
    """
    BACKEND = "cuda"
    PRIORITY = 30
    ASYNC = True
    skip = cu.skip
    # Allow this class to be created manually
    def __new__(cls, *args):
        return object.__new__(cls, *args)
    def __init__(self):
        super(CUDADevice, self).__init__()
        self._context_ = None
        self._id = None
        self._blas_ = {}
        # Get the device
        self._get_some_device()
        # Log a table describing the selected device.
        log_configs = "Selected the following CUDA device:\n"
        table = prettytable.PrettyTable("device", "mem", "compute", "pci")
        table.align["device"] = "l"
        table.align["mem"] = "r"
        table.align["pci"] = "l"
        table.add_row(
            self.context.device.name, self.context.device.total_mem // 1048576,
            "%d.%d" % self.context.device.compute_capability,
            self.context.device.pci_bus_id)
        self.info(log_configs + str(table))
    @property
    def id(self):
        return self._id
    @property
    def context(self):
        return self._context_
    @property
    def exists(self):
        return self._context_ is not None
    def suggest_block_size(self, krn):
        # Start from the occupancy-optimal block size and keep halving while
        # occupancy does not drop, the size stays even and above 4 warps.
        if krn is None:
            raise ValueError("Received None as an argument")
        _min_grid_size, block_size = krn.max_potential_block_size()
        ab_best = krn.max_active_blocks_per_multiprocessor(block_size)
        ab = ab_best
        # Warp size usually 32 and no improvements over 128 with NVIDIA Titan
        min_size = self.context.device.warp_size * 4
        best_block_size = None
        while (ab >= ab_best and not (block_size & 1) and
               block_size >= min_size):
            ab_best = ab
            best_block_size = block_size
            block_size >>= 1
            ab = krn.max_active_blocks_per_multiprocessor(block_size)
        return best_block_size
    def _register_thread_pool_callbacks(self, pool):
        super(CUDADevice, self)._register_thread_pool_callbacks(pool)
        self.context.push_current()
    def _unregister_thread_pool_callbacks(self, pool):
        super(CUDADevice, self)._unregister_thread_pool_callbacks(pool)
        self.context.pop_current()
    def _on_thread_enter(self):
        self._context_.push_current()
    def _on_thread_exit(self):
        # Drop the exiting thread's CUBLAS handle before popping the context.
        tid = current_thread().ident
        if tid in self._blas_:
            blas = self._blas_.pop(tid)
            del blas
        self._context_.pop_current()
    @property
    def blas(self):
        # One lazily created CUBLAS handle per thread.
        tid = current_thread().ident
        blas = self._blas_.get(tid)
        if blas is None:
            blas = cublas.CUBLAS(self.context)
            self._blas_[tid] = blas
        return blas
    @staticmethod
    def arg_completer(prefix, **kwargs):
        # Shell completion helper: describe each CUDA device on one line.
        def format_device(dev):
            return "%d: %s - %s, %dMb, compute_%d%d, pci %s" % ((
                dev.handle, dev.name, dev.total_mem) +
                dev.compute_capability + (dev.pci_bus_id,))
        devices = cu.Devices()
        if len(devices) == 1:
            return ["0"]
        result = []
        for device in devices:
            result.append(format_device(device))
        return result
    @staticmethod
    def iterparse(text):
        # Parse "<dev>" or "<min>-<max>" and yield one (backend, id) per dev.
        ocldevarr = text.split('-')
        ocldevmin = int(ocldevarr[0])
        if len(ocldevarr) > 1:
            ocldevmax = int(ocldevarr[1])
        else:
            ocldevmax = ocldevmin
        for dev in range(ocldevmin, ocldevmax + 1):
            yield CUDADevice.BACKEND, str(dev)
    def assign_backend_methods(self, obj, backend_methods):
        super(CUDADevice, self).assign_backend_methods(obj, backend_methods)
        obj.skip_args = cu.skip
    def _get_some_device(self, **kwargs):
        """Gets some device from the available CUDA devices.
        Returns True if any device was selected, otherwise, False.
        """
        device = self.parse_device(**kwargs)
        try:
            devices = cu.Devices()
        except (OSError, cu.CUDARuntimeError):
            devices = None
        if devices is None or not len(devices):
            raise DeviceNotFoundError("No CUDA devices were found")
        self._id = device
        if device == "":
            context = devices.create_some_context()
        else:
            try:
                device = devices[int(device)]
            except IndexError:
                raise from_none(
                    DeviceNotFoundError(
                        "CUDA device %s was not found." % device))
            context = device.create_context()
        self._context_ = context
        device = self.context.device
        self.device_info = DeviceInfo(
            desc=device.name, memsize=device.total_mem,
            memalign=4096, version=device.compute_capability,
            device_type="CUDA",
            max_work_group_size=device.max_grid_dims,
            max_work_item_sizes=device.max_block_dims,
            local_memsize=device.max_shared_memory_per_block)
        return True
    def sync(self):
        self.context.synchronize()
    @staticmethod
    def available():
        try:
            return len(cu.Devices()) > 0
        except:
            return False
    def _alloc_temp_buffer(self, size):
        # Allocate the buffer
        return cu.MemAlloc(self.context, size)
@add_metaclass(BackendRegistry)
class NumpyDevice(Device):
    """Pseudo device that runs everything on the host with plain numpy."""
    BACKEND = "numpy"
    PRIORITY = 10
    ASYNC = False
    def __new__(cls, *args):
        # Manual construction is always allowed for this backend.
        return object.__new__(cls)
    def __init__(self):
        super(NumpyDevice, self).__init__()
        total_ram = virtual_memory().total
        self.device_info = DeviceInfo(
            desc="Python Numpy", memsize=total_ram, memalign=8,
            version=numpy.__version__, device_type="Hybrid",
            max_work_group_size=None, max_work_item_sizes=None,
            local_memsize=total_ram)
    @staticmethod
    def available():
        # numpy is a hard dependency, so this backend can always run.
        return True
    @staticmethod
    def iterparse(text):
        yield "numpy", "numpy"
    def _alloc_temp_buffer(self, size):
        # A host-side byte array stands in for the device buffer.
        return numpy.zeros(size, dtype=numpy.uint8)
| 35.395153 | 105 | 0.577077 |
1a50057aa9de328c43c7ce39610e090b9fc56f4d | 765 | py | Python | EnergyIntensityIndicators/__init__.py | NREL/EnergyIntensityIndicators | 6d5a6d528ecd27b930d82088055224473ba2d63e | [
"BSD-3-Clause"
] | 7 | 2020-07-30T15:02:23.000Z | 2022-01-23T20:02:55.000Z | EnergyIntensityIndicators/__init__.py | NREL/EnergyIntensityIndicators | 6d5a6d528ecd27b930d82088055224473ba2d63e | [
"BSD-3-Clause"
] | 36 | 2020-06-18T15:47:32.000Z | 2021-09-13T21:20:49.000Z | EnergyIntensityIndicators/__init__.py | NREL/EnergyIntensityIndicators | 6d5a6d528ecd27b930d82088055224473ba2d63e | [
"BSD-3-Clause"
] | 2 | 2020-06-18T13:30:43.000Z | 2020-11-17T11:34:10.000Z | """The Energy Intensity Indicators Model
"""
from __future__ import print_function, division, absolute_import
import os
from EnergyIntensityIndicators.Residential import residential_floorspace
from EnergyIntensityIndicators.Industry import (manufacturing,
nonmanufacuturing, asm_price_fit)
from EnergyIntensityIndicators import (industry, residential, commercial, transportation,
electricity, additive_lmdi, multiplicative_lmdi,
LMDI)
__author__ = 'Isabelle Rabideau'
__email__ = 'isabelle.rabideau@nrel.gov'
EIIDIR = os.path.dirname(os.path.realpath(__file__))
TESTDATADIR = os.path.join(os.path.dirname(EIIDIR), 'tests', 'data') | 42.5 | 90 | 0.686275 |
6897cea8707c16965c2d6219cfc4e79998bc2f3e | 9,372 | py | Python | trezor-crypto/crypto/tests/test_curves.py | Raden-Hor/wallet-core | 3e64de57ab70e2ce8ecd78e43cdaf290bf334821 | [
"MIT"
] | 1,306 | 2019-08-08T13:25:24.000Z | 2022-03-31T23:32:28.000Z | trezor-crypto/crypto/tests/test_curves.py | Raden-Hor/wallet-core | 3e64de57ab70e2ce8ecd78e43cdaf290bf334821 | [
"MIT"
] | 1,179 | 2019-08-08T07:06:10.000Z | 2022-03-31T12:33:47.000Z | trezor-crypto/crypto/tests/test_curves.py | Raden-Hor/wallet-core | 3e64de57ab70e2ce8ecd78e43cdaf290bf334821 | [
"MIT"
] | 811 | 2019-08-08T13:27:44.000Z | 2022-03-31T21:22:53.000Z | #!/usr/bin/py.test
import binascii
import ctypes as c
import hashlib
import os
import random
import curve25519
import ecdsa
import pytest
def bytes2num(s):
    """Interpret *s* (bytes-like or an iterable of ints) as a big-endian int.

    The first byte is the most significant — exactly what the original
    hand-rolled shift-and-add loop computed, now delegated to the stdlib.
    """
    return int.from_bytes(bytes(bytearray(s)), "big")
curves = {"nist256p1": ecdsa.curves.NIST256p, "secp256k1": ecdsa.curves.SECP256k1}
class Point:
    """Raw test-vector data: an affine point (x, y) on the named curve."""
    def __init__(self, name, x, y):
        self.curve, self.x, self.y = name, x, y
points = [
Point(
"secp256k1",
0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8,
),
Point(
"secp256k1",
0x1,
0x4218F20AE6C646B363DB68605822FB14264CA8D2587FDD6FBC750D587E76A7EE,
),
Point(
"secp256k1",
0x2,
0x66FBE727B2BA09E09F5A98D70A5EFCE8424C5FA425BBDA1C511F860657B8535E,
),
Point(
"secp256k1",
0x1B,
0x1ADCEA1CF831B0AD1653E769D1A229091D0CC68D4B0328691B9CAACC76E37C90,
),
Point(
"nist256p1",
0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,
0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5,
),
Point(
"nist256p1",
0x0,
0x66485C780E2F83D72433BD5D84A06BB6541C2AF31DAE871728BF856A174F93F4,
),
Point(
"nist256p1",
0x0,
0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B,
),
Point(
"nist256p1",
0xAF8BBDFE8CDD5577ACBF345B543D28CF402F4E94D3865B97EA0787F2D3AA5D22,
0x35802B8B376B995265918B078BC109C21A535176585C40F519ACA52D6AFC147C,
),
Point(
"nist256p1",
0x80000,
0x580610071F440F0DCC14A22E2D5D5AFC1224C0CD11A3B4B51B8ECD2224EE1CE2,
),
]
random_iters = int(os.environ.get("ITERS", 1))
DIR = os.path.abspath(os.path.dirname(__file__))
lib = c.cdll.LoadLibrary(os.path.join(DIR, "libtrezor-crypto.so"))
BIGNUM = c.c_uint32 * 9
class curve_info(c.Structure):
    # ctypes mirror of trezor-crypto's curve_info: the BIP32 curve name plus
    # an opaque pointer to the curve parameters.
    _fields_ = [("bip32_name", c.c_char_p), ("params", c.c_void_p)]
class curve_point(c.Structure):
    # ctypes mirror of trezor-crypto's curve_point: affine (x, y) bignums.
    _fields_ = [("x", BIGNUM), ("y", BIGNUM)]
class ecdsa_curve(c.Structure):
    # ctypes mirror of trezor-crypto's ecdsa_curve: domain parameters of a
    # short Weierstrass curve y^2 = x^3 + a*x + b over GF(prime).
    _fields_ = [
        ("prime", BIGNUM),
        ("G", curve_point),
        ("order", BIGNUM),
        ("order_half", BIGNUM),
        ("a", c.c_int),
        ("b", BIGNUM),
    ]
lib.get_curve_by_name.restype = c.POINTER(curve_info)
class Random(random.Random):
    """random.Random extended with helpers for ctypes byte buffers and
    uniformly random curve points."""
    def randbytes(self, n):
        """Return a ctypes array of *n* uniformly random bytes."""
        out = (c.c_uint8 * n)()
        for idx in range(n):
            out[idx] = self.randrange(0, 256)
        return out
    def randpoint(self, curve):
        """Return a uniformly random scalar multiple of the curve generator."""
        scalar = self.randrange(0, curve.order)
        return scalar * curve.generator
def int2bn(x, bn_type=None):
    """Pack the non-negative integer *x* into a trezor-crypto bignum.

    The value is split into 29-bit limbs, least-significant limb first, and
    stored in a fresh instance of *bn_type* (defaults to the module-level
    BIGNUM type).  The original Python integer is kept on the ``_int``
    attribute for debugging convenience, as before.

    Fix: the default is now bound lazily at call time instead of capturing
    BIGNUM at import time, so the function no longer depends on definition
    order and can be exercised with any ctypes array type.
    """
    if bn_type is None:
        bn_type = BIGNUM
    b = bn_type()
    b._int = x
    for i in range(len(b)):
        b[i] = x % (1 << 29)
        x = x >> 29
    return b
def bn2int(b):
    """Unpack a trezor-crypto bignum (29-bit limbs, little-endian) to int."""
    return sum(limb << (29 * i) for i, limb in enumerate(b))
@pytest.fixture(params=range(random_iters))
def r(request):
    # Seeded RNG fixture; the SEED env var offsets the per-iteration seed so
    # runs are reproducible.
    seed = request.param
    return Random(seed + int(os.environ.get("SEED", 0)))
@pytest.fixture(params=list(sorted(curves)))
def curve(request):
    # Pair the native curve parameters (via libtrezor-crypto) with the
    # matching python-ecdsa curve object for cross-checking.
    name = request.param
    curve_ptr = lib.get_curve_by_name(bytes(name, "ascii")).contents.params
    assert curve_ptr, "curve {} not found".format(name)
    curve_obj = curves[name]
    curve_obj.ptr = c.cast(curve_ptr, c.POINTER(ecdsa_curve))
    curve_obj.p = curve_obj.curve.p()  # shorthand
    return curve_obj
@pytest.fixture(params=points)
def point(request):
    # For each fixed test vector, attach the native curve handle and build
    # the corresponding python-ecdsa point.
    name = request.param.curve
    curve_ptr = lib.get_curve_by_name(bytes(name, "ascii")).contents.params
    assert curve_ptr, "curve {} not found".format(name)
    curve_obj = curves[name]
    curve_obj.ptr = c.c_void_p(curve_ptr)
    curve_obj.p = ecdsa.ellipticcurve.Point(
        curve_obj.curve, request.param.x, request.param.y
    )
    return curve_obj
POINT = BIGNUM * 2
def to_POINT(p):
    # Convert a python-ecdsa point into the native POINT representation.
    return POINT(int2bn(p.x()), int2bn(p.y()))
def from_POINT(p):
    # Convert a native POINT back into an (x, y) tuple of Python ints.
    return (bn2int(p[0]), bn2int(p[1]))
JACOBIAN = BIGNUM * 3
def to_JACOBIAN(jp):
    # Convert an (X, Y, Z) triple of ints into the native JACOBIAN struct.
    return JACOBIAN(int2bn(jp[0]), int2bn(jp[1]), int2bn(jp[2]))
def from_JACOBIAN(p):
    # Convert a native JACOBIAN struct back into an (X, Y, Z) int triple.
    return (bn2int(p[0]), bn2int(p[1]), bn2int(p[2]))
def test_curve_parameters(curve):
    # The native curve constants must match python-ecdsa's reference values.
    assert curve.curve.p() == bn2int(curve.ptr.contents.prime)
    assert curve.generator.x() == bn2int(curve.ptr.contents.G.x)
    assert curve.generator.y() == bn2int(curve.ptr.contents.G.y)
    assert curve.order == bn2int(curve.ptr.contents.order)
    assert curve.order // 2 == bn2int(curve.ptr.contents.order_half)
    assert curve.curve.a() == curve.ptr.contents.a
    assert curve.curve.b() == bn2int(curve.ptr.contents.b)
def test_point_multiply(curve, r):
    # Native scalar multiplication k*P must agree with python-ecdsa.
    p = r.randpoint(curve)
    k = r.randrange(0, 2 ** 256)
    kp = k * p
    res = POINT(int2bn(0), int2bn(0))
    lib.point_multiply(curve.ptr, int2bn(k), to_POINT(p), res)
    res = from_POINT(res)
    assert res == (kp.x(), kp.y())
def test_point_add(curve, r):
    # Native affine point addition must agree with python-ecdsa; the C call
    # accumulates the result into its second argument.
    p1 = r.randpoint(curve)
    p2 = r.randpoint(curve)
    # print '-' * 80
    q = p1 + p2
    q1 = to_POINT(p1)
    q2 = to_POINT(p2)
    lib.point_add(curve.ptr, q1, q2)
    q_ = from_POINT(q2)
    assert q_ == (q.x(), q.y())
def test_point_double(curve, r):
    # Native in-place point doubling must agree with python-ecdsa.
    p = r.randpoint(curve)
    q = p.double()
    q_ = to_POINT(p)
    lib.point_double(curve.ptr, q_)
    q_ = from_POINT(q_)
    assert q_ == (q.x(), q.y())
def test_point_to_jacobian(curve, r):
    # Round-trip affine -> Jacobian -> affine, and check the Jacobian
    # invariants jx = x*z^2, jy = y*z^3 (mod p).
    p = r.randpoint(curve)
    jp = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p), jp, int2bn(curve.p))
    jx, jy, jz = from_JACOBIAN(jp)
    assert jx % curve.p == (p.x() * jz ** 2) % curve.p
    assert jy % curve.p == (p.y() * jz ** 3) % curve.p
    q = POINT()
    lib.jacobian_to_curve(jp, q, int2bn(curve.p))
    q = from_POINT(q)
    assert q == (p.x(), p.y())
def test_jacobian_add(curve, r):
    # Mixed affine+Jacobian addition must agree with python-ecdsa.
    p1 = r.randpoint(curve)
    p2 = r.randpoint(curve)
    prime = int2bn(curve.p)
    q = POINT()
    jp2 = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p2), jp2, prime)
    lib.point_jacobian_add(to_POINT(p1), jp2, curve.ptr)
    lib.jacobian_to_curve(jp2, q, prime)
    q = from_POINT(q)
    p_ = p1 + p2
    assert (p_.x(), p_.y()) == q
def test_jacobian_add_double(curve, r):
    # Degenerate case of jacobian_add where both operands are the same point
    # (the implementation must fall back to doubling).
    p1 = r.randpoint(curve)
    p2 = p1
    prime = int2bn(curve.p)
    q = POINT()
    jp2 = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p2), jp2, prime)
    lib.point_jacobian_add(to_POINT(p1), jp2, curve.ptr)
    lib.jacobian_to_curve(jp2, q, prime)
    q = from_POINT(q)
    p_ = p1 + p2
    assert (p_.x(), p_.y()) == q
def test_jacobian_double(curve, r):
    # Jacobian doubling must agree with python-ecdsa's affine double().
    p = r.randpoint(curve)
    p2 = p.double()
    prime = int2bn(curve.p)
    q = POINT()
    jp = JACOBIAN()
    lib.curve_to_jacobian(to_POINT(p), jp, prime)
    lib.point_jacobian_double(jp, curve.ptr)
    lib.jacobian_to_curve(jp, q, prime)
    q = from_POINT(q)
    assert (p2.x(), p2.y()) == q
def sigdecode(sig, _):
    """Split a 64-byte raw signature into its (r, s) integer halves.

    Fix: return a tuple instead of a lazy ``map`` object.  A map iterator is
    single-use, so any consumer that inspects the pair more than once would
    silently see it empty; python-ecdsa only unpacks the pair, so existing
    callers are unaffected.
    """
    return tuple(map(bytes2num, [sig[:32], sig[32:]]))
def test_sign(curve, r):
    # The native deterministic ECDSA signature must match python-ecdsa's
    # deterministic signature bit for bit, and verify under the reference
    # implementation.
    priv = r.randbytes(32)
    digest = r.randbytes(32)
    sig = r.randbytes(64)
    lib.ecdsa_sign_digest(curve.ptr, priv, digest, sig, c.c_void_p(0), c.c_void_p(0))
    exp = bytes2num(priv)
    sk = ecdsa.SigningKey.from_secret_exponent(exp, curve, hashfunc=hashlib.sha256)
    vk = sk.get_verifying_key()
    sig_ref = sk.sign_digest_deterministic(
        digest, hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string_canonize
    )
    assert binascii.hexlify(sig) == binascii.hexlify(sig_ref)
    assert vk.verify_digest(sig, digest, sigdecode)
def test_validate_pubkey(curve, r):
    # Any random multiple of the generator must validate as a public key.
    p = r.randpoint(curve)
    assert lib.ecdsa_validate_pubkey(curve.ptr, to_POINT(p))
def test_validate_pubkey_direct(point):
    # The fixed test-vector points must all validate as public keys.
    assert lib.ecdsa_validate_pubkey(point.ptr, to_POINT(point.p))
def test_curve25519(r):
    # X25519 Diffie-Hellman: both native shared secrets must agree with each
    # other and with the python curve25519 reference (raw, no hashing).
    sec1 = bytes(bytearray(r.randbytes(32)))
    sec2 = bytes(bytearray(r.randbytes(32)))
    pub1 = curve25519.Private(sec1).get_public()
    pub2 = curve25519.Private(sec2).get_public()
    session1 = r.randbytes(32)
    lib.curve25519_scalarmult(session1, sec2, pub1.public)
    session2 = r.randbytes(32)
    lib.curve25519_scalarmult(session2, sec1, pub2.public)
    assert bytearray(session1) == bytearray(session2)
    shared1 = curve25519.Private(sec2).get_shared_key(pub1, hashfunc=lambda x: x)
    shared2 = curve25519.Private(sec1).get_shared_key(pub2, hashfunc=lambda x: x)
    assert shared1 == shared2
    assert bytearray(session1) == shared1
    assert bytearray(session2) == shared2
def test_curve25519_pubkey(r):
    # Native basepoint multiplication must reproduce the reference pubkey.
    sec = bytes(bytearray(r.randbytes(32)))
    pub = curve25519.Private(sec).get_public()
    res = r.randbytes(32)
    lib.curve25519_scalarmult_basepoint(res, sec)
    assert bytearray(res) == pub.public
def test_curve25519_scalarmult_from_gpg(r):
    # Known-answer test: inputs/expected output taken from GPG.  Note the
    # [::-1] byte reversal -- the fixture is stored in the opposite endianness
    # from what the native function expects.
    sec = binascii.unhexlify(
        "4a1e76f133afb29dbc7860bcbc16d0e829009cc15c2f81ed26de1179b1d9c938"
    )
    pub = binascii.unhexlify(
        "5d6fc75c016e85b17f54e0128a216d5f9229f25bac1ec85cecab8daf48621b31"
    )
    res = r.randbytes(32)
    lib.curve25519_scalarmult(res, sec[::-1], pub[::-1])
    expected = "a93dbdb23e5c99da743e203bd391af79f2b83fb8d0fd6ec813371c71f08f2d4d"
    assert binascii.hexlify(bytearray(res)) == bytes(expected, "ascii")
| 26.474576 | 87 | 0.65621 |
33f809fe37eeccbc2fcdb484fd605b3294e35eea | 235 | py | Python | mindhome_alpha/erpnext/patches/v7_1/fix_link_for_customer_from_lead.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/patches/v7_1/fix_link_for_customer_from_lead.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/patches/v7_1/fix_link_for_customer_from_lead.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | from __future__ import unicode_literals
import frappe
def execute():
    """Refresh the lead status of every Customer that originates from a Lead."""
    linked = frappe.db.sql(
        'select name from tabCustomer where ifnull(lead_name,"")!=""')
    for row in linked:
        frappe.get_doc('Customer', row[0]).update_lead_status()
c1ef9d6ccfaec27853cf1629fd16414d164a0b41 | 200 | py | Python | python/testData/quickFixes/PyRenameElementQuickFixTest/renameAwaitFunctionInPy36.py | alwyn/intellij-community | 22e80b2aa9779d553c44e33929ad49a8a94b8449 | [
"Apache-2.0"
] | null | null | null | python/testData/quickFixes/PyRenameElementQuickFixTest/renameAwaitFunctionInPy36.py | alwyn/intellij-community | 22e80b2aa9779d553c44e33929ad49a8a94b8449 | [
"Apache-2.0"
] | null | null | null | python/testData/quickFixes/PyRenameElementQuickFixTest/renameAwaitFunctionInPy36.py | alwyn/intellij-community | 22e80b2aa9779d553c44e33929ad49a8a94b8449 | [
"Apache-2.0"
] | 1 | 2019-02-06T14:50:03.000Z | 2019-02-06T14:50:03.000Z | def <warning descr="'async' and 'await' are not recommended to be used as variable, class, function or module names. They will become proper keywords in Python 3.7.">a<caret>wait</warning>():
pass | 100 | 191 | 0.74 |
25a00d6705a3db59f23fafc197fd8161a45a7d36 | 4,734 | py | Python | clus/eof_tool.py | jhardenberg/EnsClus | c7591aa39d649fc4321ac4db219f241aabcaf295 | [
"Apache-2.0"
] | null | null | null | clus/eof_tool.py | jhardenberg/EnsClus | c7591aa39d649fc4321ac4db219f241aabcaf295 | [
"Apache-2.0"
] | null | null | null | clus/eof_tool.py | jhardenberg/EnsClus | c7591aa39d649fc4321ac4db219f241aabcaf295 | [
"Apache-2.0"
] | 1 | 2019-02-13T18:00:34.000Z | 2019-02-13T18:00:34.000Z | # Standard packages
import numpy as np
import pickle
import datetime
from eofs.standard import Eof
import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# Ignore /.../anaconda3/lib/python3.5/site-packages/matplotlib/artist.py:221:
# MatplotlibDeprecationWarning: This has been deprecated in mpl 1.5, please use the
# axes property. A removal date has not been set. warnings.warn(_get_axes_msg, mplDeprecation, stacklevel=1)
from matplotlib.cbook import MatplotlibDeprecationWarning
import warnings
warnings.simplefilter('ignore', MatplotlibDeprecationWarning)
#////////////////////////////////////////////////////////////////////////////////////////////////
#____________FUNCTION 1: Computing the EOFs and PCs
def eof_computation(var, varunits, lat, lon):
    """Run an area-weighted EOF analysis of *var* (dimensioned time/lat/lon).

    Returns the Eof solver together with the scaled and unscaled PCs/EOFs
    and the per-mode explained-variance fractions.
    """
    #----------------------------------------------------------------------------------------
    print('____________________________________________________________________________________________________________________')
    print('Computing the EOFs and PCs')
    #----------------------------------------------------------------------------------------
    # Weight each latitude row by sqrt(cos(lat)) so equal areas contribute
    # equally; the trailing length-1 axis lets the weights broadcast over lon.
    weights_array = np.sqrt(np.cos(np.deg2rad(lat)))[:, np.newaxis]
    start = datetime.datetime.now()
    solver = Eof(var, weights=weights_array)
    end = datetime.datetime.now()
    print('EOF computation took me %s seconds' % (end - start))
    # Explained-variance fraction of every mode (and its cumulative percent).
    varfrac = solver.varianceFraction()
    acc = np.cumsum(varfrac * 100)
    pcs_unscal0 = solver.pcs()               # PCs, no scaling
    eofs_unscal0 = solver.eofs()             # EOFs, no scaling
    pcs_scal1 = solver.pcs(pcscaling=1)      # PCs divided by sqrt(eigenvalue)
    eofs_scal2 = solver.eofs(eofscaling=2)   # EOFs times sqrt(eigenvalue)
    return solver, pcs_scal1, eofs_scal2, pcs_unscal0, eofs_unscal0, varfrac
#////////////////////////////////////////////////////////////////////////////////////////////////
#____________FUNCTION 2: Plot of the nth the EOFs and PCs
def eof_plots(neof, pcs_scal1, eofs_scal2, var, varunits, lat, lon, tit, numens,
              varfrac=None):
    """Plot the (neof+1)-th scaled PC (bar-style line plot) and EOF (map).

    Bug fix: the original body referenced a global ``varfrac`` that is never
    defined in this module (it is one of the values returned by
    eof_computation), so every call died with a NameError.  The
    explained-variance fractions are now taken from the new, optional
    ``varfrac`` argument; the signature stays backward compatible.

    Returns:
        (figPC_scal1, figEOF_scal2): the two matplotlib figures.
    """
    if varfrac is None:
        raise ValueError(
            "varfrac must be provided (use the varianceFraction array "
            "returned by eof_computation)")
    #----------------------------------------------------------------------------------------
    print('____________________________________________________________________________________________________________________')
    print('Plotting the EOFs and PCs')
    #----------------------------------------------------------------------------------------
    # PC scaled by 1/sqrt(eigenvalue): one value per ensemble member.
    figPC_scal1 = plt.figure(figsize=(24, 14))
    ax = figPC_scal1.gca()
    plt.plot(pcs_scal1[:, neof])
    plt.axhline(y=0, color='k', linestyle='--')
    ttPC = '{0} PC{1}: explained variance {2}%\n'.format(
        tit, neof + 1, "%.2f" % (varfrac[neof] * 100))
    plt.title(ttPC, fontsize=34, fontweight='bold')
    plt.grid(True)
    for tickx in ax.xaxis.get_major_ticks():
        tickx.label.set_fontsize(28)
    for ticky in ax.yaxis.get_major_ticks():
        ticky.label.set_fontsize(28)
    plt.ylabel('PC{0} {1}'.format(neof, varunits), fontsize=28)
    plt.xlabel('ensemble members', fontsize=28)
    # EOF scaled by sqrt(eigenvalue), drawn on a global PlateCarree map.
    figEOF_scal2 = plt.figure(figsize=(14, 14))
    proj = ccrs.PlateCarree()
    ax = plt.axes(projection=proj)
    ax.set_global()
    ax.coastlines()
    ax.gridlines()
    fill2 = ax.contourf(lon, lat, eofs_scal2[neof, ...], cmap=plt.cm.RdBu_r,
                        transform=ccrs.PlateCarree())
    cb = plt.colorbar(fill2, orientation='horizontal')
    cb.set_label(varunits, rotation=0, fontsize=20)
    cb.ax.tick_params(labelsize=20)
    ttEOF = '{0}\nEOF{1}: explained variance {2}%\n'.format(
        tit, neof + 1, "%.2f" % (varfrac[neof] * 100))
    plt.title(ttEOF, fontsize=34, fontweight='bold')
    plt.tight_layout()
    return figPC_scal1, figEOF_scal2
| 47.818182 | 129 | 0.599071 |
8bcadabd462ae284b6ae5d6cf234ccd1f984f405 | 1,188 | py | Python | IntroToCS_ex2/quadratic_equation.py | nadavWeisler/IntroToCS | 725fc8822eeb34f6917692846689dee29b24af55 | [
"MIT"
] | null | null | null | IntroToCS_ex2/quadratic_equation.py | nadavWeisler/IntroToCS | 725fc8822eeb34f6917692846689dee29b24af55 | [
"MIT"
] | null | null | null | IntroToCS_ex2/quadratic_equation.py | nadavWeisler/IntroToCS | 725fc8822eeb34f6917692846689dee29b24af55 | [
"MIT"
] | null | null | null | #############################################################
# FILE : quadratic_equation.py
# WRITER : Nadav Weisler , Weisler , 316493758
# EXERCISE : intro2cs ex2 2019
# DESCRIPTION: esulva quadratic equation
#############################################################
import math
def quadratic_equation(num1, num2, num3):
    """Solve num1*x**2 + num2*x + num3 == 0 over the reals.

    Returns:
        None if there is no real solution,
        (root, None) for a double root,
        (root1, root2) for two distinct real roots.

    Bug fix: the roots were computed as ``... / 2 * num1``, which divides by
    2 and then *multiplies* by the leading coefficient; the quadratic formula
    requires dividing by ``2 * num1``.
    """
    delta = (num2 ** 2) - (4 * num1 * num3)
    if delta < 0:
        # Negative discriminant: no real solutions.
        return None
    denom = 2 * num1
    if delta > 0:
        root = math.sqrt(delta)
        return (-num2 + root) / denom, (-num2 - root) / denom
    # delta == 0: a single (double) root.
    return -num2 / denom, None
def quadratic_equation_user_input():
"""Solve quadratic equation from user input using "quadratic_equation" function"""
nums_string = input("Insert coefficients a, b, and c: ")
nums_split = nums_string.split(" ")
sols = quadratic_equation(float(nums_split[0]), float(nums_split[1]), float(nums_split[2]))
if sols is None:
print("The equation has no solutions")
elif sols[1] is None:
print("The equation has 1 solution: " + str(sols[0]))
else:
print("The equation has 2 solutions: " + str(sols[0]) + " and " + str(sols[1]))
| 33 | 95 | 0.571549 |
17e7d91b0c194cec5532a8f4dd2cbcf67a2c8f58 | 14,804 | py | Python | src/run_asc.py | dasaep/BERT-for-ABSA | 7a6d5a8250605c68bf29bec315d6ee5ce054dbfc | [
"Apache-2.0"
] | 33 | 2020-10-27T02:03:24.000Z | 2022-03-28T02:29:30.000Z | src/run_asc.py | dasaep/BERT-for-ABSA | 7a6d5a8250605c68bf29bec315d6ee5ce054dbfc | [
"Apache-2.0"
] | null | null | null | src/run_asc.py | dasaep/BERT-for-ABSA | 7a6d5a8250605c68bf29bec315d6ee5ce054dbfc | [
"Apache-2.0"
] | 13 | 2020-11-09T08:47:28.000Z | 2021-12-05T06:50:53.000Z | # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import random
import json
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from tokenization import BertTokenizer
from modeling import BertModel, BertPreTrainedModel, BertLayer, BertPooler
from optimization import BertAdam
import absa_data_utils as data_utils
from absa_data_utils import ABSATokenizer
import modelconfig
from math import ceil
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def warmup_linear(x, warmup=0.002):
    """Linear warmup followed by linear decay.

    While x < warmup the multiplier ramps up linearly as x/warmup; afterwards
    it decays linearly as 1.0 - x. Here x is the fraction of total training
    steps completed.
    """
    return x / warmup if x < warmup else 1.0 - x
class GRoIE(nn.Module):
    """Group of experts over the top `count` BERT encoder layers.

    Each expert is an extra BertLayer whose pooled output is fed to a shared
    linear classifier; per-expert losses are summed and logits are averaged.
    """
    def __init__(self, count, config, num_labels):
        super(GRoIE, self).__init__()
        self.count = count
        self.num_labels = num_labels
        self.pooler = BertPooler(config)
        self.pre_layers = torch.nn.ModuleList()
        self.loss_fct = torch.nn.ModuleList()
        # Single classifier shared by every expert.
        self.classifier = torch.nn.Linear(config.hidden_size, num_labels)
        for _ in range(count):
            self.pre_layers.append(BertLayer(config))
            self.loss_fct.append(torch.nn.CrossEntropyLoss(ignore_index=-1))
    def forward(self, layers, attention_mask, labels):
        per_expert_losses = []
        per_expert_logits = []
        for depth, (expert, criterion) in enumerate(zip(self.pre_layers, self.loss_fct)):
            # Expert `depth` consumes the encoder layer counted from the top.
            hidden = expert(layers[-depth - 1], attention_mask)
            logits = self.classifier(self.pooler(hidden))
            if labels is not None:
                per_expert_losses.append(
                    criterion(logits.view(-1, self.num_labels), labels.view(-1)))
            per_expert_logits.append(logits)
        if labels is not None:
            total_loss = torch.sum(torch.stack(per_expert_losses), dim=0)
        else:
            # Empty placeholder tensor; callers ignore the loss at inference.
            total_loss = torch.Tensor(0)
        avg_logits = torch.sum(torch.stack(per_expert_logits), dim=0) / self.count
        return total_loss, avg_logits
class BertForABSA(BertPreTrainedModel):
    """BERT for aspect sentiment classification with a GRoIE head."""
    def __init__(self, config, num_labels=3):
        super(BertForABSA, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        # Aggregate predictions from the top 4 encoder layers.
        self.groie = GRoIE(4, config, num_labels)
        self.apply(self.init_bert_weights)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Keep all encoder layers so the GRoIE head can read the top ones.
        layers, _, mask = self.bert(input_ids, token_type_ids,
                                    attention_mask=attention_mask,
                                    output_all_encoded_layers=True)
        loss, logits = self.groie(layers, mask, labels)
        # With labels: training loss. Without labels: averaged logits.
        return loss if labels is not None else logits
def train(args):
    """Fine-tune BertForABSA on the ASC training split.

    Builds the training (and, with ``args.do_valid``, validation) dataloaders,
    trains for ``args.num_train_epochs`` epochs with manual linear LR
    warmup/decay, and saves checkpoints under ``args.output_dir``. When
    validating, per-epoch predictions are written via :func:`test` and the
    loss history is dumped to ``valid.json``. Requires a CUDA device (model
    and batches are moved with ``.cuda()``).
    """
    processor = data_utils.AscProcessor()
    label_list = processor.get_labels()
    tokenizer = ABSATokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model])
    train_examples = processor.get_train_examples(args.data_dir)
    num_train_steps = int(len(train_examples) / args.train_batch_size) * args.num_train_epochs
    train_features = data_utils.convert_examples_to_features(
        train_examples, label_list, args.max_seq_length, tokenizer, "asc")
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Batch size = %d", args.train_batch_size)
    logger.info("  Num steps = %d", num_train_steps)
    # Materialize the features as one tensor per field.
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_label_ids)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    #>>>>> validation
    if args.do_valid:
        valid_examples = processor.get_dev_examples(args.data_dir)
        valid_features=data_utils.convert_examples_to_features(
            valid_examples, label_list, args.max_seq_length, tokenizer, "asc")
        valid_all_input_ids = torch.tensor([f.input_ids for f in valid_features], dtype=torch.long)
        valid_all_segment_ids = torch.tensor([f.segment_ids for f in valid_features], dtype=torch.long)
        valid_all_input_mask = torch.tensor([f.input_mask for f in valid_features], dtype=torch.long)
        valid_all_label_ids = torch.tensor([f.label_id for f in valid_features], dtype=torch.long)
        valid_data = TensorDataset(valid_all_input_ids, valid_all_segment_ids, valid_all_input_mask, valid_all_label_ids)
        logger.info("***** Running validations *****")
        logger.info("  Num orig examples = %d", len(valid_examples))
        logger.info("  Num split examples = %d", len(valid_features))
        logger.info("  Batch size = %d", args.train_batch_size)
        valid_sampler = SequentialSampler(valid_data)
        valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=args.train_batch_size)
        best_valid_loss=float('inf')
        valid_losses=[]
    #<<<<< end of validation declaration
    model = BertForABSA.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model], num_labels=len(label_list))
    model.cuda()
    # Prepare optimizer
    param_optimizer = [(k, v) for k, v in model.named_parameters() if v.requires_grad==True]
    # Pooler parameters are excluded from optimization.
    param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # Weight decay for all parameters except biases and LayerNorm weights.
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    t_total = num_train_steps
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=t_total)
    global_step = 0
    model.train()
    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            batch = tuple(t.cuda() for t in batch)
            input_ids, segment_ids, input_mask, label_ids = batch
            optimizer.zero_grad()
            loss = model(input_ids, segment_ids, input_mask, label_ids)
            loss.backward()
            # Manual LR schedule applied on top of BertAdam's own schedule.
            lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_this_step
            optimizer.step()
            global_step += 1
        # NOTE: prints only the loss of the last batch of the epoch.
        print("training loss: ", loss.item(), epoch+1)
        #>>>> perform validation at the end of each epoch.
        # Per-epoch working directory (fails if it already exists).
        new_dirs = os.path.join(args.output_dir, str(epoch+1))
        os.mkdir(new_dirs)
        if args.do_valid:
            model.eval()
            with torch.no_grad():
                losses=[]
                valid_size=0
                for step, batch in enumerate(valid_dataloader):
                    batch = tuple(t.cuda() for t in batch) # multi-gpu does scattering it-self
                    input_ids, segment_ids, input_mask, label_ids = batch
                    loss = model(input_ids, segment_ids, input_mask, label_ids)
                    # Weight each batch loss by its batch size for a true mean.
                    losses.append(loss.data.item()*input_ids.size(0) )
                    valid_size+=input_ids.size(0)
            valid_loss=sum(losses)/valid_size
            logger.info("validation loss: %f, epoch: %d", valid_loss, epoch+1)
            valid_losses.append(valid_loss)
            # Save a temporary checkpoint so test() can load it, then remove it.
            torch.save(model, os.path.join(new_dirs, "model.pt"))
            test(args, new_dirs, dev_as_test=True)
            if epoch == args.num_train_epochs-1:
                # Final epoch: keep a checkpoint and evaluate on the real test set.
                torch.save(model, os.path.join(args.output_dir, "model.pt"))
                test(args, args.output_dir, dev_as_test=False)
            os.remove(os.path.join(new_dirs, "model.pt"))
            if valid_loss<best_valid_loss:
                best_valid_loss=valid_loss
            model.train()
    if args.do_valid:
        with open(os.path.join(args.output_dir, "valid.json"), "w") as fw:
            json.dump({"valid_losses": valid_losses}, fw)
    else:
        torch.save(model, os.path.join(args.output_dir, "model.pt") )
def test(args, new_dirs=None, dev_as_test=None): # Load a fine-tuned model and evaluate it (runs on GPU via .cuda())
    """Run inference with a saved checkpoint and dump predictions to JSON.

    Loads ``model.pt`` from ``new_dirs`` (must be a valid directory —
    NOTE(review): a None value would crash in os.path.join; confirm all
    callers pass it), evaluates on the test split — or on
    ``<data_dir>/dev_as_test`` when ``dev_as_test`` is truthy — and writes
    logits plus gold label ids to ``<new_dirs>/predictions.json``.
    """
    processor = data_utils.AscProcessor()
    label_list = processor.get_labels()
    tokenizer = BertTokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model])
    if dev_as_test:
        # Evaluate the dev split as if it were the test set (model selection).
        data_dir = os.path.join(args.data_dir, 'dev_as_test')
    else:
        data_dir = args.data_dir
    eval_examples = processor.get_test_examples(data_dir)
    eval_features = data_utils.convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, "asc")
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(eval_examples))
    logger.info("  Batch size = %d", args.eval_batch_size)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_label_ids)
    # Run prediction for full data
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    model = torch.load(os.path.join(new_dirs, "model.pt"))
    model.cuda()
    model.eval()
    full_logits=[]
    full_label_ids=[]
    for step, batch in enumerate(eval_dataloader):
        batch = tuple(t.cuda() for t in batch)
        input_ids, segment_ids, input_mask, label_ids = batch
        with torch.no_grad():
            # No labels passed, so the model returns logits only.
            logits = model(input_ids, segment_ids, input_mask)
        logits = logits.detach().cpu().numpy()
        label_ids = label_ids.cpu().numpy()
        full_logits.extend(logits.tolist())
        full_label_ids.extend(label_ids.tolist())
    output_eval_json = os.path.join(new_dirs, "predictions.json")
    with open(output_eval_json, "w") as fw:
        json.dump({"logits": full_logits, "label_ids": full_label_ids}, fw)
def main():
    """Parse CLI arguments, seed the RNGs, and dispatch training/evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--bert_model", default='bert-base', type=str)
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir containing json files.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_valid",
                        default=False,
                        action='store_true',
                        # BUGFIX: help text was copy-pasted from --do_train.
                        help="Whether to run validation on the dev set during training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=6,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help="random seed for initialization")
    args = parser.parse_args()
    # Seed every RNG in play for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    os.makedirs(args.output_dir, exist_ok=True)
    if args.do_train:
        train(args)
    if args.do_eval:
        # BUGFIX: test() was called without a checkpoint directory, which
        # crashed in os.path.join(None, "model.pt"). Evaluate the final model
        # saved under args.output_dir on the real test split.
        test(args, args.output_dir, dev_as_test=False)
# Allow running this module directly as a script.
if __name__=="__main__":
    main()
08f348ce7cda470e3202e7ab52d5395eca2b3b37 | 4,117 | py | Python | bcs-ui/backend/tests/dashboard/workloads/test_pod.py | masanqi/bk-bcs | 70d97b674fbd5beacde21d6ca8be914d7eb56865 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/tests/dashboard/workloads/test_pod.py | masanqi/bk-bcs | 70d97b674fbd5beacde21d6ca8be914d7eb56865 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/tests/dashboard/workloads/test_pod.py | masanqi/bk-bcs | 70d97b674fbd5beacde21d6ca8be914d7eb56865 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import time
import mock
import pytest
from backend.dashboard.examples.utils import load_demo_manifest
from backend.tests.conftest import TEST_NAMESPACE
from backend.tests.dashboard.conftest import DASHBOARD_API_URL_COMMON_PREFIX as DAU_PREFIX
from backend.utils.basic import getitems
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db
class TestPod:
    """Tests for the Pod-related dashboard APIs."""
    # Shared fixture data, evaluated once at class-definition time.
    manifest = load_demo_manifest('workloads/simple_pod')
    name = getitems(manifest, 'metadata.name')
    batch_url = f'{DAU_PREFIX}/workloads/pods/'
    detail_url = f'{DAU_PREFIX}/namespaces/{TEST_NAMESPACE}/workloads/pods/{name}/'
    def test_create(self, api_client):
        """Test the resource creation API."""
        response = api_client.post(self.batch_url, data={'manifest': self.manifest})
        assert response.json()['code'] == 0
    def test_list(self, api_client):
        """Test the resource list API."""
        response = api_client.get(self.batch_url)
        assert response.json()['code'] == 0
        assert response.data['manifest']['kind'] == 'PodList'
    def test_retrieve(self, api_client):
        """Test fetching a single resource."""
        response = api_client.get(self.detail_url)
        assert response.json()['code'] == 0
        assert response.data['manifest']['kind'] == 'Pod'
    def test_destroy(self, api_client):
        """Test deleting a single resource."""
        response = api_client.delete(self.detail_url)
        assert response.json()['code'] == 0
    def test_list_pod_pvcs(self, api_client, patch_pod_client):
        """Test listing the PersistentVolumeClaims associated with a Pod."""
        response = api_client.get(f'{self.detail_url}pvcs/')
        assert response.json()['code'] == 0
    def test_list_pod_configmaps(self, api_client, patch_pod_client):
        """Test listing the ConfigMaps associated with a Pod."""
        response = api_client.get(f'{self.detail_url}configmaps/')
        assert response.json()['code'] == 0
    def test_list_pod_secrets(self, api_client, patch_pod_client):
        """Test listing the Secrets associated with a Pod."""
        response = api_client.get(f'{self.detail_url}secrets/')
        assert response.json()['code'] == 0
    @mock.patch('backend.dashboard.workloads.views.pod.validate_cluster_perm', new=lambda *args, **kwargs: True)
    def test_reschedule(self, api_client):
        """
        Test rescheduling a Pod.
        TODO: consider mocking the cluster-side operations and only verifying the API behaviour.
        """
        # Create a Pod that has a parent resource so rescheduling can be tested.
        deploy_manifest = load_demo_manifest('workloads/simple_deployment')
        deploy_name = deploy_manifest['metadata']['name']
        api_client.post(f'{DAU_PREFIX}/workloads/deployments/', data={'manifest': deploy_manifest})
        # Wait for the Deployment's Pods to be created.
        time.sleep(3)
        # Find the name of the first Pod owned by the Deployment.
        resp = api_client.get(
            f'{DAU_PREFIX}/namespaces/{TEST_NAMESPACE}/workloads/pods/',
            data={'label_selector': 'app=nginx', 'owner_kind': 'Deployment', 'owner_name': deploy_name},
        )
        pods = getitems(resp.json(), 'data.manifest.items', [])
        pod_name = getitems(pods[0], 'metadata.name')
        resp = api_client.put(f'{DAU_PREFIX}/namespaces/{TEST_NAMESPACE}/workloads/pods/{pod_name}/reschedule/')
        assert resp.json()['code'] == 0
        assert getitems(resp.json(), 'data.metadata.name') == pod_name
        # Clean up the resources created for this test.
        resp = api_client.delete(f'{DAU_PREFIX}/namespaces/{TEST_NAMESPACE}/workloads/deployments/{deploy_name}/')
        assert resp.json()['code'] == 0
1cd2cad23639e23a7d2242f6b013484e468be573 | 13,122 | py | Python | sdk/python/pulumi_azure_nextgen/apimanagement/v20170301/api.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/v20170301/api.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/v20170301/api.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Api']
class Api(pulumi.CustomResource):
    """Azure API Management ``Api`` resource (API version 2017-03-01).

    Generated by the Pulumi SDK generator; do not edit by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_id: Optional[pulumi.Input[str]] = None,
                 api_revision: Optional[pulumi.Input[str]] = None,
                 api_type: Optional[pulumi.Input[Union[str, 'ApiType']]] = None,
                 api_version: Optional[pulumi.Input[str]] = None,
                 api_version_set: Optional[pulumi.Input[pulumi.InputType['ApiVersionSetContractArgs']]] = None,
                 api_version_set_id: Optional[pulumi.Input[str]] = None,
                 authentication_settings: Optional[pulumi.Input[pulumi.InputType['AuthenticationSettingsContractArgs']]] = None,
                 content_format: Optional[pulumi.Input[Union[str, 'ContentFormat']]] = None,
                 content_value: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 protocols: Optional[pulumi.Input[Sequence[pulumi.Input['Protocol']]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 service_name: Optional[pulumi.Input[str]] = None,
                 service_url: Optional[pulumi.Input[str]] = None,
                 subscription_key_parameter_names: Optional[pulumi.Input[pulumi.InputType['SubscriptionKeyParameterNamesContractArgs']]] = None,
                 wsdl_selector: Optional[pulumi.Input[pulumi.InputType['ApiCreateOrUpdatePropertiesWsdlSelectorArgs']]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        API details.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
        :param pulumi.Input[str] api_revision: Describes the Revision of the Api. If no value is provided, default revision 1 is created
        :param pulumi.Input[Union[str, 'ApiType']] api_type: Type of API.
        :param pulumi.Input[str] api_version: Indicates the Version identifier of the API if the API is versioned
        :param pulumi.Input[pulumi.InputType['ApiVersionSetContractArgs']] api_version_set: Api Version Set Contract details.
        :param pulumi.Input[str] api_version_set_id: A resource identifier for the related ApiVersionSet.
        :param pulumi.Input[pulumi.InputType['AuthenticationSettingsContractArgs']] authentication_settings: Collection of authentication settings included into this API.
        :param pulumi.Input[Union[str, 'ContentFormat']] content_format: Format of the Content in which the API is getting imported.
        :param pulumi.Input[str] content_value: Content value when Importing an API.
        :param pulumi.Input[str] description: Description of the API. May include HTML formatting tags.
        :param pulumi.Input[str] display_name: API name.
        :param pulumi.Input[str] path: Relative URL uniquely identifying this API and all of its resource paths within the API Management service instance. It is appended to the API endpoint base URL specified during the service instance creation to form a public URL for this API.
        :param pulumi.Input[Sequence[pulumi.Input['Protocol']]] protocols: Describes on which protocols the operations in this API can be invoked.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] service_name: The name of the API Management service.
        :param pulumi.Input[str] service_url: Absolute URL of the backend service implementing this API.
        :param pulumi.Input[pulumi.InputType['SubscriptionKeyParameterNamesContractArgs']] subscription_key_parameter_names: Protocols over which API is made available.
        :param pulumi.Input[pulumi.InputType['ApiCreateOrUpdatePropertiesWsdlSelectorArgs']] wsdl_selector: Criteria to limit import of WSDL to a subset of the document.
        """
        # Legacy __name__/__opts__ arguments are accepted for backwards
        # compatibility but deprecated in favor of resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build props.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['api_id'] = api_id
            __props__['api_revision'] = api_revision
            __props__['api_type'] = api_type
            __props__['api_version'] = api_version
            __props__['api_version_set'] = api_version_set
            __props__['api_version_set_id'] = api_version_set_id
            __props__['authentication_settings'] = authentication_settings
            __props__['content_format'] = content_format
            __props__['content_value'] = content_value
            __props__['description'] = description
            __props__['display_name'] = display_name
            if path is None and not opts.urn:
                raise TypeError("Missing required property 'path'")
            __props__['path'] = path
            __props__['protocols'] = protocols
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if service_name is None and not opts.urn:
                raise TypeError("Missing required property 'service_name'")
            __props__['service_name'] = service_name
            __props__['service_url'] = service_url
            __props__['subscription_key_parameter_names'] = subscription_key_parameter_names
            __props__['wsdl_selector'] = wsdl_selector
            # Output-only properties start as None and are filled by the engine.
            __props__['is_current'] = None
            __props__['is_online'] = None
            __props__['name'] = None
            __props__['type'] = None
        # Register aliases so this resource matches other API versions of the
        # same type during updates.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20160707:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20161010:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Api"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Api")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Api, __self__).__init__(
            'azure-nextgen:apimanagement/v20170301:Api',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Api':
        """
        Get an existing Api resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # NOTE: the generated get() passes no input properties; state is
        # resolved by the engine from the provider using the given id.
        __props__ = dict()
        return Api(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="apiRevision")
    def api_revision(self) -> pulumi.Output[Optional[str]]:
        """
        Describes the Revision of the Api. If no value is provided, default revision 1 is created
        """
        return pulumi.get(self, "api_revision")
    @property
    @pulumi.getter(name="apiType")
    def api_type(self) -> pulumi.Output[Optional[str]]:
        """
        Type of API.
        """
        return pulumi.get(self, "api_type")
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> pulumi.Output[Optional[str]]:
        """
        Indicates the Version identifier of the API if the API is versioned
        """
        return pulumi.get(self, "api_version")
    @property
    @pulumi.getter(name="apiVersionSet")
    def api_version_set(self) -> pulumi.Output[Optional['outputs.ApiVersionSetContractResponse']]:
        """
        Api Version Set Contract details.
        """
        return pulumi.get(self, "api_version_set")
    @property
    @pulumi.getter(name="apiVersionSetId")
    def api_version_set_id(self) -> pulumi.Output[Optional[str]]:
        """
        A resource identifier for the related ApiVersionSet.
        """
        return pulumi.get(self, "api_version_set_id")
    @property
    @pulumi.getter(name="authenticationSettings")
    def authentication_settings(self) -> pulumi.Output[Optional['outputs.AuthenticationSettingsContractResponse']]:
        """
        Collection of authentication settings included into this API.
        """
        return pulumi.get(self, "authentication_settings")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description of the API. May include HTML formatting tags.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[Optional[str]]:
        """
        API name.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="isCurrent")
    def is_current(self) -> pulumi.Output[bool]:
        """
        Indicates if API revision is current api revision.
        """
        return pulumi.get(self, "is_current")
    @property
    @pulumi.getter(name="isOnline")
    def is_online(self) -> pulumi.Output[bool]:
        """
        Indicates if API revision is accessible via the gateway.
        """
        return pulumi.get(self, "is_online")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def path(self) -> pulumi.Output[str]:
        """
        Relative URL uniquely identifying this API and all of its resource paths within the API Management service instance. It is appended to the API endpoint base URL specified during the service instance creation to form a public URL for this API.
        """
        return pulumi.get(self, "path")
    @property
    @pulumi.getter
    def protocols(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Describes on which protocols the operations in this API can be invoked.
        """
        return pulumi.get(self, "protocols")
    @property
    @pulumi.getter(name="serviceUrl")
    def service_url(self) -> pulumi.Output[Optional[str]]:
        """
        Absolute URL of the backend service implementing this API.
        """
        return pulumi.get(self, "service_url")
    @property
    @pulumi.getter(name="subscriptionKeyParameterNames")
    def subscription_key_parameter_names(self) -> pulumi.Output[Optional['outputs.SubscriptionKeyParameterNamesContractResponse']]:
        """
        Protocols over which API is made available.
        """
        return pulumi.get(self, "subscription_key_parameter_names")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
d75bf36b7559d6072b02130f9c143eceeea0cf2d | 2,991 | py | Python | cookbook/views/data.py | mhoellmann/recipes | 525aa4e4a4f218a47e1770498fff9fa8b0d7a097 | [
"MIT"
] | null | null | null | cookbook/views/data.py | mhoellmann/recipes | 525aa4e4a4f218a47e1770498fff9fa8b0d7a097 | [
"MIT"
] | 1 | 2020-04-11T09:47:20.000Z | 2020-04-11T09:47:20.000Z | cookbook/views/data.py | mcejp/recipes | 913d858473a1d44b2ced02e09fddfc4d320848b7 | [
"MIT"
] | null | null | null | from datetime import datetime
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.utils.translation import ngettext
from django_tables2 import RequestConfig
from cookbook.forms import SyncForm, BatchEditForm
from cookbook.models import *
from cookbook.tables import SyncTable
@login_required
def sync(request):
    """List the monitored sync paths and let the user register a new one."""
    if request.method == "POST":
        form = SyncForm(request.POST)
        if form.is_valid():
            Sync(
                path=form.cleaned_data['path'],
                storage=form.cleaned_data['storage'],
                last_checked=datetime.now(),
            ).save()
            return redirect('data_sync')
    else:
        form = SyncForm()
    table = SyncTable(Sync.objects.all())
    RequestConfig(request, paginate={'per_page': 25}).configure(table)
    return render(request, 'batch/monitor.html', {'form': form, 'monitored_paths': table})
@login_required
def sync_wait(request):
    """Render the waiting page shown while a sync is in progress."""
    return render(request, 'batch/waiting.html')
@login_required
def batch_import(request):
    """Turn every pending RecipeImport entry into a Recipe owned by the user."""
    for pending in RecipeImport.objects.all():
        recipe = Recipe(
            name=pending.name,
            file_path=pending.file_path,
            storage=pending.storage,
            file_uid=pending.file_uid,
            created_by=request.user,
        )
        recipe.save()
        # Remove the import record once it has been converted.
        pending.delete()
    return redirect('list_recipe_import')
@login_required
def batch_edit(request):
    """Search recipes by name and attach the selected keywords in bulk.

    On GET, renders the batch-edit form. On a valid POST, adds the chosen
    keywords to every recipe whose name contains the search term and reports
    how many recipes were updated.
    """
    if request.method == "POST":
        form = BatchEditForm(request.POST)
        if form.is_valid():
            word = form.cleaned_data['search']
            keywords = form.cleaned_data['keywords']
            recipes = Recipe.objects.filter(name__icontains=word)
            count = 0
            for recipe in recipes:
                edit = False
                # BUGFIX: the original checked keywords.__sizeof__() > 0, which
                # returns the object's size in bytes and is therefore always
                # true — every matched recipe was counted as edited even when
                # no keyword was selected. Queryset truthiness is the correct test.
                if keywords:
                    recipe.keywords.add(*list(keywords))
                    edit = True
                if edit:
                    count = count + 1
                    recipe.save()
            msg = ngettext(
                'Batch edit done. %(count)d recipe was updated.',
                'Batch edit done. %(count)d Recipes where updated.',
                count) % {
                'count': count,
            }
            messages.add_message(request, messages.SUCCESS, msg)
            return redirect('data_batch_edit')
    else:
        form = BatchEditForm()
    return render(request, 'batch/edit.html', {'form': form})
class Object(object):
    """Minimal attribute container used to pass ad-hoc values to templates."""
    pass
@login_required
def statistics(request):
    """Collect object counts for the statistics page and render it."""
    stats = Object()
    stats.recipes = Recipe.objects.count()
    stats.keywords = Keyword.objects.count()
    stats.recipe_import = RecipeImport.objects.count()
    stats.recipes_no_keyword = Recipe.objects.filter(keywords=None).count()
    return render(request, 'stats.html', {'counts': stats})
| 30.212121 | 160 | 0.638917 |
06852358b2c9bcaa134b1c5c31a4a7de3d028ae2 | 10,764 | py | Python | flow/scenarios/loop_merge/scenario.py | berkeleyflow/flow | bed5ec959aaf0eaa8dbc7fa03f0c3fd3f0184b80 | [
"MIT"
] | 16 | 2018-05-25T06:30:28.000Z | 2020-08-08T00:03:47.000Z | flow/scenarios/loop_merge/scenario.py | berkeleyflow/flow | bed5ec959aaf0eaa8dbc7fa03f0c3fd3f0184b80 | [
"MIT"
] | 46 | 2018-05-22T21:32:55.000Z | 2019-06-12T13:10:02.000Z | flow/scenarios/loop_merge/scenario.py | berkeleyflow/flow | bed5ec959aaf0eaa8dbc7fa03f0c3fd3f0184b80 | [
"MIT"
] | 6 | 2018-06-22T14:59:14.000Z | 2019-08-29T06:00:34.000Z | from flow.scenarios.base_scenario import Scenario
from flow.core.params import InitialConfig
from flow.core.traffic_lights import TrafficLights
from numpy import pi
import numpy as np
# Required network parameters for this scenario; __init__ raises KeyError if
# any of these keys is missing from net_params.additional_params.
ADDITIONAL_NET_PARAMS = {
    # radius of the loops
    "ring_radius": 50,
    # length of the straight edges connected the outer loop to the inner loop
    "lane_length": 75,
    # number of lanes in the inner loop
    "inner_lanes": 3,
    # number of lanes in the outer loop
    "outer_lanes": 2,
    # max speed limit in the network
    "speed_limit": 30,
    # resolution of the curved portions
    "resolution": 40,
}
class TwoLoopsOneMergingScenario(Scenario):
    """Scenario with two loops, one of which merges in and out of the other."""

    # NOTE(review): InitialConfig() / TrafficLights() as default arguments are
    # mutable defaults shared across calls — confirm callers never mutate them.
    def __init__(self, name, generator_class, vehicles, net_params,
                 initial_config=InitialConfig(),
                 traffic_lights=TrafficLights()):
        """Initializes a two loop scenario where one loop merging in and out of
        the other.

        Requires from net_params:
        - ring_radius: radius of the loops
        - lane_length: length of the straight edges connected the outer loop to
          the inner loop
        - inner_lanes: number of lanes in the inner loop
        - outer_lanes: number of lanes in the outer loop
        - speed_limit: max speed limit in the network
        - resolution: resolution of the curved portions

        See Scenario.py for description of params.
        """
        for p in ADDITIONAL_NET_PARAMS.keys():
            if p not in net_params.additional_params:
                raise KeyError('Network parameter "{}" not supplied'.format(p))

        radius = net_params.additional_params["ring_radius"]
        x = net_params.additional_params["lane_length"]

        self.junction_length = 0.3
        self.intersection_length = 25.5  # calibrate when the radius changes

        # Total network length: two straight lanes + two full ring halves +
        # intersection and junction segments.
        net_params.additional_params["length"] = \
            2 * x + 2 * pi * radius + \
            2 * self.intersection_length + 2 * self.junction_length

        num_vehicles = vehicles.num_vehicles
        # Vehicles whose type contains "merge" belong to the merge (outer) ring.
        num_merge_vehicles = sum("merge" in vehicles.get_state(veh_id, "type")
                                 for veh_id in vehicles.get_ids())
        self.n_inner_vehicles = num_merge_vehicles
        self.n_outer_vehicles = num_vehicles - num_merge_vehicles

        radius = net_params.additional_params["ring_radius"]
        length_loop = 2 * pi * radius
        self.length_loop = length_loop

        super().__init__(name, generator_class, vehicles, net_params,
                         initial_config, traffic_lights)

    def specify_edge_starts(self):
        """
        See parent class
        """
        r = self.net_params.additional_params["ring_radius"]
        lane_length = self.net_params.additional_params["lane_length"]

        # each half-ring is a semicircle of the loop
        ring_edgelen = pi * r

        edgestarts = [
            ("left", self.intersection_length),
            ("center", ring_edgelen + 2 * self.intersection_length),
            ("bottom", 2 * ring_edgelen + 2 * self.intersection_length),
            ("right", 2 * ring_edgelen + lane_length
             + 2 * self.intersection_length + self.junction_length),
            ("top", 3 * ring_edgelen + lane_length
             + 2 * self.intersection_length + 2 * self.junction_length)
        ]

        return edgestarts

    def specify_internal_edge_starts(self):
        """
        See parent class
        """
        r = self.net_params.additional_params["ring_radius"]
        lane_length = self.net_params.additional_params["lane_length"]

        ring_edgelen = pi * r

        internal_edgestarts = [
            (":top_left", 0),
            (":bottom_left", ring_edgelen + self.intersection_length),
            (":bottom_right", 2 * ring_edgelen + lane_length
             + 2 * self.intersection_length),
            (":top_right", 3 * ring_edgelen + lane_length
             + 2 * self.intersection_length + self.junction_length)
        ]

        return internal_edgestarts

    def gen_custom_start_pos(self, initial_config, num_vehicles, **kwargs):
        """
        See parent class

        Vehicles with the prefix "merge" are placed in the merge ring,
        while all other vehicles are placed in the ring.
        """
        x0 = initial_config.x0
        # changes to x0 in kwargs suggests a switch in between rollouts,
        # and so overwrites anything in initial_config
        if "x0" in kwargs:
            x0 = kwargs["x0"]

        # std-dev of optional Gaussian jitter added to each vehicle spacing
        random_scale = \
            self.initial_config.additional_params.get("gaussian_scale", 0)

        bunching = initial_config.bunching
        # changes to bunching in kwargs suggests a switch in between rollouts,
        # and so overwrites anything in initial_config
        if "bunching" in kwargs:
            bunching = kwargs["bunching"]

        merge_bunching = 0
        if "merge_bunching" in initial_config.additional_params:
            merge_bunching = initial_config.additional_params["merge_bunching"]

        # note: the num_vehicles parameter is deliberately overridden here with
        # the scenario-wide vehicle count
        num_vehicles = self.vehicles.num_vehicles
        num_merge_vehicles = \
            sum("merge" in self.vehicles.get_state(veh_id, "type")
                for veh_id in self.vehicles.get_ids())

        radius = self.net_params.additional_params["ring_radius"]
        lane_length = self.net_params.additional_params["lane_length"]

        startpositions = []
        startlanes = []
        length_loop = 2 * pi * radius

        # --- place the non-merge vehicles on the inner loop -----------------
        # ZeroDivisionError below means there are no non-merge vehicles;
        # in that case the inner-loop placement is simply skipped.
        try:
            increment_loop = \
                (self.length_loop - bunching) \
                * self.net_params.additional_params["inner_lanes"] \
                / (num_vehicles - num_merge_vehicles)

            # x = [x0] * initial_config.lanes_distribution
            if self.initial_config.additional_params.get("ring_from_right",
                                                         False):
                x = [dict(self.edgestarts)["right"]] * \
                    self.net_params.additional_params["inner_lanes"]
            else:
                x = [x0] * self.net_params.additional_params["inner_lanes"]
            car_count = 0
            lane_count = 0
            while car_count < num_vehicles - num_merge_vehicles:
                # collect the position and lane number of each new vehicle
                pos = self.get_edge(x[lane_count])

                # ensures that vehicles are not placed in an internal junction
                while pos[0] in dict(self.internal_edgestarts).keys():
                    # find the location of the internal edge in
                    # total_edgestarts, which has the edges ordered by position
                    edges = [tup[0] for tup in self.total_edgestarts]
                    indx_edge = next(i for i, edge in enumerate(edges)
                                     if edge == pos[0])

                    # take the next edge in the list, and place the car at the
                    # beginning of this edge
                    if indx_edge == len(edges) - 1:
                        next_edge_pos = self.total_edgestarts[0]
                    else:
                        next_edge_pos = self.total_edgestarts[indx_edge + 1]

                    x[lane_count] = next_edge_pos[1]
                    pos = (next_edge_pos[0], 0)

                startpositions.append(pos)
                startlanes.append(lane_count)

                # advance (with optional jitter), wrapping around the loop
                x[lane_count] = \
                    (x[lane_count] + increment_loop
                     + random_scale * np.random.randn()) % length_loop

                # increment the car_count and lane_num
                car_count += 1
                lane_count += 1
                # if the lane num exceeds the number of lanes the vehicles
                # should be distributed on in the network, reset
                if lane_count >= \
                        self.net_params.additional_params["inner_lanes"]:
                    lane_count = 0
        except ZeroDivisionError:
            pass

        # --- place the merge vehicles on the outer (merge) ring -------------
        length_merge = pi * radius + 2 * lane_length
        # ZeroDivisionError below means there are no merge vehicles.
        try:
            increment_merge = \
                (length_merge - merge_bunching) * \
                initial_config.lanes_distribution / num_merge_vehicles

            if self.initial_config.additional_params.get("merge_from_top",
                                                         False):
                x = [dict(self.edgestarts)["top"] - x0] * \
                    self.net_params.additional_params["outer_lanes"]
            else:
                x = [dict(self.edgestarts)["bottom"] - x0] * \
                    self.net_params.additional_params["outer_lanes"]
            car_count = 0
            lane_count = 0
            while car_count < num_merge_vehicles:
                # collect the position and lane number of each new vehicle
                pos = self.get_edge(x[lane_count])

                # ensures that vehicles are not placed in an internal junction
                while pos[0] in dict(self.internal_edgestarts).keys():
                    # find the location of the internal edge in
                    # total_edgestarts, which has the edges ordered by position
                    edges = [tup[0] for tup in self.total_edgestarts]
                    indx_edge = next(i for i, edge in enumerate(edges)
                                     if edge == pos[0])

                    # take the next edge in the list, and place the car at the
                    # beginning of this edge
                    if indx_edge == len(edges) - 1:
                        next_edge_pos = self.total_edgestarts[0]
                    else:
                        next_edge_pos = self.total_edgestarts[indx_edge + 1]

                    x[lane_count] = next_edge_pos[1]
                    pos = (next_edge_pos[0], 0)

                startpositions.append(pos)
                startlanes.append(lane_count)

                # merge vehicles walk backwards from "top" or forwards from
                # "bottom", depending on merge_from_top
                if self.initial_config.additional_params.get(
                        "merge_from_top", False):
                    x[lane_count] = x[lane_count] - increment_merge + \
                        random_scale * np.random.randn()
                else:
                    x[lane_count] = x[lane_count] + increment_merge + \
                        random_scale * np.random.randn()

                # increment the car_count and lane_num
                car_count += 1
                lane_count += 1
                # if the lane num exceeds the number of lanes the vehicles
                # should be distributed on in the network, reset
                # if lane_count >= self.initial_config.lane_distribution
                if lane_count >= \
                        self.net_params.additional_params["outer_lanes"]:
                    lane_count = 0
        except ZeroDivisionError:
            pass

        return startpositions, startlanes
1e1f27d60cbbea2b429057dfbe650b8af133fb26 | 3,359 | py | Python | config/conf.py | mimimimizuki/sabrina-hoppe-everyday-eye-movements-predict-personality | 731599ce8af977a727f2759aee5a54b18746d664 | [
"BSD-3-Clause"
] | 9 | 2020-05-30T16:34:10.000Z | 2022-02-21T05:57:47.000Z | config/conf.py | mimimimizuki/sabrina-hoppe-everyday-eye-movements-predict-personality | 731599ce8af977a727f2759aee5a54b18746d664 | [
"BSD-3-Clause"
] | 1 | 2020-03-17T12:16:15.000Z | 2020-03-18T14:37:10.000Z | config/conf.py | mimimimizuki/sabrina-hoppe-everyday-eye-movements-predict-personality | 731599ce8af977a727f2759aee5a54b18746d664 | [
"BSD-3-Clause"
] | 3 | 2019-04-19T14:04:08.000Z | 2021-10-21T11:17:28.000Z | import numpy as np
# global parameters
n_participants = 42
n_traits = 7
max_n_feat = 207
max_n_iter = 100
# window sizes -- presumably in seconds; verify against feature extraction code
all_window_sizes = [5, 15, 30, 45, 60, 75, 90, 105, 120, 135]
all_shop_window_sizes = [5, 15]  # at least 3/4 of the people have a time window in these times

# cross validation parameters
n_inner_folds = 3
n_outer_folds = 5

# Random Forest Parameters
tree_max_features = 15
tree_max_depth = 5
n_estimators = 100
max_n_jobs = 5
def get_step_size(window_size):
    """Return half of *window_size* (the sliding-window step).

    Even window sizes yield an int, odd ones a float (e.g. 30 -> 15,
    15 -> 7.5), so file names and arithmetic stay clean in both cases.
    """
    half = window_size / 2.0
    if window_size % 2 == 0:
        # whole-numbered half: report it as an int
        return int(half)
    return half
# relative paths (resolved against the current working directory)
data_folder = 'data'
info_folder = 'info'
feature_folder = 'features'
result_folder = 'results'
figure_folder = 'figures'

# metadata CSVs shipped with the dataset
annotation_path = info_folder + '/annotation.csv'
binned_personality_file = info_folder + '/binned_personality.csv'
personality_sex_age_file = info_folder + '/personality_sex_age.csv'
# load the personality trait names from file and map them to abbreviations
# Row 0 holds the CSV header; column 0 is skipped (presumably a participant
# id column -- TODO confirm against binned_personality.csv).
traitlabels = np.loadtxt(binned_personality_file, delimiter=',', dtype=str)[0, 1:]
def get_abbr(s):
    """Return the initials of the capitalised words in *s*.

    E.g. "Openness to Experience" -> "OE"; lower-case words contribute
    nothing.
    """
    initials = (word[0] for word in s.split())
    return ''.join(letter for letter in initials if letter.isupper())
# medium labels: abbreviate only multi-word trait names; short labels: the
# upper-case initials of every trait name
medium_traitlabels = [get_abbr(s) if (" " in s) else s for s in traitlabels]
short_traitlabels = [''.join(item[0] for item in tl.split() if item[0].isupper()) for tl in traitlabels]
# dynamically create relative paths for result files to create
def get_result_folder(annotation_val):
    # e.g. annotation_val=0 -> 'results/A0'
    return result_folder + '/A' + str(annotation_val)
def get_result_filename(annotation_val, trait, shuffle_labels, i, add_suffix=False):
    """Build the result file path for one (annotation, trait, run) combination.

    ``shuffle_labels`` marks a permutation-baseline run (``_rnd`` infix);
    ``i`` is the zero-padded repetition index; ``add_suffix`` appends
    ``.npz``.
    """
    parts = [get_result_folder(annotation_val), '/', short_traitlabels[trait]]
    if shuffle_labels:
        parts.append('_rnd')
    parts.append('_' + str(i).zfill(3))
    if add_suffix:
        parts.append('.npz')
    return ''.join(parts)
def get_feature_folder(participant):
    # per-participant feature directory, e.g. 'features/Participant07'
    return feature_folder + '/Participant' + str(participant).zfill(2)


def get_merged_feature_files(window_size):
    # (features, traits, ids) CSV triple for one window size
    return feature_folder + '/merged_features_' + str(window_size) + '.csv', feature_folder + '/merged_traits_' + str(window_size) + '.csv', feature_folder + '/merged_ids_' + str(window_size) + '.csv'


def get_data_folder(participant):
    # per-participant raw data directory, e.g. 'data/Participant07'
    return data_folder + '/Participant' + str(participant).zfill(2)


def get_window_times_file(participant, window_size):
    # .npy file holding the time windows for one participant/window size
    return get_feature_folder(participant) + "/window_times_" + str(window_size) + '.npy'


def get_window_features_file(participant, window_size):
    # .npy file holding the per-window feature matrix
    return get_feature_folder(participant) + "/window_features_" + str(window_size) + '.npy'


def get_overall_features_file(participant):
    # .npy file holding features computed over the whole recording
    return get_feature_folder(participant) + "/overall_features.npy"
# parameters for fixation/saccade detection
# units: thresholds presumably in the gaze data's coordinate units and
# seconds -- TODO confirm against the detection code
fixation_radius_threshold = 0.025
fixation_duration_threshold = 0.1
saccade_min_velocity = 2
max_saccade_duration = 0.5

# annotation constants (as given as arguments to train_classifier, and as used for file names in result_folder)
annotation_all = 0
annotation_ways = 1
annotation_shop = 2
annotation_values = [annotation_all, annotation_ways, annotation_shop]

# annotations used in merged_ids_* files in the feature_folder
# column 1
time_window_annotation_wayI = 1
time_window_annotation_shop = 2
time_window_annotation_wayII = 3
# column 2
time_window_annotation_halfI = 1
time_window_annotation_halfII = 2
59daea138b70578be821b4f97b465557f0cfa947 | 11,548 | py | Python | bokeh/application/application.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | bokeh/application/application.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | bokeh/application/application.py | g-parki/bokeh | 664ead5306bba64609e734d4105c8aa8cfb76d81 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the ``Application`` class.
Application instances are factories for creating new Bokeh Documents.
When a Bokeh server session is initiated, the Bokeh server asks the Application
for a new Document to service the session. To do this, the Application first
creates a new empty Document, then it passes this new Document to the
``modify_document`` method of each of its handlers. When all handlers have
updated the Document, it is used to service the user session.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from abc import ABCMeta, abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
ClassVar,
Dict,
List,
Tuple,
)
# Bokeh imports
from ..core.types import ID
from ..document import Document
from ..settings import settings
if TYPE_CHECKING:
from tornado.httputil import HTTPServerRequest
from ..server.session import ServerSession
from .handlers.handler import Handler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# public API of this module
__all__ = (
    'Application',
    'ServerContext',
    'SessionContext',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

# Type of a no-argument callback.
Callback = Callable[[], None]
class Application:
    ''' An Application is a factory for Document instances.

    .. autoclasstoc::

    '''

    # This is so that bokeh.io.show can check if a passed in object is an
    # Application without having to import Application directly. This module
    # depends on tornado and we have made a commitment that "basic" modules
    # will function without bringing in tornado.
    _is_a_bokeh_application_class: ClassVar[bool] = True

    _static_path: str | None
    _handlers: List[Handler]
    _metadata: Dict[str, Any] | None

    def __init__(self, *handlers: Handler, metadata: Dict[str, Any] | None = None) -> None:
        ''' Application factory.

        Args:
            handlers (seq[Handler]): List of handlers to call.
                The URL is taken from the first one only.

        Keyword Args:
            metadata (dict): arbitrary user-supplied JSON data to make available
                with the application.

                The server will provide a URL ``http://applicationurl/metadata``
                which returns a JSON blob of the form:

                .. code-block:: json

                    {
                        "data": {
                            "hi": "hi",
                            "there": "there"
                        },
                        "url": "/myapp"
                    }

                The user-supplied metadata is returned as-is under the
                ``"data"`` key in the blob.

        '''
        self._static_path = None
        self._handlers = []
        self._metadata = metadata
        # add() also validates/collects each handler's static path
        for h in handlers:
            self.add(h)

    # Properties --------------------------------------------------------------

    @property
    def handlers(self) -> Tuple[Handler, ...]:
        ''' The ordered list of handlers this Application is configured with.

        '''
        return tuple(self._handlers)

    @property
    def metadata(self) -> Dict[str, Any] | None:
        ''' Arbitrary user-supplied metadata to associate with this application.

        '''
        return self._metadata

    @property
    def safe_to_fork(self) -> bool:
        ''' Whether it is safe to fork the process after initializing this
        application (True only if every handler reports itself fork-safe).

        '''
        return all(handler.safe_to_fork for handler in self._handlers)

    @property
    def static_path(self) -> str | None:
        ''' Path to any (optional) static resources specified by handlers.

        '''
        return self._static_path

    # Public methods ----------------------------------------------------------

    def add(self, handler: Handler) -> None:
        ''' Add a handler to the pipeline used to initialize new documents.

        Args:
            handler (Handler) : a handler for this Application to use to
                process Documents

        '''
        self._handlers.append(handler)

        # make sure there is at most one static path
        static_paths = {h.static_path() for h in self.handlers}
        static_paths.discard(None)
        if len(static_paths) > 1:
            raise RuntimeError("More than one static path requested for app: %r" % list(static_paths))
        elif len(static_paths) == 1:
            self._static_path = static_paths.pop()
        else:
            self._static_path = None

    def create_document(self) -> Document:
        ''' Creates and initializes a document using the Application's handlers.

        '''
        doc = Document()
        self.initialize_document(doc)
        return doc

    def initialize_document(self, doc: Document) -> None:
        ''' Fills in a new document using the Application's handlers.

        Handlers run in the order they were added; a failed handler is
        logged but does not stop the remaining handlers.

        '''
        for h in self._handlers:
            # TODO (havocp) we need to check the 'failed' flag on each handler
            # and build a composite error display. In develop mode, we want to
            # somehow get these errors to the client.
            h.modify_document(doc)
            if h.failed:
                log.error("Error running application handler %r: %s %s ", h, h.error, h.error_detail)

        if settings.perform_document_validation():
            doc.validate()

    def on_server_loaded(self, server_context: ServerContext) -> None:
        ''' Invoked to execute code when a new session is created.

        This method calls ``on_server_loaded`` on each handler, in order,
        with the server context passed as the only argument.

        '''
        for h in self._handlers:
            h.on_server_loaded(server_context)

    def on_server_unloaded(self, server_context: ServerContext) -> None:
        ''' Invoked to execute code when the server cleanly exits. (Before
        stopping the server's ``IOLoop``.)

        This method calls ``on_server_unloaded`` on each handler, in order,
        with the server context passed as the only argument.

        .. warning::
            In practice this code may not run, since servers are often killed
            by a signal.

        '''
        for h in self._handlers:
            h.on_server_unloaded(server_context)

    async def on_session_created(self, session_context: SessionContext) -> None:
        ''' Invoked to execute code when a new session is created.

        This method calls ``on_session_created`` on each handler, in order,
        with the session context passed as the only argument.

        May return a ``Future`` which will delay session creation until the
        ``Future`` completes.

        '''
        for h in self._handlers:
            await h.on_session_created(session_context)
        return None

    async def on_session_destroyed(self, session_context: SessionContext) -> None:
        ''' Invoked to execute code when a session is destroyed.

        This method calls ``on_session_destroyed`` on each handler, in order,
        with the session context passed as the only argument.

        Afterwards, ``session_context.destroyed`` will be ``True``.

        '''
        for h in self._handlers:
            await h.on_session_destroyed(session_context)
        return None

    def process_request(self, request: HTTPServerRequest) -> Dict[str, Any]:
        ''' Processes incoming HTTP request returning a dictionary of
        additional data to add to the session_context.

        Args:
            request: HTTP request

        Returns:
            A dictionary of JSON serializable data to be included on
            the session context.

        '''
        request_data: Dict[str, Any] = {}
        # later handlers may overwrite keys produced by earlier ones
        for h in self._handlers:
            request_data.update(h.process_request(request))
        return request_data
class ServerContext(metaclass=ABCMeta):
    ''' A harness for server-specific information and tasks related to
    collections of Bokeh sessions.

    *This base class is probably not of interest to general users.*

    '''

    # Properties --------------------------------------------------------------

    @property
    @abstractmethod
    def sessions(self) -> List[ServerSession]:
        ''' ``SessionContext`` instances belonging to this application.

        *Subclasses must implement this method.*

        '''
        pass
class SessionContext(metaclass=ABCMeta):
    ''' A harness for server-specific information and tasks related to
    Bokeh sessions.

    *This base class is probably not of interest to general users.*

    '''

    _server_context: ServerContext
    _id: ID

    def __init__(self, server_context: ServerContext, session_id: ID) -> None:
        ''' Record the owning server context and this session's unique id.

        '''
        self._server_context = server_context
        self._id = session_id

    # Properties --------------------------------------------------------------

    @property
    @abstractmethod
    def destroyed(self) -> bool:
        ''' If ``True``, the session has been discarded and cannot be used.

        A new session with the same ID could be created later but this instance
        will not come back to life.

        '''
        pass

    @property
    def id(self) -> ID:
        ''' The unique ID for the session associated with this context.

        '''
        return self._id

    @property
    def server_context(self) -> ServerContext:
        ''' The server context for this session context

        '''
        return self._server_context

    # Public methods ----------------------------------------------------------

    @abstractmethod
    def with_locked_document(self, func: Callable[[Document], Awaitable[None]]) -> Awaitable[None]:
        ''' Runs a function with the document lock held, passing the
        document to the function.

        *Subclasses must implement this method.*

        Args:
            func (callable): function that takes a single parameter (the Document)
                and returns ``None`` or a ``Future``

        Returns:
            a ``Future`` containing the result of the function

        '''
        pass
# Signature of callbacks invoked when a session is destroyed.
SessionDestroyedCallback = Callable[[SessionContext], None]
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 31.812672 | 102 | 0.542085 |
129a0a2f94c9cf24bd93a10b65e1ab0ec0d2bd9f | 4,310 | py | Python | sandbox/settings/base.py | maerteijn/wagtail-email-subscription | d5420aed0b133f9eced3a59425f63ae6b767d686 | [
"MIT"
] | 2 | 2021-05-03T14:56:07.000Z | 2021-07-22T06:38:22.000Z | sandbox/settings/base.py | maerteijn/wagtail-email-subscription | d5420aed0b133f9eced3a59425f63ae6b767d686 | [
"MIT"
] | 10 | 2021-05-04T10:44:17.000Z | 2022-03-25T14:53:30.000Z | sandbox/settings/base.py | maerteijn/wagtail-email-subscription | d5420aed0b133f9eced3a59425f63ae6b767d686 | [
"MIT"
] | 1 | 2021-05-07T09:42:05.000Z | 2021-05-07T09:42:05.000Z | """
Django settings for sandbox project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# (sandbox-only key; never reuse outside local development)
SECRET_KEY = "django-insecure-lbe!z1hp#*_)0z*x!^^sm7&rznn((^2@pko#z6suoq4(1h#zj="

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    # Wagtail CMS apps
    "wagtail.contrib.forms",
    "wagtail.contrib.redirects",
    "wagtail.contrib.settings",
    "wagtail.embeds",
    "wagtail.sites",
    "wagtail.users",
    "wagtail.snippets",
    "wagtail.documents",
    "wagtail.images",
    "wagtail.search",
    "wagtail.admin",
    "wagtail.core",
    # Wagtail dependencies
    "modelcluster",
    "taggit",
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # the package under development plus its demo/chooser helpers
    "wagtail_email_subscription",
    "wagtail_email_subscription.contrib.formpage",
    "generic_chooser",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "wagtail.contrib.redirects.middleware.RedirectMiddleware",
]

ROOT_URLCONF = "urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "wagtail.contrib.settings.context_processors.settings",
            ],
        },
    },
]

WSGI_APPLICATION = "wsgi.application"


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": BASE_DIR / "db.sqlite3",
    }
}

# Logging: everything to the console at DEBUG level (sandbox only)
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
        },
    },
    "root": {
        "handlers": ["console"],
        "level": "DEBUG",
    },
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "Europe/Amsterdam"

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = "/static/"

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"

# Wagtail settings

WAGTAIL_SITE_NAME = "mysite"

# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = "http://example.com"
| 25.502959 | 91 | 0.683063 |
29ed476f6c184936e591c9b7ff3ba306a1996f91 | 2,249 | py | Python | baconian/benchmark/run_benchmark.py | yitongx/baconian-public | a67e23c6bc6bfe7019ec9532a3d18f06aed6bbbb | [
"MIT"
] | 69 | 2020-01-31T17:44:43.000Z | 2022-03-28T13:09:11.000Z | baconian/benchmark/run_benchmark.py | yitongx/baconian-project | e84508da60877e387344133a11039edaac35c5bf | [
"MIT"
] | 5 | 2019-04-28T07:24:26.000Z | 2020-01-29T01:49:51.000Z | baconian/benchmark/run_benchmark.py | yitongx/baconian-project | e84508da60877e387344133a11039edaac35c5bf | [
"MIT"
] | 6 | 2019-05-04T02:18:11.000Z | 2019-12-04T22:05:52.000Z | from baconian.benchmark.ddpg_benchmark import mountiancar_task_fn, pendulum_task_fn
from baconian.benchmark.dyna_benchmark import dyna_pendulum_task_fn
from baconian.benchmark.mpc_benchmark import mpc_pendulum_task_fn
from baconian.benchmark.ppo_benchmark import inverted_double_pendulum_bullet_env_task_fn, \
inverted_pendulum_bullet_env_task_fn, pendulum_env_task_fn, half_cheetah_bullet_env_task_fn
from baconian.benchmark.iLQR_benchmark import ilqr_pendulum_task_fn
from baconian.benchmark.dqn_benchmark import acrobot_task_fn, lunarlander_task_fn
import argparse
import os
import time
from baconian.config.global_config import GlobalConfig
from baconian.core.experiment_runner import duplicate_exp_runner
# Command-line entry point: map an (env_id, algo) pair to its benchmark task
# function and run it `count` times under a timestamped log directory.
arg = argparse.ArgumentParser()

# Supported environment/algorithm combinations; each leaf is a task function.
env_id_to_task_fn = {
    'Pendulum-v0': {
        'ddpg': pendulum_task_fn,
        'dyna': dyna_pendulum_task_fn,
        'mpc': mpc_pendulum_task_fn,
        'ppo': pendulum_env_task_fn,
        'ilqr': ilqr_pendulum_task_fn
    },
    'MountainCarContinuous-v0': {
        'ddpg': mountiancar_task_fn,
    },
    'HalfCheetahBulletEnv-v0': {
        'ppo': half_cheetah_bullet_env_task_fn,
    },
    'Acrobot-v1': {
        'dqn': acrobot_task_fn,
    },
    'LunarLander-v2': {
        'dqn': lunarlander_task_fn,
    },
    'InvertedPendulumBulletEnv-v0': {
        'ppo': inverted_pendulum_bullet_env_task_fn
    },
    'InvertedDoublePendulumBulletEnv-v0': {
        'ppo': inverted_double_pendulum_bullet_env_task_fn
    }
}

# fixed misspelled name (was `alog_list`)
algo_list = ['ddpg', 'dyna', 'mpc', 'ppo', 'ilqr', 'dqn']

arg.add_argument('--env_id', type=str, choices=list(env_id_to_task_fn.keys()))
arg.add_argument('--algo', type=str, choices=algo_list)
arg.add_argument('--count', type=int, default=1)
arg.add_argument('--cuda_id', type=int, default=-1)

args = arg.parse_args()

if __name__ == '__main__':
    CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
    # Log under benchmark_log/<env>/<algo>/<timestamp> so repeated runs never
    # collide. (Removed an unused `ExpRootPath` local.)
    GlobalConfig().set('DEFAULT_LOG_PATH', os.path.join(CURRENT_PATH, 'benchmark_log', args.env_id, args.algo,
                                                        time.strftime("%Y-%m-%d_%H-%M-%S")))
    duplicate_exp_runner(args.count, env_id_to_task_fn[args.env_id][args.algo], gpu_id=args.cuda_id)
| 38.775862 | 110 | 0.724767 |
e28e1ea9ca89ee5300fb044ff9000ad4d8f4a86b | 25,794 | py | Python | ismore/tubingen/noninvasive_tubingen/eeg_feature_extraction.py | DerekYJC/bmi_python | 7b9cf3f294a33688db24b0863c1035e9cc6999ea | [
"Apache-2.0"
] | null | null | null | ismore/tubingen/noninvasive_tubingen/eeg_feature_extraction.py | DerekYJC/bmi_python | 7b9cf3f294a33688db24b0863c1035e9cc6999ea | [
"Apache-2.0"
] | 12 | 2020-07-31T18:58:31.000Z | 2022-02-10T14:36:00.000Z | ismore/tubingen/noninvasive_tubingen/eeg_feature_extraction.py | DerekYJC/bmi_python | 7b9cf3f294a33688db24b0863c1035e9cc6999ea | [
"Apache-2.0"
] | 4 | 2020-03-06T15:39:00.000Z | 2021-05-26T17:03:21.000Z | '''
Code for feature extraction methods/classes from EEG, to be used with a
decoder (similar to other types of feature extractors in riglib.bmi.extractor)
'''
from collections import OrderedDict
from scipy.signal import butter, lfilter
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
import spectrum
import time
#import math
# from riglib.filter import Filter
from ismore.filter import Filter
from copy import deepcopy
from riglib.bmi.extractor import FeatureExtractor
from utils.ringbuffer import RingBuffer
#from scipy.ndimage.filters import laplace
import nitime.algorithms as tsa
#from tools import nextpow2
#import statsmodels
# def levinson_durbin(s, nlags=10, isacov=False):
# '''Levinson-Durbin recursion for autoregressive processes
# Parameters
# ----------
# s : array_like
# If isacov is False, then this is the time series. If iasacov is true
# then this is interpreted as autocovariance starting with lag 0
# nlags : integer
# largest lag to include in recursion or order of the autoregressive
# process
# isacov : boolean
# flag to indicate whether the first argument, s, contains the
# autocovariances or the data series.
# Returns
# -------
# sigma_v : float
# estimate of the error variance ?
# arcoefs : ndarray
# estimate of the autoregressive coefficients
# pacf : ndarray
# partial autocorrelation function
# sigma : ndarray
# entire sigma array from intermediate result, last value is sigma_v
# phi : ndarray
# entire phi array from intermediate result, last column contains
# autoregressive coefficients for AR(nlags) with a leading 1
# Notes
# -----
# This function returns currently all results, but maybe we drop sigma and
# phi from the returns.
# If this function is called with the time series (isacov=False), then the
# sample autocovariance function is calculated with the default options
# (biased, no fft).
# '''
# s = np.asarray(s)
# order = nlags # rename compared to nitime
# #from nitime
# ##if sxx is not None and type(sxx) == np.ndarray:
# ## sxx_m = sxx[:order+1]
# ##else:
# ## sxx_m = ut.autocov(s)[:order+1]
# if isacov:
# sxx_m = s
# else:
# sxx_m = acovf(s)[:order + 1] # not tested
# phi = np.zeros((order + 1, order + 1), 'd')
# sig = np.zeros(order + 1)
# # initial points for the recursion
# phi[1, 1] = sxx_m[1] / sxx_m[0]
# sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
# for k in range(2, order + 1):
# phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
# sxx_m[1:k][::-1])) / sig[k-1]
# for j in range(1, k):
# phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
# sig[k] = sig[k-1] * (1 - phi[k, k]**2)
# sigma_v = sig[-1]
# arcoefs = phi[1:, -1]
# pacf_ = np.diag(phi).copy()
# pacf_[0] = 1.
# return sigma_v, arcoefs, pacf_, sig, phi # return everything
def extract_AR_psd(samples, freq_bands=None):  # freq_bands (in Hz)
    """Estimate the log power spectral density of one EEG channel via an AR model.

    An order-20 autoregressive model is fitted with the Levinson-Durbin
    recursion (nitime) and converted to a one-sided PSD with 1 Hz resolution
    (fs_down = 100 bins).  The log-PSD is returned either per 1 Hz bin, or
    averaged over the requested frequency band(s).

    Parameters
    ----------
    samples : np.ndarray of shape (n_time_points,)
        Observed EEG voltages in microvolts.
    freq_bands : list or None, optional
        None or [] returns the full log-PSD.  A single [low, high] band (Hz)
        returns the scalar mean log-power of that band.  A list of [low, high]
        bands returns one mean value per band.  Band edges are inclusive and
        indexed against the 1 Hz bins starting at 1 Hz (the DC bin is dropped,
        so returned bin i corresponds to (i + 1) Hz).

    Returns
    -------
    np.ndarray or float
        Log-PSD of shape (fs_down - 1,) when no bands are requested, a scalar
        for a single band, or shape (len(freq_bands),) for a list of bands.
    """
    # NOTE: the original used a mutable default argument (freq_bands=[]);
    # None is the safe equivalent and remains backward compatible.
    order = 20       # AR model order
    fs_down = 100    # downsampled rate in Hz; yields 1 Hz frequency bins

    # Fit AR coefficients with the Levinson-Durbin recursion and turn them
    # into a one-sided PSD estimate with fs_down frequency bins.
    ar_coeffs, sigma_v = tsa.autoregressive.AR_est_LD(samples, order)
    _, psd = tsa.autoregressive.AR_psd(ar_coeffs, sigma_v, fs_down, sides='onesided')

    # Drop the DC (0 Hz) bin and work in log-power.
    psd = np.log(psd[1:])

    if not freq_bands:
        return psd

    if isinstance(freq_bands[0], list):
        # Several bands requested: one mean log-power value per band.
        return np.hstack([np.mean(psd[low - 1:high]) for low, high in freq_bands])
    # Single [low, high] band: scalar mean log-power, as the original returned.
    return np.mean(psd[freq_bands[0] - 1:freq_bands[1]])
def extract_MTM_psd(samples, NW=3):
    """Placeholder for multitaper (MTM) spectral feature extraction.

    Registered in FEATURE_FUNCTIONS_DICT so the 'MTM' feature name resolves,
    but the estimator itself is not implemented yet; calling it is a no-op.

    Parameters
    ----------
    samples : np.ndarray of shape (n_channels, n_samples)
        Raw voltage time series (one per channel), currently unused.
    NW : int, optional
        Time-bandwidth product for the multitaper estimate, currently unused.

    Returns
    -------
    None
    """
    # TODO: compute a multitaper PSD (1 Hz resolution) and average the
    # requested frequency bins, mirroring extract_AR_psd's output contract.
    return None
# dictionary mapping feature names to the corresponding functions defined above;
# the extractor selects feature functions by these string keys (e.g. 'AR').
FEATURE_FUNCTIONS_DICT = {
'AR': extract_AR_psd,
'MTM': extract_MTM_psd,
}
# NEIGHBOUR_CHANNELS_DICT = { #define the neighbour channels for each channel (for the Laplacian filter)
# '1': [2,3,4],
# '2': [5,6],
# '3': [4,5],
# }
# Default neighbour map for the Laplacian spatial filter: empty unless the
# caller supplies its own channel -> [neighbour channels] mapping.
NEIGHBOUR_CHANNELS_DICT = {}
# NOTE(review): this module uses Python 2 print statements; it will not run
# unmodified under Python 3 despite the repository's language hint.
class EEGMultiFeatureExtractor(FeatureExtractor):
'''
Extract different types of EEG features from raw EEG voltages
'''
feature_type = 'eeg_multi_features'
# NOTE(review): several parameters use mutable defaults ([], {}); safe only as
# long as no code mutates them in place -- TODO confirm.
def __init__(self, source=None, channels_2train = [], eeg_channels=[], feature_names=FEATURE_FUNCTIONS_DICT.keys(), feature_fn_kwargs={}, win_len=0.5, fs=1000, neighbour_channels=NEIGHBOUR_CHANNELS_DICT, artifact_rejection = True, calibration_data = True, eog_coeffs = None, TH_lowF = [], TH_highF = [], bipolar_EOG = True): #brainamp_channels = []):
'''
Constructor for EEGMultiFeatureExtractor
Parameters
----------
source : MultiChanDataSource instance, optional, default=None
DataSource interface to separate process responsible for collecting data from the EEG recording system
channels : iterable of strings, optional, default=[]
Names of channels from which to extract data
feature_names : iterable, optional, default=[]
Types of features to include in the extractor's output. See FEATURE_FUNCTIONS_DICT for available options
feature_fn_kwargs : dict, optional, default={}
Optional kwargs to pass to the individual feature extractors
win_len : float, optional, default=0.5
Length of time (in seconds) of raw EEG data to use for feature extraction
fs : float, optional, default=1000
Sampling rate for the EEG data
Returns
-------
EEGMultiFeatureExtractor instance
'''
self.source = source
self.eeg_channels = eeg_channels #channels to use for the deocding online
#self.brainamp_channels = brainamp_channels # all the channels being recorded from the brainamp source and stored in the hdf file (raw+filt)
self.feature_names = feature_names
self.feature_fn_kwargs = feature_fn_kwargs
self.win_len = win_len
self.neighbour_channels = neighbour_channels
self.channels_2train = channels_2train
self.artifact_rejection = artifact_rejection
self.calibration_data = calibration_data
self.bipolar_EOG = bipolar_EOG
self.eog_coeffs = eog_coeffs
self.TH_lowF = TH_lowF
self.TH_highF = TH_highF
# If a live data source is provided, trust its update frequency over fs.
if source is not None:
self.fs = source.source.update_freq
else:
self.fs = fs
# ??? Everything unclear
# Count output features: one per (channel, frequency band) pair; the
# 'chan' prefix branch strips that prefix before the freq_bands lookup.
if channels_2train != []:
if 'chan' in self.channels_2train[0]:
self.n_features = np.sum([len(self.feature_fn_kwargs[self.feature_names[0]]['freq_bands'][i[4:]]) for i in self.channels_2train])
else:
self.n_features = np.sum([len(self.feature_fn_kwargs[self.feature_names[0]]['freq_bands'][i]) for i in self.channels_2train])
else:
print 'Warning: the "channels_2train" variable is empty!'
self.n_win_pts = int(self.win_len * self.fs)
# NOTE(review): n_channels is computed but never stored or used.
n_channels = len(self.eeg_channels)
def get_samples(self):
'''
Get samples from this extractor's MultiChanDataSource.
Parameters
----------
None
Returns
-------
Voltage samples of shape (n_channels, n_time_points)
'''
return self.source.get(self.n_win_pts, self.eeg_channels)['data']
def Laplacian_filter(self, samples):
'''
Apply a Laplacian spatial filter in place: each channel becomes itself
minus the mean of its neighbours (per self.neighbour_channels).
Parameters
----------
samples : np.ndarray of shape (n_channels, n_time_points)
Filtered (with BPfilter) EEG voltages. Laplacian filter will be applied to those
Returns
-------
samples : np.ndarray of shape (n_channels, n_time_points)
'''
#samples_copy = deepcopy(samples)
samples_copy = samples.copy()
#import pdb; pdb.set_trace()
# apply Laplacian spatial filter to each channel separately
# NOTE(review): indexing samples[neighbours]['data'] assumes a structured
# array keyed by channel name with a 'data' field -- TODO confirm.
for k, neighbours in enumerate(self.neighbour_channels): #for loop on number of electrodes
samples_laplace = samples_copy[neighbours].copy()
for n in range(len(self.neighbour_channels[neighbours])):
samples_laplace = np.vstack([samples_laplace, samples_copy[self.neighbour_channels[neighbours][n]]])
samples[neighbours]['data'] = samples_laplace[0,:]['data'] - np.mean(samples_laplace[1:,:]['data'], axis = 0)
#import pdb; pdb.set_trace()
return samples
def Laplacian_filter_online(self, samples):
'''
Online variant of the Laplacian filter: channels are located by their
position in self.eeg_channels (with or without a '_filt' suffix) rather
than by structured-array field name.
Parameters
----------
samples : np.ndarray of shape (n_channels, n_time_points)
Filtered (with BPfilter) EEG voltages. Laplacian filter will be applied to those
Returns
-------
samples : np.ndarray of shape (n_channels, n_time_points)
'''
#samples_copy = deepcopy(samples)
samples_copy = samples.copy()
# apply Laplacian spatial filter to each channel separately
for k, neighbours in enumerate(self.neighbour_channels): #for loop on number of electrodes
#import pdb; pdb.set_trace()
#index = neighbours.index('_')
#import pdb; pdb.set_trace()
try:
channel_index_in_samples = self.eeg_channels.index(neighbours)
except: # In case the non-filtered data was used for training
channel_index_in_samples = self.eeg_channels.index(neighbours + '_filt')
#print 'channel_index_in_samples', self.eeg_channels, channel_index_in_samples[0]
samples_laplace = samples_copy[channel_index_in_samples,:].copy()
# for kk, channel in enumerate(self.eeg_channels):
# if channel == neighbours:
# samples_laplace = samples_copy[kk,:].copy()
#samples_laplace = samples_copy[int(neighbours[:index])-1,:].copy()
for n in range(len(self.neighbour_channels[neighbours])):
try:
channel_index_in_samples2 = self.eeg_channels.index(self.neighbour_channels[neighbours][n])
except:# In case the non-filtered data was used for training. In that case the neighbour channels do not have the '_filt' ending
channel_index_in_samples2 = self.eeg_channels.index(self.neighbour_channels[neighbours][n]+ '_filt')
#index = self.neighbour_channels[neighbours][n].index('_')
# samples_laplace = np.vstack([samples_laplace, samples_copy[int(self.neighbour_channels[neighbours][n][:index])-1,:]])
samples_laplace = np.vstack([samples_laplace, samples_copy[channel_index_in_samples2,:]])
# samples[int(neighbours[:index])-1,:] = samples_laplace[0,:] - np.mean(samples_laplace[1:,:], axis = 0)
samples[channel_index_in_samples,:] = samples_laplace[0,:] - np.mean(samples_laplace[1:,:], axis = 0)
return samples
def extract_features(self, samples,chan_freq):
'''
Run every configured feature function on one channel's samples, using
the frequency bands configured for chan_freq in feature_fn_kwargs.
Parameters
----------
samples : np.ndarray of shape (n_channels, n_time_points)
Raw EEG voltages from which to extract features
Returns
-------
features : np.ndarray of shape (n_features, 1)
'''
# apply band-pass separately to each channel
# extract actual features
features = None
for name in self.feature_names:
fn = FEATURE_FUNCTIONS_DICT[name]
if name in self.feature_fn_kwargs:
kwargs = self.feature_fn_kwargs[name]
if kwargs['freq_bands'] != dict():#might give an error if kwargs doesnt have a key = freq_bands! Check!
freq_band = kwargs['freq_bands'][chan_freq]#changed
else:
freq_band = []
else:
kwargs = {} # empty dictionary of kwargs
#new_features = fn(samples, **kwargs)
# import pdb; pdb.set_trace()
new_features = fn(samples, freq_band)#changed
# NOTE(review): '== None' on an ndarray is elementwise; safe here only
# because features is the Python None object on the first iteration.
if features == None:
features = new_features
else:
features = np.vstack([features, new_features])
#import pdb; pdb.set_trace()
return features
#return features.reshape(-1)
def __call__(self):
'''
Get samples from this extractor's data source, filter them and extract features. Used for the online decoding
'''
# ??? fs_down?
# Downsample from self.fs (1000 Hz) to 100 Hz by taking every 10th sample.
self.fs_down = 100
samples = self.get_samples()
# If all samples are equal to 0, set samples to random values ranging from 0 to 1
if np.all(samples == 0):
print 'All eeg samples are "0"'
samples = np.random.rand(samples.shape[0],samples.shape[1])
# ???
samples = samples[:,np.arange(0,samples.shape[1],self.fs/self.fs_down)]
features_lowF = np.array([])
features_highF = np.array([])
#
# Perform online artifact removal
#
if self.artifact_rejection == 1:
# EOG artifact removal
if self.bipolar_EOG == True:
eog_v = samples[-1,:]
eog_h = samples[-2,:]
neog_channs = 2
else:
eog_v = samples[-2,:] - samples[-1,:]
eog_h = samples[-4,:] - samples[-3,:]
neog_channs = 4
# Remove the EOG channels from the samples
samples = samples[:-neog_channs,:]
# Removal of low and high frequency artifacts
# # Remove the last four channels taht are the EOG
# ??? again?
for chan in range(len(self.eeg_channels)-neog_channs):
# ??? What is extract_AR_psd doing?
# Mean log AR-PSD in 1-4 Hz (ocular/movement) and 30-48 Hz (muscle) bands.
lowF_feature_ch = extract_AR_psd(samples[chan,:], [1,4])
highF_feature_ch = extract_AR_psd(samples[chan,:], [30,48])
# Initialize or append features of current channel
# NOTE(review): these branches look inverted relative to the comment
# (truthy -> overwrite, falsy/empty -> append), and truth-testing a
# multi-element ndarray raises ValueError once two channels have been
# accumulated -- looks like it should test `features_lowF.size == 0`.
if features_lowF:
features_lowF = lowF_feature_ch
features_highF = highF_feature_ch
else:
features_lowF = np.hstack([features_lowF, lowF_feature_ch])
features_highF = np.hstack([features_highF, highF_feature_ch])
# If all features are lower than the specified threshold value keep window
if np.all(features_lowF < self.TH_lowF) & np.all(features_highF < self.TH_highF):
current_window_rejected = False
else:
current_window_rejected = True
# ???
# Regress the EOG components out of every EEG channel using the
# calibration coefficients (row 0: vertical, row 1: horizontal).
samples = samples - self.eog_coeffs[0,:].reshape(-1,1)*np.matlib.repmat(eog_v,len(self.eog_coeffs[0,:]),1) - self.eog_coeffs[1,:].reshape(-1,1)*np.matlib.repmat(eog_h,len(self.eog_coeffs[1,:]),1)
# Filter the data using a Laplacian filter
samples = self.Laplacian_filter_online(samples)
#
# Perform EEG feature extraction
#
features = np.array([])
# Loop over all channels
for k, chan_freq in enumerate(self.channels_2train):
# ??? Why try/except?
# The trained channel name may or may not carry the '_filt' suffix.
try:
index = self.eeg_channels.index(chan_freq)
except:
index = self.eeg_channels.index(chan_freq + '_filt')
features_chan = self.extract_features(samples[index],chan_freq)
# Initialize or append features of current channel
# NOTE(review): same inverted truthiness pattern as above -- verify.
if features:
features = features_chan
else:
features = np.hstack([features, features_chan])
# Return features and if the current window was rejected
if self.artifact_rejection == 1:
return features, current_window_rejected
else:
return features
def sim_call_rest(self):
# Generate artificial rest data
# NOTE(review): this method references `math` (its import is commented out
# at the top of the file) and an undefined `t`; calling it would raise
# NameError as written.
fsample = 1000.00 #Sample frequency in Hz
f = 10 # in Hz
rest_amp = 10
cnt = 1
cnt_noise = 1
samples = []
for k in self.eeg_channels: #for loop on number of electrodes
#print 'channel in sim rest', k
if k in ['chan13_filt', 'chan14_filt', 'chan18_filt', 'chan19_filt']:
rest_noise = rest_amp*0.1*np.random.randn(self.n_win_pts) #10% of signal amplitude
rest_signal = np.zeros(self.n_win_pts)
for i in np.arange(self.n_win_pts):
rest_signal[i] = rest_amp*cnt * math.sin((f+cnt-1)*2*math.pi*t[i]) + rest_noise[i] #rest sinusoidal signal
cnt += 1
else:
rest_signal = rest_amp*0.1*cnt_noise*np.random.randn(self.n_win_pts) #10% of signal amplitude. only noise
cnt_noise += 1
if samples == []:
samples = rest_signal
else:
samples = np.vstack([samples, rest_signal])
#samples = np.vstack([samples, rest_signal])
#samples[k] = rest_signal
# print 'samples'
#print samples
#import pdb; pdb.set_trace()
# if np.all(samples == 0):
# print 'all samples = 0'
# samples = np.random.rand(samples.shape[0],samples.shape[1])
samples = self.Laplacian_filter_online(samples)
features = None
for k, chan_freq in enumerate(self.channels_2train):# loop on channels
features_chan = self.extract_features(samples[k],chan_freq)
if features == None:
features = features_chan
else:
features = np.hstack([features, features_chan])
#import pdb; pdb.set_trace()
#return dict(eeg_multi_features=features)
return features
def sim_call_mov(self):
# Gererate artificial mov data
# NOTE(review): same undefined `math` / `t` issue as sim_call_rest.
fsample = 1000.00 #Sample frequency in Hz
f = 10 # in Hz
rest_amp = 10
move_amp = 5; #mov state amplitude
cnt = 1
cnt_noise = 1
#samples = dict()
samples= []
for k in self.eeg_channels: #for loop on number of electrodes
if k in ['chan13_filt', 'chan14_filt', 'chan18_filt', 'chan19_filt']:
move_noise = move_amp*0.1*np.random.randn(self.n_win_pts) #10% of signal amplitude
move_signal = np.zeros(self.n_win_pts)
for i in np.arange(self.n_win_pts):
move_signal[i] = move_amp*cnt * math.sin((f+cnt-1)*2*math.pi*t[i]) + move_noise[i]
cnt += 1
else:
move_signal = rest_amp*0.1*cnt_noise*np.random.randn(self.n_win_pts) #10% of signal amplitude
cnt_noise += 1
if samples == []:
samples = move_signal
else:
samples = np.vstack([samples, move_signal])
#samples[k] = mov_signal
samples = self.Laplacian_filter_online(samples)
features = None
for k, chan_freq in enumerate(self.channels_2train):# loop on channels
features_chan = self.extract_features(samples[k],chan_freq)
if features == None:
features = features_chan
else:
features = np.hstack([features, features_chan])
#import pdb; pdb.set_trace()
#return dict(eeg_multi_features=features)
return features
def extract_features_2retrain(self, rest_data, mov_data):
'''
Filter samples and extract features. Used for the online retraining of the decoder.
Slides a 50-sample window in 5-sample steps over rest and movement data
(truncated to equal length) and extracts features per window per channel.
Returns (rest_features, mov_features), each of shape (n_windows, n_features).
'''
rest_data = self.Laplacian_filter_online(rest_data)
mov_data = self.Laplacian_filter_online(mov_data)
win_size = 50
step_size = 5
rest_features = None
mov_features = None
# Keep only the trailing min_len samples of each so windows align.
min_len = min(rest_data.shape[1],mov_data.shape[1])
rest_data = rest_data[:,rest_data.shape[1]-min_len:]
mov_data = mov_data[:,mov_data.shape[1]-min_len:]
t0 = time.time()
for k, chan_freq in enumerate(self.channels_2train):# for loop on channels #THIS HAS TO BE FIXED!!!! THE DATA FROM BRAINAMP MIGHT NOT BE IN ORDER ACCORDING TO OUR LIST OF CHANNELS
n = 0
r_features_ch = None
m_features_ch = None
while n <= (rest_data.shape[1] - win_size):
r_feats = self.extract_features(rest_data[k,n:n+win_size],chan_freq)
m_feats = self.extract_features(mov_data[k,n:n+win_size],chan_freq)
if r_features_ch == None:
r_features_ch = r_feats
m_features_ch = m_feats
else:
r_features_ch = np.vstack([r_features_ch, r_feats])
m_features_ch = np.vstack([m_features_ch, m_feats])
n +=step_size
#rest_features_chan = self.extract_features(rest_data[k])
#mov_features_chan = self.extract_features(mov_data[k])
if rest_features == None:
rest_features = r_features_ch
mov_features = m_features_ch
else:
rest_features = np.hstack([rest_features, r_features_ch])
mov_features = np.hstack([mov_features, m_features_ch])
# print 'features'
# print time.time() - t0
#return dict(rest_features=rest_features, mov_features = mov_features)
return rest_features, mov_features
| 39.56135 | 356 | 0.596767 |
d0a812df66614ac1f4f5662eefaa44d1e4659699 | 1,859 | py | Python | research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contextual bandit algorithm that selects an action at random."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from research.deep_contextual_bandits.bandits.core.bandit_algorithm import BanditAlgorithm
class FixedPolicySampling(BanditAlgorithm):
    """Baseline bandit policy: samples an arm from a fixed distribution p."""

    def __init__(self, name, p, hparams):
        """Creates a FixedPolicySampling object.

        Args:
          name: Name of the algorithm.
          p: Vector of normalized probabilities corresponding to sampling each arm.
          hparams: Hyper-parameters, including the number of arms (num_actions).

        Raises:
          ValueError: when p dimension does not match the number of actions.
        """
        self.name = name
        self.p = p
        self.hparams = hparams
        # One probability is required per arm; anything else is a config error.
        if len(p) != hparams.num_actions:
            raise ValueError('Policy needs k probabilities.')

    def action(self, context):
        """Ignores the context and draws an arm index distributed as p."""
        arms = range(self.hparams.num_actions)
        return np.random.choice(arms, p=self.p)
6b7821385f4dcd6329554fe2b408ee8aafed6296 | 18,702 | py | Python | tools/utilities/pitest/drivetest.py | siddu1998/ELL | 993d5370f0f7a274e8dfd8f43220c792be46f314 | [
"MIT"
] | 1 | 2018-11-08T06:19:31.000Z | 2018-11-08T06:19:31.000Z | tools/utilities/pitest/drivetest.py | vishnoitanuj/ELL | 993d5370f0f7a274e8dfd8f43220c792be46f314 | [
"MIT"
] | null | null | null | tools/utilities/pitest/drivetest.py | vishnoitanuj/ELL | 993d5370f0f7a274e8dfd8f43220c792be46f314 | [
"MIT"
] | 1 | 2019-12-19T10:02:48.000Z | 2019-12-19T10:02:48.000Z | #!/usr/bin/env python3
####################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: drivetest.py
## Authors: Chris Lovett
##
## Requires: Python 3.x
##
####################################################################################################
import logging
import os
from os.path import basename
import sys
import argparse
import glob
import subprocess
import json
import operator
from shutil import copyfile
from shutil import rmtree
import zipfile
import socket
import time
import paramiko
import requests
current_script = os.path.basename(__file__)
# this script may be called from a different location, so we need the path
# relative to it
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "../pythonlibs"))
import find_ell
import picluster
from download_helper import *
from remoterunner import RemoteRunner
import logger
# possible values for the compile option.
COMPILE_NONE=0         # reuse an existing output dir, test only (run_test requires it to exist)
COMPILE_INCREMENTAL=1  # recompile only when the model is newer than the compiled output
COMPILE_FULL=2         # delete the output dir first and rebuild everything from scratch
class DriveTest:
def __init__(self, ipaddress=None, cluster=None, outdir=None, profile=False,
model=None, labels=None, target="pi3", target_dir="/home/pi/pi3",
username="pi", password="raspberry", iterations=1, expected=None,
blas=True, compile=COMPILE_INCREMENTAL, test=True, verbose=True, timeout=None, apikey=None,
gitrepo = None):
self.ipaddress = ipaddress
self.build_root = find_ell.find_ell_build()
self.ell_root = os.path.dirname(self.build_root)
self.output_dir = outdir
self.target_dir = target_dir
self.labels_file = labels
self.ell_model = model
self.username = username
self.password = password
self.target = target
self.cluster = cluster
self.apikey = apikey
self.blas = blas
self.expected = expected
self.profile = profile
self.compile = compile
self.test = test
self.verbose = verbose
self.prediction_time = None
self.logger = logger.get()
self.rePlatform = "ARMv7.*"
if target == "pi0":
self.rePlatform = "ARMv6.*"
if timeout:
self.timeout = int(timeout)
else:
self.timeout = None
self.iterations = iterations
# local state.
self.model_name = None
self.machine = None
self.ell_json = None
self.created_dirs = []
self.gallery_url = "https://github.com/Microsoft/ELL-models/raw/master/"
if gitrepo:
self.gallery_url = clone_repo(gitrepo, get_home_path())
# initialize state from the args
if not self.output_dir:
self.output_dir = "test"
self.test_dir = os.path.abspath(self.output_dir)
if os.path.isdir(self.test_dir):
if self.compile == COMPILE_FULL:
rmtree(self.test_dir)
else:
if self.compile == COMPILE_NONE:
raise Exception("Test only usage requires outdir '{}' to exist already".format(self.output_dir))
os.makedirs(self.test_dir)
if self.compile:
self.extract_model_info(self.ell_model, self.labels_file)
self.output_dir = os.path.join(self.test_dir, self.target)
if self.test:
self.resolve_address(self.ipaddress, self.cluster)
def __enter__(self):
"""Called when this object is instantiated with 'with'"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Called on cleanup of this object that was instantiated with 'with'"""
self.cleanup()
def cleanup(self):
"""Unlocks the target device if it is part of a cluster"""
if self.machine:
self.logger.info("Unlocking machine: " + self.machine.ip_address)
f = self.cluster.unlock(self.machine.ip_address)
if f.current_user_name:
self.logger.error("Failed to free the machine at " + self.machine.ip_address)
else:
self.logger.info("Freed machine at " + self.machine.ip_address)
def resolve_address(self, ipaddress, cluster):
"""Resolves the ip address of the target device and locks it if it is
part of a cluster"""
if cluster:
self.cluster = picluster.PiBoardTable(cluster, self.apikey)
task = " ".join((current_script, self.model_name))
if ipaddress:
# A specific machine is requested, try to lock it
self.machine = self.cluster.lock(ipaddress, task)
self.logger.info("Locked requested machine at " + self.machine.ip_address)
else:
# No specific machine requested, find a free machine
self.machine = self.cluster.wait_for_free_machine(task, rePlatform=self.rePlatform)
self.logger.info("Locked machine at " + self.machine.ip_address)
# if any of the above fails, this line should throw
self.ipaddress = self.machine.ip_address
else:
if not ipaddress:
raise Exception("Missing ipaddress or pi cluster address")
self.ipaddress = ipaddress
def extract_model_info(self, ell_model, labels_file):
"""Extracts information about a model"""
if not ell_model:
self.model_name = "d_I160x160x3CMCMCMCMCMCMC1AS"
self.ell_model = self.model_name + ".ell"
else:
self.ell_model = ell_model
name,ext = os.path.splitext(ell_model)
if ext.lower() == ".zip":
with zipfile.ZipFile(ell_model) as myzip:
extract = True
for name in myzip.namelist():
if os.path.splitext(name)[1] == ".ell":
filename = os.path.join(self.test_dir, name)
if os.path.isfile(filename) and os.path.getmtime(self.ell_model) < os.path.getmtime(filename):
# already extracted and up to date.
extract = False
if extract:
filename = myzip.extract(myzip.filelist[0], self.test_dir)
self.logger.info("extracted: {}".format(filename))
if filename != "":
self.ell_model = filename
else:
# not a zip archive
self.ell_model = ell_model
self.model_name, ext = os.path.splitext(basename(self.ell_model))
if ext.lower() == ".zip":
self.model_name, ext = os.path.splitext(self.model_name)
if not labels_file:
self.labels_file = None
else:
self.labels_file = os.path.abspath(labels_file)
def copy_files(self, filelist, folder):
"""Copies a list of files to a folder"""
target_dir = os.path.join(self.test_dir, folder)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
for path in filelist:
head, file_name = os.path.split(path)
dest = os.path.join(target_dir, file_name)
if os.path.isfile(dest) and os.path.getmtime(path) < os.path.getmtime(dest):
continue # this file already up to date
if self.verbose:
self.logger.info("Copying file: " + path + " to " + target_dir)
if not os.path.isfile(path):
raise Exception("expected file not found: " + path)
copyfile(path, dest)
def configure_runtest(self, dest):
"""Creates the remote bash script"""
with open(os.path.join(self.ell_root, "tools/utilities/pitest/runtest.sh.in"), "r") as f:
template = f.read()
template = template.replace("@LABELS@", basename(self.labels_file))
template = template.replace("@COMPILED_MODEL@", basename(self.model_name))
template = template.replace("@TARGET_DIR@", self.target_dir)
template = template.replace("@ITERATIONS@", str(self.iterations))
output_template = os.path.join(dest, "runtest.sh")
# raspberry pi requires runtest to use 0xa for newlines, so fix autocrlf
# that happens on windows.
with open(output_template, "w", newline="\n") as of:
of.write(template)
def find_files_with_extension(self, path, extension):
"""Searches for files with the given extension"""
cwd = os.getcwd()
os.chdir(path)
files = glob.glob("*.{}".format(extension))
os.chdir(cwd)
return files
def get_bash_files(self):
"""Copies demo files needed to run the test"""
self.copy_files(
[ os.path.join(self.ell_root, "tools/utilities/pitest/coffeemug.jpg"),
os.path.join(self.ell_root, "tools/utilities/pythonlibs/vision/demo.py"),
os.path.join(self.ell_root, "tools/utilities/pythonlibs/vision/demoHelper.py"),
self.labels_file], self.output_dir)
self.configure_runtest(self.output_dir)
def remove_bitcode(self):
# avoid copying over bitcode files (as they are big)
bitcode_files = self.find_files_with_extension(self.output_dir, "bc")
for bitcode in bitcode_files:
bitcode_path = os.path.join(self.output_dir, bitcode)
if os.path.isfile(bitcode_path):
os.remove(bitcode_path)
def get_default_model(self):
"""Downloads the default model"""
if (os.path.isfile(self.ell_model)):
# a full path was already provided to a local model, no need for download.
pass
else:
self.ell_model = os.path.join(self.test_dir, self.model_name + '.ell')
if (not os.path.isfile(self.ell_model)) :
self.logger.info("downloading default model...")
download_and_extract_model(
self.gallery_url + "models/ILSVRC2012/" + self.model_name + "/" + self.model_name + ".ell.zip",
model_extension=".ell",
local_folder=self.test_dir)
def get_default_labels(self):
if not self.labels_file:
self.labels_file = "categories.txt"
if (not os.path.isfile(self.labels_file)):
self.logger.info("downloading default categories.txt...")
self.labels_file = download_file(self.gallery_url + "models/ILSVRC2012/categories.txt",
local_folder=self.test_dir)
def get_model(self):
"""Initializes the user-specified model or picks the default one"""
self.get_default_model()
self.get_default_labels()
self.logger.info("using ELL model: " + self.model_name)
def wrap_project(self):
"""Creates a project for the model and target"""
if os.path.isdir(self.output_dir):
if self.compile == COMPILE_INCREMENTAL:
try:
base_name = os.path.basename(self.ell_model)
obj_file = os.path.join(self.output_dir, os.path.splitext(base_name)[0] + ".obj")
if os.path.isfile(obj_file) and os.path.getmtime(self.ell_model) < os.path.getmtime(obj_file):
self.logger.info("wrapped model already up to date")
return # already up to date.
except:
pass # model needs to be re-compiled then.
rmtree(self.output_dir)
sys.path.append(os.path.join(current_path, "../../wrap"))
mpp = __import__("wrap")
builder = mpp.ModuleBuilder()
builder_args = [self.ell_model, "-target", self.target, "-outdir",
self.output_dir, "--blas", str(self.blas)]
if self.verbose:
builder_args.append("-v")
if self.profile:
builder_args.append("-profile")
builder.parse_command_line(builder_args)
builder.run()
def verify_remote_test(self, output):
    """Verifies the remote test results and prints a pass or fail.

    *output* is the list of stdout lines from the remote run.  The line
    immediately preceding the "Average prediction time:" line is treated as
    the model's prediction and matched against self.expected (if set).
    Raises Exception on timeout or when the expected string is not found.
    """
    self.logger.info("==========================================================")
    found = False
    prediction_time = 0
    prompt = "Average prediction time:"
    previous = None
    prediction = "not found"
    for line in output:
        if prompt in line:
            # Parse the timing value; the previous line is the prediction.
            # NOTE(review): if the prompt appears on the very first line,
            # `prediction` becomes None and the `in` test below raises -- confirm.
            prediction_time = float(line[len(prompt):])
            self.prediction_time = prediction_time
            prediction = previous
        if "socket.timeout" in line:
            raise Exception("### Test failed due to timeout")
        previous = line
    if self.expected:
        found = (self.expected in prediction)
    else:
        found = True
    if found:
        self.logger.info("### Test passed")
        self.logger.info("Prediction=%s, time=%f" % (prediction, prediction_time))
    elif self.expected:
        msg = "### Test Failed, expecting %s, but found '%s' in time=%f" \
            % (self.expected, prediction, prediction_time)
        self.logger.error(msg)
        raise Exception(msg)
    else:
        # NOTE(review): unreachable -- `found` is only False when self.expected
        # is truthy, so the elif above always fires first.
        self.logger.error("### Test Failed")
        raise Exception("### Test Failed")
def run_test(self):
    """Runs the test: an optional compile/wrap phase followed by an optional
    remote execution phase.  Returns the remote test time in seconds when the
    test phase runs."""
    try:
        if self.compile:
            # Phase 1: fetch the model and wrap it into a deployable project.
            self.get_model()
            self.wrap_project()
            self.get_bash_files()
            self.remove_bitcode()
        if self.test:
            # Phase 2: push the project to the device over ssh/scp and run it.
            start_time = time.time()
            print("source={}".format(self.output_dir))
            # do not pass cluster to remote runner because we've already locked the machine.
            runner = RemoteRunner(cluster=None,
                                  ipaddress=self.ipaddress,
                                  username=self.username,
                                  password=self.password,
                                  source_dir=self.output_dir,
                                  target_dir=self.target_dir,
                                  command="runtest.sh",
                                  verbose=self.verbose,
                                  start_clean=not self.test,
                                  timeout=self.timeout,
                                  cleanup=False)
            output = runner.run_command()
            self.verify_remote_test(output)
            end_time = time.time()
            total_time = end_time - start_time
            self.logger.info("Remote test time: %f seconds" % (end_time - start_time))
            return total_time
    except:
        # NOTE(review): str() on a traceback object only prints its repr;
        # traceback.format_exc() would be more useful.  Re-raising a plain
        # Exception also discards the original exception type.
        errorType, value, traceback = sys.exc_info()
        self.logger.error("### Exception: " + str(errorType) + ": " + str(value) + "\n" + str(traceback))
        raise Exception("### Test Failed")
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    """Parses command line arguments"""
    # NOTE(review): the string above is a no-op statement, not a docstring.
    arg_parser = argparse.ArgumentParser(
        "This script uses ELL to create a demo project for a model "
        "(default is d_I160x160x3CMCMCMCMCMCMC1AS from the ELL gallery)\n"
        "on a target device (default is Raspberry Pi 3), pushes it to the given\n"
        "device's ip address using ssh and scp, then executes the test.\n"
        "The test also measures the accuracy and performance of evaluating the model.\n")
    # options
    arg_parser.add_argument("--ipaddress", default=None, help="IP address of the target devices")
    arg_parser.add_argument("--cluster", default=None, help="http address of the cluster server that controls access to the target devices")
    arg_parser.add_argument("--outdir", default=".", help="where to store local working files as a staging area (default '.')")
    arg_parser.add_argument("--profile", help="enable profiling functions in the ELL module", action="store_true")
    model_group = arg_parser.add_argument_group("phase", "options for two separate phases")
    arg_parser.add_argument("--compile", default="true", help="enable compile step preparing model for --test phase (default 'True')")
    arg_parser.add_argument("--test", default="true", help="enable test phase, assume the outdir has already been built (default 'True')")
    model_group = arg_parser.add_argument_group("model", "options for loading a non-default model. All 3 must be specified for a non-default model to be used.")
    model_group.add_argument("--model", help="path to an ELL model file, the filename (without extension) will be used as the model name")
    model_group.add_argument("--labels", help="path to the labels file for evaluating the model")
    arg_parser.add_argument("--target", help="the target platform.\n"
                            "Choices are pi3 (Raspberry Pi 3) and aarch64 (Dragonboard)", choices=["pi0", "pi3", "pi3_64", "aarch64"], default="pi3")
    arg_parser.add_argument("--target_dir", help="the directory on the target device for running the test", default="/home/pi/pi3")
    arg_parser.add_argument("--username", help="the username for the target device", default="pi")
    arg_parser.add_argument("--password", help="the password for the target device", default="raspberry")
    arg_parser.add_argument("--iterations", "-i", type=int, help="the number of iterations for each predict (default 1)", default=1)
    arg_parser.add_argument("--expected", "-e", help="the string to search for to verify test passed (default '')", default=None)
    arg_parser.add_argument("--blas", help="enable or disable the use of Blas on the target device (default 'True')", default="True")
    arg_parser.add_argument("--verbose", help="enable or disable verbose print output (default 'True')", default="True")
    arg_parser.add_argument("--timeout", help="set remote test run timeout in seconds (default '300')", default="300")
    argv = sys.argv
    argv.pop(0)  # drop the script name before handing the rest to argparse
    args = arg_parser.parse_args(argv)

    def str2bool(v):
        """Converts a string to a bool"""
        return v.lower() in ("yes", "true", "t", "1")

    # DriveTest is used as a context manager -- presumably so the locked
    # device/cluster machine is released even on failure (confirm).
    with DriveTest(args.ipaddress, args.cluster, args.outdir, args.profile,
                   args.model, args.labels, args.target, args.target_dir, args.username,
                   args.password, args.iterations, args.expected, str2bool(args.blas),
                   str2bool(args.compile), str2bool(args.test), str2bool(args.verbose), args.timeout) as tester:
        tester.run_test()
| 44.528571 | 160 | 0.595765 |
b1517cfe6480106fce823cc485813693a5882f7a | 981 | py | Python | mysite/main/migrations/0003_auto_20200117_0237.py | sripadha/AutoINFO | e479620d5c7d2648ea11cab6ae1b8bb628cd1cd4 | [
"BSD-3-Clause"
] | null | null | null | mysite/main/migrations/0003_auto_20200117_0237.py | sripadha/AutoINFO | e479620d5c7d2648ea11cab6ae1b8bb628cd1cd4 | [
"BSD-3-Clause"
] | 9 | 2020-06-05T20:45:14.000Z | 2021-12-13T20:31:47.000Z | mysite/main/migrations/0003_auto_20200117_0237.py | sripadha/AutoINFO | e479620d5c7d2648ea11cab6ae1b8bb628cd1cd4 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.0.2 on 2020-01-16 21:07
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; prefer generating a new
    # migration over editing this one by hand.

    dependencies = [
        ('main', '0002_auto_20200117_0055'),
    ]

    operations = [
        # Re-pin the default publish timestamp captured at generation time.
        migrations.AlterField(
            model_name='tutorial',
            name='tutorial_published',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 17, 2, 37, 26, 661427), verbose_name='date published'),
        ),
        migrations.AlterField(
            model_name='tutorial',
            name='tutorial_series',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.TutorialSeries', verbose_name='Series'),
        ),
        migrations.AlterField(
            model_name='tutorialcategory',
            name='category_slug',
            field=models.CharField(default=1, max_length=200),
        ),
    ]
| 31.645161 | 131 | 0.611621 |
e4e1458c8f76485ccb0557bbf97a3130c2c9e5b3 | 4,081 | py | Python | baselines/jft/experiments/vit_be/jft300m_be_vit_small8_32.py | sorennelson/uncertainty-baselines | 2d8102d1df6f413e85becb0d37b468acbf8730e7 | [
"Apache-2.0"
] | 794 | 2020-07-17T06:23:58.000Z | 2022-03-31T08:31:53.000Z | baselines/jft/experiments/vit_be/jft300m_be_vit_small8_32.py | sorennelson/uncertainty-baselines | 2d8102d1df6f413e85becb0d37b468acbf8730e7 | [
"Apache-2.0"
] | 136 | 2020-08-04T22:42:04.000Z | 2022-03-26T21:07:03.000Z | baselines/jft/experiments/vit_be/jft300m_be_vit_small8_32.py | sorennelson/uncertainty-baselines | 2d8102d1df6f413e85becb0d37b468acbf8730e7 | [
"Apache-2.0"
] | 129 | 2020-08-16T12:46:55.000Z | 2022-03-31T23:00:10.000Z | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT + BatchEnsemble.
"""
# pylint: enable=line-too-long
import ml_collections
def get_config():
  """Config for training on JFT300M. Batch size 4096 fits on DF4x4."""
  config = ml_collections.ConfigDict()

  config.seed = 0

  # JFT parameters.
  config.dataset = 'jft/entity:1.0.0'
  config.val_split = 'test[:49511]'  # aka tiny_test/test[:5%] in task_adapt
  config.train_split = 'train'  # task_adapt used train+validation so +64167
  config.num_classes = 18291
  config.init_head_bias = -10.0  # ~= ln(1/18k) ~= ln(1/num_classes)
  config.loss_to_apply = 'softmax_xent'

  # Preprocessing pipeline strings shared between train and eval.
  pp_common = '|value_range(-1, 1)'
  pp_common += f'|onehot({config.num_classes})'
  # To use ancestor 'smearing', use this line instead:
  # pp_common += f'|onehot({config.num_classes}, key='labels_extended', key_result='labels')  # pylint: disable=line-too-long
  pp_common += '|keep(["image", "labels"])'
  config.pp_train = 'decode_jpeg_and_inception_crop(224)|flip_lr' + pp_common
  config.pp_eval = 'decode|resize_small(256)|central_crop(224)' + pp_common
  config.shuffle_buffer_size = 250_000  # Per host, so small-ish is ok.

  # Model section
  config.model_name = 'PatchTransformerBE'
  config.model = ml_collections.ConfigDict()
  config.model.patch_size = (32, 32)
  config.model.hidden_size = 512
  config.model.representation_size = 512
  config.model.classifier = 'token'
  config.model.transformer = ml_collections.ConfigDict()
  config.model.transformer.num_layers = 8
  config.model.transformer.dropout_rate = 0.0
  config.model.transformer.mlp_dim = 2048
  config.model.transformer.num_heads = 8
  config.model.transformer.attention_dropout_rate = 0.0

  # BatchEnsemble parameters.
  config.model.transformer.be_layers = (1, 3, 5, 7)  # layers that use BE
  config.model.transformer.ens_size = 4
  config.model.transformer.random_sign_init = 0.5
  config.fast_weight_lr_multiplier = 1.0  # LR scale for the BE fast weights

  # Optimizer parameters.
  config.optim_name = 'Adam'
  config.optim = ml_collections.ConfigDict(dict(beta1=0.9, beta2=0.999))
  config.weight_decay = [0.1]
  config.weight_decay_pattern = ['.*/kernel']  # Does not decay fast-weights.
  config.clip_grad_norm = None

  # Learning-rate schedule: linear warmup, then linear decay to linear_end.
  config.lr = ml_collections.ConfigDict()
  config.lr.base = 1e-3  # LR likely has to be lower for larger models!
  config.lr.warmup_steps = 10_000
  config.lr.decay_type = 'linear'
  config.lr.linear_end = 1e-5

  config.batch_size = 1024  # Global batch size.
  config.batch_size_eval = 1024  # Global batch size.
  config.num_epochs = 5

  # Logging / evaluation / checkpoint cadence (in steps).
  config.log_training_every_n_steps = 50
  config.run_evaluation_every_n_steps = 1000
  config.log_training_first_n_steps = 10
  config.log_eval_steps = 1000
  config.write_checkpoint_every_n_steps = 5000
  config.checkpoint_write_timeout_secs = 10

  config.prefetch_to_device = 2
  config.trial = 0
  config.args = {}
  return config
def get_sweep(hyper):
  """Return the hyperparameter sweep as a Cartesian product of axes.

  Each axis is a (config path, candidate values) pair; add e.g.
  ('config.seed', list(range(3))) for a seed sweep.
  """
  axes = [
      ('config.model.transformer.ens_size', [4]),
      ('config.num_epochs', [14]),
      # Every two layers.
      ('config.model.transformer.be_layers', [(1, 3, 5, 7)]),
      ('config.model.transformer.random_sign_init', [-0.5, 0.5]),
      ('config.fast_weight_lr_multiplier', [0.5, 1.0, 2.0]),
      ('config.lr.base', [1e-3]),
  ]
  return hyper.product([hyper.sweep(name, values) for name, values in axes])
| 35.486957 | 125 | 0.718206 |
118e1c460aa9bf35d090242a9d2e4fa95f303ee2 | 15,059 | py | Python | dev/snippets/xxyxyz_org_line_breaking.py | loveencounterflow/hengist | 2d5ab27503eef35919d9c0425b024adb8b66394a | [
"MIT"
] | null | null | null | dev/snippets/xxyxyz_org_line_breaking.py | loveencounterflow/hengist | 2d5ab27503eef35919d9c0425b024adb8b66394a | [
"MIT"
] | 2 | 2021-04-22T17:51:16.000Z | 2022-02-11T22:10:39.000Z | dev/snippets/xxyxyz_org_line_breaking.py | loveencounterflow/hengist | 2d5ab27503eef35919d9c0425b024adb8b66394a | [
"MIT"
] | null | null | null | # thx to https://xxyxyz.org/line-breaking/
# # Line breaking
# Line breaking, also known as word wrapping or paragraph formation, is the problem of dividing a text into
# a sequence of lines so that every line spans at most some fixed width.
# One way of solving the problem is to gradually form the lines word by word. In the case the concatenating
# of one more word to a line would exceed its prescribed width, simply move the word to the next line and
# continue from there. While such greedy algorithm is both simple and fast, it may leave too much whitespace
# at the end of a line. The reasoning against such visual disparities is that they capture attention and
# therefore alter the reading in arbitrary way. In other words, it might be advantageous not to make use of
# the full line width now, as it might help to even out some lines later. Consider the following examples:
# ---------
# a b c d e
# f g h i j
# k l m n o
# p
# qqqqqqqqq
# and
# ---------
# a b c d
# e f g h
# i j k l
# m n o p
# qqqqqqqqq
# To counter the wildly different line lengths one could measure the cost of breaking a line in a particular
# way and penalize the huge gaps much more than the smaller ones. The idea then is to come up with a
# configuration of line breaks which minimizes the total sum of such penalties, a strategy know as "minimum
# raggedness". A line exceeding the allowed width should incur an infinitely large penalty; otherwise the
# cost should follow a quickly growing function, such as the squared size of the gap.
# As a side note, line breaking defined as above is a special case of the "least weight subsequence"
# problem.
############################################################################################################
# # Brute force
# An immediate approach is to try to search trough all the possible break configurations and return the best
# one. Since any two consecutive words might get split up by a break, there are order of O(2 ^ n)
# alternatives. Consequently, it is not feasible to form a paragraph of more than 30 - 40 words.
from itertools import combinations, chain
def powerset(iterable):
    """Yield every subset of *iterable* as a tuple, smallest subsets first."""
    items = list(iterable)
    subsets_by_size = (combinations(items, size) for size in range(len(items) + 1))
    return chain.from_iterable(subsets_by_size)
def naive(text, width):
    """Brute-force minimum-raggedness wrap: try every subset of break points.

    O(2^n) -- reference implementation only, feasible up to ~30-40 words.
    The cost of a layout is the sum of squared leftover widths over all
    lines (including the last); overfull layouts are discarded.
    """
    words = text.split()
    count = len(words)
    best_cost = 10 ** 20
    best_breaks = ()

    # Candidate break positions sit between words: 1 .. count - 1.
    # (Powerset inlined: subsets by increasing size, lexicographic within.)
    gaps = range(1, count)
    for candidate in chain.from_iterable(
            combinations(gaps, size) for size in range(len(gaps) + 1)):
        cost = 0
        start = 0
        feasible = True
        for end in chain(candidate, (count,)):
            line = ' '.join(words[start:end])
            if len(line) > width:
                feasible = False
                break
            cost += (width - len(line)) ** 2
            start = end
        if feasible and cost < best_cost:
            best_cost = cost
            best_breaks = candidate

    # Materialize the winning layout.
    result = []
    start = 0
    for end in chain(best_breaks, (count,)):
        result.append(' '.join(words[start:end]))
        start = end
    return result
############################################################################################################
# # Dynamic programming
# The deficiency of first idea lies in that it repeatedly solves the same subproblems. Yet suppose there was
# an optimal configuration of lines. Plucking off its last line would still keep the layout optimal because
# otherwise it would be possible to improve it and, together with the removed line, would result in even
# better configuration, contradicting its optimality. To solve each subproblem just once, it is then
# necessary to find out and later re-use which of the lines ending with some word contributes least to the
# overall cost. As each of the "n" words could terminate at most "n" potential lines, the algorithm runs in
# O(n ^ 2).
def dynamic(text, width):
    """Minimum-raggedness wrap via dynamic programming, O(n^2).

    slack[i][j] is the leftover width of a line holding words i..j
    (negative when the line overflows; such lines are charged a huge
    constant so they lose to any legal layout).
    """
    words = text.split()
    count = len(words)

    # Table of leftover widths for every candidate line words[i..j].
    slack = []
    for i in range(count):
        row = [0] * count
        row[i] = width - len(words[i])
        for j in range(i + 1, count):
            row[j] = row[j - 1] - len(words[j]) - 1
        slack.append(row)

    # best[j + 1]: minimal total cost of laying out the first j + 1 words;
    # breaks[j]: index of the first word on the line that ends with word j.
    best = [0] + [10 ** 20] * count
    breaks = [0] * count
    for j in range(count):
        for i in range(j, -1, -1):
            if slack[i][j] < 0:
                candidate = 10 ** 10
            else:
                candidate = best[i] + slack[i][j] ** 2
            if candidate < best[j + 1]:
                best[j + 1] = candidate
                breaks[j] = i

    # Walk the recorded break points backwards to rebuild the lines.
    result = []
    end = count
    while end > 0:
        start = breaks[end - 1]
        result.append(' '.join(words[start:end]))
        end = start
    result.reverse()
    return result
############################################################################################################
# # Shortest path
# The previous way can be sped up even further: the length offsets used to calculate any line length in
# constant time can easily be pre-processed in O(n), rather than O(n ^ 2), and there is no point in putting
# ever more words on a line once it reaches the allowed width. The performance then improves down to O(n *
# width).
# This is exactly the same result as if the text was thought of as a (topologically sorted) directed acyclic
# graph, with the nodes and arcs representing words and breaks, respectively. By substituting the penalties
# for the weights, the problem becomes the one of finding the shortest path which is known to be solvable in
# linear time. Note that the number of edges remains O(n * width).
# Flat uses the latter method.
def shortest(text, width):
    """Wrap text as a shortest-path search over the line DAG, O(n * width).

    Nodes are word boundaries; an edge (start, end) means "words
    start..end-1 form one line" and costs the squared leftover width.
    The inner loop stops as soon as a line no longer fits, bounding the
    number of edges per start position by the line width.
    """
    words = text.split()
    count = len(words)

    # offsets[i]: total character count of the first i words (no spaces).
    offsets = [0]
    for word in words:
        offsets.append(offsets[-1] + len(word))

    minima = [0] + [10 ** 20] * count
    breaks = [0] * (count + 1)
    for start in range(count):
        for end in range(start + 1, count + 1):
            # Width of line words[start:end], including single spaces.
            line_width = offsets[end] - offsets[start] + end - start - 1
            if line_width > width:
                break
            candidate = minima[start] + (width - line_width) ** 2
            if candidate < minima[end]:
                minima[end] = candidate
                breaks[end] = start

    # Reconstruct the lines from the break table, back to front.
    result = []
    end = count
    while end > 0:
        start = breaks[end]
        result.append(' '.join(words[start:end]))
        end = start
    result.reverse()
    return result
############################################################################################################
# # Binary search
# The method using dynamic programming can be written as two nested loops: the outer one iterates over every
# word and the inner one searches for the most suitable break. Hirschberg and Larmore showed in 1987 an
# algorithm which is able to use binary search instead of the inner loop in the case the weight function is
# "concave". It turns out that the penalty discussed thus far can be made to satisfy this property by
# handling the "infinities" slightly differently, giving an overall time of O(n * log n). A similar
# algorithm due to Galil and Giancarlo from 1989 is given below.
# The concave property says: w(i, j) + w(i', j') <= w(i', j) + w(i, j'), where i < i' < j < j'.
from collections import deque
def binary(text, width):
    """O(n log n) minimum-raggedness wrap (Galil-Giancarlo style).

    Exploits concavity of the cost function: a deque of (break, position)
    pairs tracks which break candidate is currently cheapest from a given
    word position onward, and a binary search (h) locates where one
    candidate overtakes another.
    """
    words = text.split()
    count = len(words)
    offsets = [0]
    for w in words:
        offsets.append(offsets[-1] + len(w))

    minima = [0] * (count + 1)
    breaks = [0] * (count + 1)

    def c(i, j):
        # Cost of a line holding words i..j-1 on top of the best layout of
        # the first i words.  Overlong lines are penalized in proportion to
        # the overflow (not a flat "infinity") to preserve concavity.
        w = offsets[j] - offsets[i] + j - i - 1
        if w > width:
            return 10 ** 10 * (w - width)
        return minima[i] + (width - w) ** 2

    def h(l, k):
        # Binary search for the smallest position where candidate l is at
        # least as cheap as candidate k; returns l + 2 if k stays cheaper
        # through the last position.
        low, high = l + 1, count
        while low < high:
            mid = (low + high) // 2
            if c(l, mid) <= c(k, mid):
                high = mid
            else:
                low = mid + 1
        if c(l, high) <= c(k, high):
            return high
        return l + 2

    # q holds (candidate break, first position from which it wins),
    # cheapest candidate at the front.
    q = deque([(0, 1)])
    for j in range(1, count + 1):
        l = q[0][0]
        if c(j - 1, j) <= c(l, j):
            # The newest candidate already beats everything queued: reset.
            minima[j] = c(j - 1, j)
            breaks[j] = j - 1
            q.clear()
            q.append((j - 1, j + 1))
        else:
            minima[j] = c(l, j)
            breaks[j] = l
            # Drop queued candidates dominated by the new one everywhere.
            while c(j - 1, q[-1][1]) <= c(q[-1][0], q[-1][1]):
                q.pop()
            q.append((j - 1, h(j - 1, q[-1][0])))
            # Advance the head of the queue past position j.
            if j + 1 == q[1][1]:
                q.popleft()
            else:
                q[0] = q[0][0], (q[0][1] + 1)

    lines = []
    j = count
    while j > 0:
        i = breaks[j]
        lines.append(' '.join(words[i:j]))
        j = i
    lines.reverse()
    return lines
############################################################################################################
# # Total monotonicity
# Each iteration of the dynamic programming scheme can also be seen as filling in a matrix, where a cell
# adds up the least overall cost to a subproblem (a column minimum) and a penalty. A concave weight function
# implies that the matrix is totally monotone and in 1987 Shor, Moran, Aggarwal, Wilber and Klawe devised an
# algorithm which finds the row maxima of such matrix in linear time. Even though SMAWK can be modified to
# find column minima instead, it is not possible to apply it directly to this "on-line" matrix as it might
# try to evaluate a not "available" cell, i.e. a cell dependent on some yet unknown column minimum. However,
# Wilber came up with an algorithm in 1988 which "pretends" to know the minima and still runs in O(n) time.
# An "ordered" algorithm which obeys the availability of the matrix as presented by Aggarwal and Tokuyama in
# 1998 follows.
def linear(text, width):
    """O(n) minimum-raggedness wrap via SMAWK-style matrix searching.

    The costs form an implicit totally monotone matrix (rows = break
    candidates, columns = word positions); its column minima are found with
    the SMAWK algorithm, processed in blocks of doubling size that restart
    whenever a later entry invalidates a block boundary (Aggarwal-Tokuyama).
    """
    words = text.split()
    count = len(words)
    offsets = [0]
    for w in words:
        offsets.append(offsets[-1] + len(w))

    minima = [0] + [10 ** 20] * count
    breaks = [0] * (count + 1)

    def cost(i, j):
        # Cost of a line spanning words i..j-1; overlong lines are penalized
        # proportionally to the overflow to keep the matrix totally monotone.
        w = offsets[j] - offsets[i] + j - i - 1
        if w > width:
            return 10 ** 10 * (w - width)
        return minima[i] + (width - w) ** 2

    def smawk(rows, columns):
        # Column-minima search in the implicit totally monotone matrix.
        # Phase 1 ("reduce"): prune rows that cannot hold a column minimum.
        stack = []
        i = 0
        while i < len(rows):
            if stack:
                c = columns[len(stack) - 1]
                if cost(stack[-1], c) < cost(rows[i], c):
                    if len(stack) < len(columns):
                        stack.append(rows[i])
                    i += 1
                else:
                    stack.pop()
            else:
                stack.append(rows[i])
                i += 1
        rows = stack

        # Phase 2: recurse on the odd columns ...
        if len(columns) > 1:
            smawk(rows, columns[1::2])

        # ... then fill in the even columns by scanning only between the
        # minima already found for their odd neighbours.
        i = j = 0
        while j < len(columns):
            if j + 1 < len(columns):
                end = breaks[columns[j + 1]]
            else:
                end = rows[-1]
            c = cost(rows[i], columns[j])
            if c < minima[columns[j]]:
                minima[columns[j]] = c
                breaks[columns[j]] = rows[i]
            if rows[i] < end:
                i += 1
            else:
                j += 2

    # Fill the table in blocks of doubling size; when a later entry proves
    # cheaper than the block boundary, restart from that point (offset).
    n = count + 1
    i = 0
    offset = 0
    while True:
        r = min(n, 2 ** (i + 1))
        edge = 2 ** i + offset
        smawk(range(0 + offset, edge), range(edge, r + offset))
        x = minima[r - 1 + offset]
        for j in range(2 ** i, r - 1):
            y = cost(j + offset, r - 1 + offset)
            if y <= x:
                n -= j
                i = 0
                offset += j
                break
        else:
            if r == n:
                break
            i = i + 1

    lines = []
    j = count
    while j > 0:
        i = breaks[j]
        lines.append(' '.join(words[i:j]))
        j = i
    lines.reverse()
    return lines
############################################################################################################
# # Divide & conquer
# One additional option is to replace the preceding SMAWK routine and its fairly large constant factor by a
# simple divide & conquer monotone matrix search. The complexity will drop back to O(n * log n) but for
# smaller problem instances it may actually run faster than the asymptotically superior approach.
def divide(text, width):
    """O(n log n) wrap via divide-and-conquer monotone matrix search.

    Same block/restart scheme as linear(), but the SMAWK routine is replaced
    by a simpler recursive search; smaller constant factor, so it can beat
    the asymptotically better approach on small inputs.
    """
    words = text.split()
    count = len(words)
    offsets = [0]
    for w in words:
        offsets.append(offsets[-1] + len(w))

    minima = [0] + [10 ** 20] * count
    breaks = [0] * (count + 1)

    def cost(i, j):
        # Cost of a line spanning words i..j-1; overlong lines get a flat
        # huge penalty here (unlike binary/linear's proportional one).
        w = offsets[j] - offsets[i] + j - i - 1
        if w > width:
            return 10 ** 10
        return minima[i] + (width - w) ** 2

    def search(i0, j0, i1, j1):
        # Iterative divide & conquer over rows [i0, i1) / columns [j0, j1):
        # find the best break for the middle column, then recurse on both
        # halves with the row range narrowed by monotonicity.
        stack = [(i0, j0, i1, j1)]
        while stack:
            i0, j0, i1, j1 = stack.pop()
            if j0 < j1:
                j = (j0 + j1) // 2
                for i in range(i0, i1):
                    c = cost(i, j)
                    if c <= minima[j]:
                        minima[j] = c
                        breaks[j] = i
                stack.append((breaks[j], j+1, i1, j1))
                stack.append((i0, j0, breaks[j]+1, j))

    # Same doubling-block outer loop as linear().
    n = count + 1
    i = 0
    offset = 0
    while True:
        r = min(n, 2 ** (i + 1))
        edge = 2 ** i + offset
        search(0 + offset, edge, edge, r + offset)
        x = minima[r - 1 + offset]
        for j in range(2 ** i, r - 1):
            y = cost(j + offset, r - 1 + offset)
            if y <= x:
                n -= j
                i = 0
                offset += j
                break
        else:
            if r == n:
                break
            i = i + 1

    lines = []
    j = count
    while j > 0:
        i = breaks[j]
        lines.append(' '.join(words[i:j]))
        j = i
    lines.reverse()
    return lines
# # Further reading
# * D. E. Knuth, M. F. Plass. Breaking Paragraphs into Lines. Software--Practice and Experience 11, 1981.
# * D. S. Hirschberg, L. L. Larmore. The least weight subsequence problem. SIAM Journal on Computing, 1987.
# * D. S. Hirschberg, L. L. Larmore. New applications of failure functions. Journal of the Association for
# Computer Machinery, 1987.
# * A. Aggarwal, M. M. Klawe, S. Moran, P. Shor, R. Wilber. Geometric Applications of a Matrix-Searching
# Algorithm. Algorithmica 2, 1987.
# * R. Wilber. The Concave Least-Weight Subsequence Problem Revisited. Journal of Algorithms 9, 1988.
# * Z. Galil, R. Giancarlo. Speeding up dynamic programming with applications to molecular biology.
# Theoretical Computer Science 64, 1989.
# * Z. Galil, K. Park. A Linear-Time Algorithm for Concave One-Dimensional Dynamic Programming. Information
# Processing Letters 33, 1989.
# * D. Eppstein. Sequence comparison with mixed convex and concave costs. Journal of Algorithms 11, 1990.
# * D. Eppstein, Z. Galil, R. Giancarlo, G. F. Italiano. Sparse dynamic programming II: Convex and concave
# cost functions. Journal of the ACM, 1992.
# * P. Becker. Construction of Nearly Optimal Multiway Trees. COCOON, vol. 1276 of Lecture Notes in Computer
# Science, 1997.
# * O. de Moor, J. Gibbons. Bridging the Algorithm Gap: A Linear-time Functional Program for Paragraph
# Formatting. Technical Report, Oxford Brookes University, 1997.
# * A. Aggarwal, T. Tokuyama. Consecutive interval query and dynamic programming on intervals. Discrete
# Applied Mathematics 85, 1998.
# — 21. 2. 2014
| 35.349765 | 108 | 0.544923 |
c5863b2bc177adab24494b4cff994e92521650e3 | 1,214 | py | Python | examples/rank_k_correlation_matrix_approximation.py | calincru/pymanopt | 3eb4696ea7fc62e89905409afadc3d905b36ed30 | [
"BSD-3-Clause"
] | 1 | 2020-03-13T12:34:07.000Z | 2020-03-13T12:34:07.000Z | examples/rank_k_correlation_matrix_approximation.py | leonbottou/pymanopt | 7d8c46f4513c3746234ba804604694b11db62d0a | [
"BSD-3-Clause"
] | null | null | null | examples/rank_k_correlation_matrix_approximation.py | leonbottou/pymanopt | 7d8c46f4513c3746234ba804604694b11db62d0a | [
"BSD-3-Clause"
] | 1 | 2018-07-01T07:45:53.000Z | 2018-07-01T07:45:53.000Z | from __future__ import print_function
import numpy as np
import numpy.random as rnd
import numpy.linalg as la
import theano.tensor as T
from pymanopt import Problem
from pymanopt.manifolds import Oblique
from pymanopt.solvers import TrustRegions
def rank_k_correlation_matrix_approximation(A, k):
    """
    Returns the matrix with unit-norm columns that is closest to A w.r.t.
    the Frobenius norm.

    A: (n, n) symmetric numpy array to approximate.
    k: target rank; the optimized factor X is (k, n), so X.T @ X has rank <= k
       and unit diagonal (because X's columns have unit norm).
    """
    m, n = A.shape
    assert m == n, "matrix must be square"
    assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"

    # Oblique(k, n): manifold of k-by-n matrices with unit-norm columns.
    manifold = Oblique(k, n)
    solver = TrustRegions()
    X = T.matrix()
    # Symbolic (Theano) cost: (1/4) * ||X^T X - A||_F^2.
    cost = 0.25 * T.sum((T.dot(X.T, X) - A) ** 2)

    problem = Problem(manifold=manifold, cost=cost, arg=X)
    return solver.solve(problem)
if __name__ == "__main__":
    # Generate random problem data: a random symmetric 10x10 matrix.
    n = 10
    k = 3
    A = rnd.randn(n, n)
    A = 0.5 * (A + A.T)

    # Solve the problem with pymanopt.
    Xopt = rank_k_correlation_matrix_approximation(A, k)
    C = Xopt.T.dot(Xopt)
    # NOTE(review): the eigenvalues `w` are computed but never used -- confirm.
    [w, _] = la.eig(C)

    # Print information about the solution: the Oblique factorization should
    # yield a unit diagonal and rank at most k.
    print('')
    print("diagonal:", np.diag(C))
    print("trace:", np.trace(C))
    print("rank:", la.matrix_rank(C))
| 24.77551 | 78 | 0.649918 |
4b3979b0e7fcb984e3583d6b22ded8294ed3bc83 | 8,870 | py | Python | EvolutionaryAlg.py | Chocbanana/Gent | 4600883e2d5a5d0820691f53003976072fdbc79d | [
"BSD-3-Clause"
] | null | null | null | EvolutionaryAlg.py | Chocbanana/Gent | 4600883e2d5a5d0820691f53003976072fdbc79d | [
"BSD-3-Clause"
] | null | null | null | EvolutionaryAlg.py | Chocbanana/Gent | 4600883e2d5a5d0820691f53003976072fdbc79d | [
"BSD-3-Clause"
] | null | null | null | """
Author: Bhavana Jonnalagadda, 2016
"""
import random
import math
"""
OUTLINE FOR 3RD TERM:
NeuralNetwork:
- NetworkRunner:
- make flexible for assigning activation, regularization, optimizer, metrics all in general
- Hyperparamter training!!
- RecurrentNode:
- Get working!
- NeuralNetwork:
- Use Graph library to store genes/representation
- derive from Phenotype
- NetworkNode:
- Define NetworkGenome:
- general rules for ALL networks (or distinguish b/w rnn and conv)
- do remainings todos
- save networks/graphs to file
EvolutionaryAlg:
- Genome:
- Phenotype:
- compatibility
- Population:
- Fitness assessment
- combine
- mutate
- GeneticAlg:
- make multithread-able
- add debug printing
- save histories of generations
- print to file?
- add asserts for checking proper types/structures/presence
- force the virtual functions to be derive implemented
- just, raise exception?
RESEARCH AFTER ABOVE IS COMPLETED:
- improve GA
- reimplement and evolve popular networks, use popular datasets
- activation functions in form of a*sin(bx + c) + d
- fractal patterns in topology, rnn output
- analyze emergent properties
--------------------------------------------
workflow:
genome = Genome([rules])
pop = Population(genome.makenew([initial_nn]), Network)
env = NetworkRunner([params])
ga = GeneticAlgorithm(genome, pop, env)
next(ga)
TODO: consider which classes need to implement
- __eq__
- __ne__
- __next__ / __iter__
- __str__
"""
class Gene:
    # Placeholder for a single heritable unit; not yet implemented.
    pass
# GRAPH of genes
class Genotype:
    # Placeholder; per the module outline, intended to be a graph of Gene
    # objects representing one organism's genetic encoding.
    pass
# TODO: set containing all created genotypes
# TODO: constraints on param variation?
# - enforce param ranges and sets
class Genome:
    """Factory for genotypes governed by a set of construction rules (skeleton)."""

    def __init__(self, rules):
        self._process_rules(rules)
        # Registries of everything created so far -- presumably for
        # NEAT-style innovation tracking; TODO confirm intended use.
        self.madeGenotypes = []
        self.madeGenes = {}

    def makenew(self, genes):
        """Construct a new genotype from *genes* (stub)."""
        pass

    # TODO: in population
    # def mate(self, parent1, parent2):
    #     geneseq1 = set(parent1.geneseq.keys())
    #     geneseq2 = set(parent2.geneseq.keys())
    #     # find the genes that they have the same
    #     same = geneseq1.intersection(geneseq2)
    #     # randomly select the node for those genes from each
    #     seq1 = random.sample(same, int(len(same)/2))
    #     seq2 = same.difference(seq1)
    #     # select the nodes from more fit parent
    #     excess = geneseq1.difference(geneseq2)
    # mutate the new geneseq
    # construct the entity from the geneseq using Network

    def mutate(self, genotype):
        """Apply a random mutation to *genotype* (stub)."""
        pass

    def _process_rules(self, rules):
        """Validate/normalize the rule set (stub)."""
        pass
# NOTE: NEAT modification, instead of excess/disjoint genes the diff ways to be different are weighted
class Phenotype:
    """Base class for organisms built from a genotype (skeleton).

    Derived classes are expected to implement construction from a genotype
    and a compatibility measure used for speciation.
    """

    # derived class
    def __init__(self, genotype):
        pass

    # TODO
    # Implemented in derived class
    # return a number in [0, 1]
    def compatibility(self, m1, m2):
        pass

    # Earlier NEAT-style compatibility-distance implementation, kept for
    # reference while the API is reworked:
    # def are_compatible(self, network1, network2):
    #     # TODO: REDO
    #     geneseq1 = [n.gene for n in network1.nodes]
    #     geneseq2 = [n.gene for n in network2.nodes]
    #     ID1 = set([g["geneID"] for g in geneseq1])
    #     ID2 = set([g["geneID"] for g in geneseq2])
    #     # samegenes = ID1.intersection(ID2)
    #     # weightdiffs = []
    #     # for g in samegenes:
    #     #     node1 = next(n for n in network1.nodes if n.gene["geneID"] == g)
    #     #     if hasattr(node1, "weights"):
    #     #         node2 = next(n for n in network2.nodes if n["geneID"] == g)
    #     diffgenes1 = ID1.difference(ID2)
    #     diffgenes2 = ID2.difference(ID1)
    #     # diff_connections_count = 0
    #     # diff_params_count = 0
    #     # for g in diffgenes1:
    #     #     node1 = next(n for n in geneseq1 if n["geneID"] == g)
    #     # Compatibility threshold
    #     cD = 0.5
    #     # different genes
    #     c1 = 1
    #     # # different params
    #     # c2 = 1
    #     N = max([len(ID1), len(ID2)])
    #     # Compatibility distance formula
    #     delta = c1 * (len(diffgenes1) + len(diffgenes2)) / N
    #     return True if delta <= cD else False
# Population: all networks in pop, networks in each species
# Population:
# - Speciation
# - Compatibility function: Use NEAT compatibility distance delta
class Population:
    """All candidate organisms, partitioned into species (NEAT-style).

    Each species is a dict with keys:
      "ID"              -- unique integer species id
      "rep"             -- representative member used for compatibility tests
      "members"         -- organisms currently in the species
      "fitnesses"       -- per-member fitness values (parallel to "members")
      "species_fitness" -- shared (adjusted) fitness of the whole species
    """

    def __init__(self, genotype, Phenotype):
        # TODO: where is seed made?
        # Seed = the organism object (e.g. a network), not the gene dict.
        seed = Phenotype(genotype)
        species = {"ID": 0, "rep": seed, "members": [seed], "fitnesses": [], "species_fitness": 0}
        self.sp_counter = 1      # next species ID to hand out
        self.species_list = [species]
        self.max_pop = 20        # cap on total offspring per generation
        self.members = {}        # id -> organism mapping used by the GA driver

    def assignMember(self, id, fitness):
        """Record *fitness* for member *id* and place it in a species (stub)."""
        pass

    def speciate(self):
        """Re-sort every member into a compatible species and compute shared fitness."""
        # Gather (member, fitness) pairs across all species, then empty them.
        all_members = [(m, f) for s in self.species_list for m, f in zip(s["members"], s["fitnesses"])]
        for s in self.species_list:
            s["species_fitness"] = 0
            s["members"] = []
            s["fitnesses"] = []
        # Place each member in the first compatible species; make one if none fits.
        # NOTE(review): self.genome is never assigned in __init__, so this raises
        # AttributeError until the owning Genome is wired in -- confirm design.
        for m, f in all_members:
            species = next((s for s in self.species_list if self.genome.are_compatible(m, s["rep"])), None)
            if species is None:
                self.species_list.append({"ID": self.sp_counter, "rep": m, "members": [m], "fitnesses": [f], "species_fitness": 0})
                self.sp_counter += 1
            else:
                species["members"].append(m)
                species["fitnesses"].append(f)
        # Re-choose representative and calculate shared fitness:
        #   adj_fitness(member)     = fitness / (number of other members in same species)
        #   shared_fitness(species) = sum(all adj_fitnesses) / (n * (n - 1))
        # NOTE(review): a species left empty, or with a single member, makes
        # random.choice / the division below fail -- handle before use.
        for s in self.species_list:
            s["rep"] = random.choice(s["members"])
            s["species_fitness"] = sum(s["fitnesses"]) / (len(s["members"]) * (len(s["members"]) - 1))
        # TODO: delete empty species or species with no assigned children

    def repopulate(self, genome):
        """Replace every species' members with offspring bred via *genome*.

        Offspring quotas are proportional to each species' shared fitness.
        """
        total = sum(s["species_fitness"] for s in self.species_list)
        for s in self.species_list:
            # Quota of children for this species (fix: was Population.max_pop,
            # which is never set as a class attribute and raised AttributeError).
            max_children = int(self.max_pop * s["species_fitness"] / total)
            # NOTE(review): ascending sort keeps the *lowest* fitness first; if
            # larger fitness is better this should use reverse=True.
            top_members = sorted(zip(s["fitnesses"], range(len(s["members"]))))
            if len(s["members"]) == 1:
                parents = [s["members"][0], s["members"][0]]
            elif len(s["members"]) < 3:
                parents = s["members"]
            else:
                # Keep the top 60% as parents (fix: was 0.06, which contradicted
                # the comment and selected zero parents for small species,
                # causing a modulo-by-zero below).
                parents = [s["members"][m[1]] for m in top_members[0:int(len(top_members) * 0.60 + 0.5)]]
            parent_pairs = list(zip(parents[:-1], parents[1:]))
            # Breed the children round-robin over the parent pairs
            # (fix: was self.genome.mate -- now uses the passed-in genome).
            children = []
            for i in range(max_children):
                child = genome.mate(*parent_pairs[i % len(parent_pairs)])
                children.append(child)
            # Replace the old generation.
            s["members"] = children
            s["fitnesses"] = []
class Environment:
    """Placeholder for an evaluation environment; per the usage in GeneticAlgorithm, concrete environments must provide eval(phenotype, generation)."""
    pass
# genome = Genome([rules])
# pop = Population(genome.makenew([initial_nn]), Network)
# env = NetworkRunner([params])
# ga = GeneticAlgorithm(genome, pop, env)
# next(ga)
class GeneticAlgorithm:
    """Drives the evolutionary loop over a genome, a population and an environment.

    Advancing the iterator (``next(ga)``) evaluates every member, assigns its
    fitness to the population, and breeds the next generation.
    """

    def __init__(self, genome, population, environment):
        """Store the three collaborators and start counting from generation 0."""
        self.gen = genome
        self.pop = population
        self.env = environment
        self.generation = 0

    def __next__(self):
        """Evaluate all members, then repopulate: one full generation step."""
        current = self.generation
        members = self.pop.members
        for member_id in members:
            # Fitness comes from the environment; the population records it.
            fitness = self.env.eval(members[member_id], current)
            self.pop.assignMember(member_id, fitness)
        # Breed the next generation from the evaluated members.
        self.pop.repopulate(self.gen)
        self.generation = current + 1

    def __iter__(self):
        """The algorithm is its own iterator."""
        return self
# def eval_population(self):
# for s in self.population.species_list:
# s["fitnesses"] = []
# for m in s["members"]:
# fitness = self.evaluator.eval(m, self.generation, s["ID"])
# s["fitnesses"].append(fitness)
| 29.177632 | 130 | 0.588162 |
b3a873e1422ed0a820df2452a4b4eb1da042aa36 | 2,151 | py | Python | caffe2/python/operator_test/reduction_ops_test.py | shijieS/Caffe2 | f71695dcc27053e52b78f893344ea2ef2bd2da83 | [
"MIT"
] | 1 | 2019-02-25T15:57:19.000Z | 2019-02-25T15:57:19.000Z | caffe2/python/operator_test/reduction_ops_test.py | shijieS/Caffe2 | f71695dcc27053e52b78f893344ea2ef2bd2da83 | [
"MIT"
] | null | null | null | caffe2/python/operator_test/reduction_ops_test.py | shijieS/Caffe2 | f71695dcc27053e52b78f893344ea2ef2bd2da83 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
class TestReductionOps(hu.HypothesisTestCase):
    """Reference (and, where registered, gradient) tests for the element
    reduction operators SumElements / SumSqrElements.

    Refactor: the three tests shared identical reference/gradient-check
    boilerplate, now centralized in _assert_reduction.
    """

    def _assert_reduction(self, gc, op, X, reference, check_gradient):
        """Run the reference check for ``op`` on input ``X``; optionally also
        verify its gradient against output 0."""
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=reference,
        )
        if check_gradient:
            self.assertGradientChecks(
                device_option=gc,
                op=op,
                inputs=[X],
                outputs_to_check=0,
                outputs_with_grads=[0],
            )

    @given(n=st.integers(5, 8), **hu.gcs)
    def test_elementwise_sum(self, n, gc, dc):
        X = np.random.rand(n).astype(np.float32)

        def sum_op(X):
            return [np.sum(X)]

        op = core.CreateOperator("SumElements", ["X"], ["y"])
        self._assert_reduction(gc, op, X, sum_op, check_gradient=True)

    @given(n=st.integers(5, 8), **hu.gcs)
    def test_elementwise_sqrsum(self, n, gc, dc):
        X = np.random.rand(n).astype(np.float32)

        def sumsqr_op(X):
            return [np.sum(X * X)]

        op = core.CreateOperator("SumSqrElements", ["X"], ["y"])
        # The original test ran no gradient check for SumSqrElements.
        self._assert_reduction(gc, op, X, sumsqr_op, check_gradient=False)

    @given(n=st.integers(5, 8), **hu.gcs)
    def test_elementwise_avg(self, n, gc, dc):
        X = np.random.rand(n).astype(np.float32)

        def avg_op(X):
            return [np.mean(X)]

        # average=1 turns SumElements into a mean reduction.
        op = core.CreateOperator("SumElements", ["X"], ["y"], average=1)
        self._assert_reduction(gc, op, X, avg_op, check_gradient=True)
| 23.129032 | 49 | 0.527662 |
e5ec8a1740dda8f3d2f5820cb35d5e959bf89864 | 2,050 | py | Python | graphgallery/gallery/embedding/randne.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | 300 | 2020-08-09T04:27:41.000Z | 2022-03-30T07:43:41.000Z | graphgallery/gallery/embedding/randne.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | 5 | 2020-11-05T06:16:50.000Z | 2021-12-11T05:05:22.000Z | graphgallery/gallery/embedding/randne.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | 51 | 2020-09-23T15:37:12.000Z | 2022-03-05T01:28:56.000Z | import numpy as np
import scipy.sparse as sp
from sklearn import preprocessing
class RandNE:
    r"""An implementation of `"RandNE" <https://zw-zhang.github.io/files/2018_ICDM_RandNE.pdf>`_ from the ICDM '18 paper "Billion-scale Network Embedding with Iterative Random Projection". The procedure uses normalized adjacency matrix based
    smoothing on an orthogonalized random normally generate base node embedding matrix.
    """

    def __init__(self, dimensions: int = 128, alphas: list = None, seed: int = None):
        """Store the hyper-parameters.

        dimensions -- size of the node embedding.
        alphas     -- smoothing weight per propagation step (default [0.5, 0.5]).
        seed       -- random seed for reproducible embeddings (None = nondeterministic).
        """
        self.dimensions = dimensions
        # Bug fix: the mutable default argument [0.5, 0.5] would be shared
        # across instances; use a None sentinel instead.
        self.alphas = [0.5, 0.5] if alphas is None else alphas
        self.seed = seed

    def _create_smoothing_matrix(self, graph):
        """
        Creating the normalized adjacency matrix D^-1 @ A.
        """
        degree = graph.sum(1).A1
        # NOTE(review): a zero-degree (isolated) node divides by zero here,
        # exactly as in the original implementation -- confirm inputs are
        # connected or pre-filtered.
        D_inverse = sp.diags(1.0 / degree)
        A_hat = D_inverse @ graph
        return A_hat

    def _create_embedding(self, A_hat):
        """
        Using the random orthogonal smoothing.
        """
        sd = 1 / self.dimensions
        # Bug fix: the seed parameter was stored but never used, so embeddings
        # were never reproducible; seed the generator explicitly.
        rng = np.random.RandomState(self.seed)
        base_embedding = rng.normal(0, sd, (A_hat.shape[0], self.dimensions))
        # Orthogonalize the random base via QR.
        base_embedding, _ = np.linalg.qr(base_embedding)
        embedding = np.zeros(base_embedding.shape)
        alpha_sum = sum(self.alphas)
        # Iteratively propagate through the graph, accumulating each smoothing
        # level with its alpha weight.
        for alpha in self.alphas:
            base_embedding = A_hat.dot(base_embedding)
            embedding = embedding + alpha * base_embedding
        embedding = embedding / alpha_sum
        return embedding

    def fit(self, graph: sp.csr_matrix):
        """
        Fitting a RandNE model.

        Arg types:
            * **graph** *(scipy.sparse csr_matrix)* - The adjacency matrix of the graph to be embedded.
        """
        A_hat = self._create_smoothing_matrix(graph)
        self._embedding = self._create_embedding(A_hat)

    def get_embedding(self, normalize=False) -> np.array:
        """Getting the node embedding (optionally feature-standardized via sklearn)."""
        embedding = self._embedding
        if normalize:
            embedding = preprocessing.scale(embedding)
        return embedding
| 36.607143 | 242 | 0.621951 |
b276023f3cb2d2319af64805ae3861c32000edb9 | 4,351 | py | Python | main.py | endorama/switcher | 74ad4f97d9dfc13a547115d6278c529f16bdde5a | [
"MIT"
] | null | null | null | main.py | endorama/switcher | 74ad4f97d9dfc13a547115d6278c529f16bdde5a | [
"MIT"
] | null | null | null | main.py | endorama/switcher | 74ad4f97d9dfc13a547115d6278c529f16bdde5a | [
"MIT"
] | null | null | null | import os
import time
import logging
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Wnck, Gtk
from gi.repository.Gdk import CURRENT_TIME
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
logger = logging.getLogger(__name__)
class WindowsSwitcherExtension(Extension):
    """Ulauncher extension that lists open windows and switches focus to the selected one."""

    def __init__(self):
        super(WindowsSwitcherExtension, self).__init__()
        # Route keyword queries and result-item selections to the listeners below.
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
class KeywordQueryEventListener(EventListener):
    """Builds the result list of open windows matching the typed query."""

    def on_event(self, event, extension):
        # 'workspace_indicator' is a user preference delivered as the string
        # 'true'/'false'; normalize it to a bool.
        indicator = extension.preferences['workspace_indicator']
        self.workspace_indicator = True if indicator == 'true' else False
        items = []
        for window in get_active_windows():
            # Never offer Ulauncher's own window as a switch target.
            if window.get_class_group_name() == 'Ulauncher':
                continue
            query = event.get_argument()
            # No query at all means "show every window".
            if not query or self.is_name_in_query(window, query):
                items.append(self.create_result_item(window))
        return RenderResultListAction(items)

    def is_name_in_query(self, window, query):
        # TODO implement fuzzy search
        # NOTE(review): .decode('utf-8') only exists on Python 2 str/bytes;
        # under Python 3 these calls raise AttributeError -- confirm the
        # interpreter version this extension targets.
        full_name = (window.get_name() + window.get_class_group_name()
                     ).decode('utf-8').lower()
        return query.decode('utf-8').lower() in full_name

    def create_result_item(self, window):
        # icon as Pixbuf is not supported; saving it on the disk as a workaround
        icon = window.get_icon()
        icon_name = window.get_class_group_name()
        icon_type = 'ico'
        icon_path = '/tmp/{}.{}'.format(icon_name, icon_type)
        window_desc = window.get_name()
        if self.workspace_indicator:
            # Prefix the description with the workspace number, e.g. "(2) Editor".
            window_desc = '({}) {}'.format(window.workspace_id, window_desc)
        # Only write the icon file once per window class.
        if not os.path.exists(icon_path):
            save_result = icon.savev(icon_path, icon_type, [], [])
            if not save_result:
                logger.error(
                    'Unable to write to /tmp. Using default icon as a fallback.')
                icon_path = 'images/switch.png'
        return ExtensionResultItem(
            icon=icon_path,
            name=window.get_class_group_name(),
            description=window_desc,
            on_enter=ExtensionCustomAction({'xid': window.get_xid()}))
class ItemEnterEventListener(EventListener):
    """Activates (focuses) the window the user selected in the result list."""

    def on_event(self, event, extension):
        data = event.get_data()
        # have to fetch active windows again, passing window in data not supported
        windows = get_active_windows()
        try:
            window = next(w for w in windows if w.get_xid() == data['xid'])
            # fallback in case next line doesn't work
            window.activate(time.time())
            # set focus by activiating with timestamp of 0;
            # Wnck gives a warning but otherwise seems to work
            window.activate(CURRENT_TIME)
        except Exception:
            # Bug fix: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt. The expected failure here is StopIteration
            # (the window vanished between listing and activation).
            logger.error('Application not accessible')
def get_active_windows():
    """Return the stacked list of windows that belong to a workspace.

    Each returned window is annotated with a ``workspace_id`` attribute
    (1-based workspace number); windows not attached to any workspace are
    filtered out.
    """
    Gtk.init([])  # necessary only if not using a Gtk.main() loop
    screen = Wnck.Screen.get_default()
    screen.force_update()  # recommended per Wnck documentation
    # Bug fix: the original used `del windows[i]` while iterating the same
    # list with enumerate(), which silently skips the element following every
    # removal. Build a new, filtered list instead.
    windows = []
    for window in screen.get_windows_stacked():
        try:
            window.workspace_id = window.get_workspace().get_number() + 1
            windows.append(window)
        except AttributeError:
            # get_workspace() returned None -> no workspace; drop the window.
            logger.debug("A window ({}) is not attached to any workspace".format(window.get_name()))
    # clean up Wnck (saves resources, check documentation)
    screen = None
    Wnck.shutdown()
    return windows
# Script entry point: instantiate the extension and start its event loop.
if __name__ == '__main__':
    WindowsSwitcherExtension().run()
| 35.08871 | 100 | 0.677316 |
33957178332948fda6a1b5cc4209e6f4f7430f3e | 476 | py | Python | myvenv/lib/python3.5/site-packages/telegram/__main__.py | F74046501/chat_bot | d6ddbe9c502ef35a4ef20e50fb367b5fa91e2783 | [
"MIT"
] | 17 | 2017-08-04T15:41:05.000Z | 2020-10-16T18:02:41.000Z | myvenv/lib/python3.5/site-packages/telegram/__main__.py | F74046501/chat_bot | d6ddbe9c502ef35a4ef20e50fb367b5fa91e2783 | [
"MIT"
] | 3 | 2017-08-04T23:37:37.000Z | 2017-08-04T23:38:34.000Z | myvenv/lib/python3.5/site-packages/telegram/__main__.py | F74046501/chat_bot | d6ddbe9c502ef35a4ef20e50fb367b5fa91e2783 | [
"MIT"
] | 3 | 2017-12-07T16:30:59.000Z | 2019-06-16T02:48:28.000Z | import sys
import urllib3
import certifi
import future
from . import __version__ as telegram_ver
def print_ver_info():
    """Print the versions of python-telegram-bot, its key dependencies and Python."""
    print('python-telegram-bot {0}'.format(telegram_ver))
    print('urllib3 {0}'.format(urllib3.__version__))
    print('certifi {0}'.format(certifi.__version__))
    print('future {0}'.format(future.__version__))
    # sys.version embeds newlines; collapse them so it prints on one line.
    print('Python {0}'.format(sys.version.replace('\n', ' ')))
def main():
    """Entry point: dump version diagnostics for bug reports."""
    print_ver_info()
if __name__ == '__main__':
    main()
| 19.833333 | 62 | 0.689076 |
9cb9de638332196aee434cd76644acfec9d35465 | 1,143 | py | Python | intro/matplotlib/examples/pretty_plots/plot_scatter_ext.py | zmoon/scipy-lecture-notes | 75a89ddedeb48930dbdb6fe25a76e9ef0587ae21 | [
"CC-BY-4.0"
] | 2,538 | 2015-01-01T04:58:41.000Z | 2022-03-31T21:06:05.000Z | intro/matplotlib/examples/pretty_plots/plot_scatter_ext.py | zmoon/scipy-lecture-notes | 75a89ddedeb48930dbdb6fe25a76e9ef0587ae21 | [
"CC-BY-4.0"
] | 362 | 2015-01-18T14:16:23.000Z | 2021-11-18T16:24:34.000Z | intro/matplotlib/examples/pretty_plots/plot_scatter_ext.py | zmoon/scipy-lecture-notes | 75a89ddedeb48930dbdb6fe25a76e9ef0587ae21 | [
"CC-BY-4.0"
] | 1,127 | 2015-01-05T14:39:29.000Z | 2022-03-25T08:38:39.000Z | """
Plot scatter decorated
=======================
An example showing the scatter function, with decorations.
"""
import numpy as np
import matplotlib.pyplot as plt
# Number of random points in the scatter cloud.
n = 1024
X = np.random.normal(0, 1, n)
Y = np.random.normal(0, 1, n)
# Color each point by its polar angle around the origin.
T = np.arctan2(Y,X)
plt.scatter(X, Y, s=75, c=T, alpha=.5)
# Symmetric limits and no tick labels keep the focus on the point cloud.
plt.xlim(-1.5, 1.5)
plt.xticks([])
plt.ylim(-1.5, 1.5)
plt.yticks([])
# Add a title and a box around it
from matplotlib.patches import FancyBboxPatch
ax = plt.gca()
# White box drawn in axes coordinates, above the data (zorder=3).
ax.add_patch(FancyBboxPatch((-0.05, .87),
                            width=.66, height=.165, clip_on=False,
                            boxstyle="square,pad=0", zorder=3,
                            facecolor='white', alpha=1.0,
                            transform=plt.gca().transAxes))
# Title line inside the box.
plt.text(-0.05, 1.02, " Scatter Plot: plt.scatter(...)\n",
         horizontalalignment='left',
         verticalalignment='top',
         size='xx-large',
         transform=plt.gca().transAxes)
# Smaller subtitle line under the title.
plt.text(-0.05, 1.01, "\n\n Make a scatter plot of x versus y ",
         horizontalalignment='left',
         verticalalignment='top',
         size='large',
         transform=plt.gca().transAxes)
plt.show()
| 24.847826 | 66 | 0.581802 |
9665fc17166222987f99b2e9e935874f0e1e0a8a | 81 | py | Python | metrics/tests.py | tp00012x/bobs_banana_stand | 0ae167b1bb124408770924dcb3660760da2d715c | [
"MIT"
] | null | null | null | metrics/tests.py | tp00012x/bobs_banana_stand | 0ae167b1bb124408770924dcb3660760da2d715c | [
"MIT"
] | 6 | 2021-03-18T22:01:48.000Z | 2022-02-10T07:19:13.000Z | sales/tests.py | tp00012x/bobs_banana_stand | 0ae167b1bb124408770924dcb3660760da2d715c | [
"MIT"
] | null | null | null | from django.test import TestCase
# Create your tests here.
# TODO tests missing
| 16.2 | 32 | 0.777778 |
65a848ea6da98d45a739d2c06e8ef9b3410edcc7 | 159 | py | Python | youtube_sm_parser/__init__.py | shanedabes/youtube_sm_parser | 642663abb9ee12c8478796b7ed7b9a01210d1fad | [
"Apache-2.0"
] | 2 | 2019-05-26T07:50:35.000Z | 2020-07-14T22:23:22.000Z | youtube_sm_parser/__init__.py | shanedabes/youtube_sm_parser | 642663abb9ee12c8478796b7ed7b9a01210d1fad | [
"Apache-2.0"
] | 35 | 2019-05-18T02:09:25.000Z | 2019-12-03T20:52:21.000Z | youtube_sm_parser/__init__.py | shanedabes/youtube_sm_parser | 642663abb9ee12c8478796b7ed7b9a01210d1fad | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for youtube_sm_parser."""
# Package metadata dunders.
__author__ = """Shane Donohoe"""
__email__ = 'shane@donohoe.cc'
__version__ = '0.1.0'
| 19.875 | 46 | 0.654088 |
6feca04ba064667ade57c9288c4d2c8e49ec06c0 | 13,360 | py | Python | hmd/tests/test_parser.py | fossabot/hmd | dda00daea71449d7338b573e11a24b2db7dbd7c7 | [
"MIT"
] | null | null | null | hmd/tests/test_parser.py | fossabot/hmd | dda00daea71449d7338b573e11a24b2db7dbd7c7 | [
"MIT"
] | null | null | null | hmd/tests/test_parser.py | fossabot/hmd | dda00daea71449d7338b573e11a24b2db7dbd7c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from src.abstract.automata.automata import AbstractAutomataMachine
from src.abstract.lexer.lexer import AbstractLexer
from src.abstract.lexer.token import AbstractToken
from src.abstract.parser.parser import AbstractParser
from src.mindslab.grammar import HMDGrammar
from src.mindslab.syntax import *
import unittest
class TestParser(unittest.TestCase):
    '''
    : unit tests for parser class.

    Refactor: every "invalid syntax" test previously repeated the identical
    four-line lex/parse/assertFalse body; that body now lives in
    _assert_invalid, and each test case is a single call with its input.
    '''

    lexer = AbstractLexer(HMDSyntaxDefault)
    parser = AbstractParser(HMDGrammar())

    def _assert_invalid(self, hmd):
        '''Lex `hmd`, parse the tokens, and assert the result is falsy.'''
        tokens = self.lexer.lex(hmd)
        attempt = self.parser.parse(tokens)
        self.assertFalse(attempt)

    def test_parser_empty_string(self):
        attempt = self.parser.parse('')
        answer = []
        self.assertEqual(attempt, answer)

    def test_parser_empty_list(self):
        attempt = self.parser.parse([])
        self.assertFalse(attempt)

    def test_parser_empty_list_string(self):
        attempt = self.parser.parse([''])
        self.assertFalse(attempt)

    def test_parser_empty_list_strings(self):
        attempt = self.parser.parse(['', ''])
        self.assertFalse(attempt)

    #
    # syntax: valid
    #

    #
    # syntax: invalid
    #

    # parentheses
    def test_parser_invalid_syntax_empty(self): self._assert_invalid('()')
    def test_parser_invalid_syntax_leading_double(self): self._assert_invalid('(()')
    def test_parser_invalid_syntax_trailing_double(self): self._assert_invalid('())')
    def test_parser_invalid_syntax_nested(self): self._assert_invalid('(())')

    # sequel (+)
    def test_parser_invalid_syntax_sequel(self): self._assert_invalid('+')
    def test_parser_invalid_syntax_sequels(self): self._assert_invalid('++')
    def test_parser_invalid_syntax_sequel_missing_count(self): self._assert_invalid('(+)')
    def test_parser_invalid_syntax_sequel_missing_count_nested(self): self._assert_invalid('(+())')
    def test_parser_invalid_syntax_sequel_leading_missing_count(self): self._assert_invalid('+()')
    def test_parser_invalid_syntax_sequel_trailing_missing_count(self): self._assert_invalid('()+')

    # prequel (-)
    def test_parser_invalid_syntax_prequel(self): self._assert_invalid('-')
    def test_parser_invalid_syntax_prequels(self): self._assert_invalid('--')
    def test_parser_invalid_syntax_prequel_missing_count(self): self._assert_invalid('(-)')
    def test_parser_invalid_syntax_prequel_missing_count_nested(self): self._assert_invalid('(-())')
    def test_parser_invalid_syntax_prequel_leading_missing_count(self): self._assert_invalid('-()')
    def test_parser_invalid_syntax_prequel_trailing_missing_count(self): self._assert_invalid('()-')

    # following (@)
    def test_parser_invalid_syntax_following(self): self._assert_invalid('@')
    def test_parser_invalid_syntax_followings(self): self._assert_invalid('@@')
    def test_parser_invalid_syntax_following_missing_count(self): self._assert_invalid('(@)')
    def test_parser_invalid_syntax_following_missing_count_nested(self): self._assert_invalid('(@())')
    def test_parser_invalid_syntax_following_leading_missing_count(self): self._assert_invalid('@()')
    def test_parser_invalid_syntax_following_trailing_missing_count(self): self._assert_invalid('()@')

    # not (!)
    def test_parser_invalid_syntax_not(self): self._assert_invalid('!')
    def test_parser_invalid_syntax_not_space_between(self): self._assert_invalid('! ')
    def test_parser_invalid_syntax_nots(self): self._assert_invalid('!!')
    def test_parser_invalid_syntax_nots_space_between(self): self._assert_invalid('! !')
    def test_parser_invalid_syntax_not_missing_count(self): self._assert_invalid('(!)')
    def test_parser_invalid_syntax_not_missing_count_nested(self): self._assert_invalid('(!())')
    def test_parser_invalid_syntax_not_leading_missing_count(self): self._assert_invalid('!()')
    def test_parser_invalid_syntax_not_trailing_missing_count(self): self._assert_invalid('()!')

    # hat (^)
    def test_parser_invalid_syntax_hat(self): self._assert_invalid('^')
    def test_parser_invalid_syntax_hats(self): self._assert_invalid('^^')
    def test_parser_invalid_syntax_hat_space_between(self): self._assert_invalid('^ 1')
    def test_parser_invalid_syntax_hat_missing_count(self): self._assert_invalid('(^)')
    def test_parser_invalid_syntax_hat_missing_count_nested(self): self._assert_invalid('(^())')
    def test_parser_invalid_syntax_hat_leading_missing_count(self): self._assert_invalid('^()')
    def test_parser_invalid_syntax_hat_trailing_missing_count(self): self._assert_invalid('()^')

    # wildcard (%)
    def test_parser_invalid_syntax_wildcard(self): self._assert_invalid('%')
    def test_parser_invalid_syntax_wildcards(self): self._assert_invalid('%%')
    def test_parser_invalid_syntax_wildcard_missing_count(self): self._assert_invalid('(%)')
    def test_parser_invalid_syntax_wildcard_missing_count_nested(self): self._assert_invalid('(%())')
    def test_parser_invalid_syntax_wildcard_leading_missing_count(self): self._assert_invalid('%()')
    def test_parser_invalid_syntax_wildcard_trailing_missing_count(self): self._assert_invalid('()%')

    # or (|)
    def test_parser_invalid_syntax_or(self): self._assert_invalid('|')
    def test_parser_invalid_syntax_ors(self): self._assert_invalid('||')
    def test_parser_invalid_syntax_or_missing_count(self): self._assert_invalid('(|)')
    def test_parser_invalid_syntax_or_missing_count_nested(self): self._assert_invalid('(|())')
    def test_parser_invalid_syntax_or_leading_missing_count(self): self._assert_invalid('|()')
    def test_parser_invalid_syntax_or_trailing_missing_count(self): self._assert_invalid('()|')

    # identifier ($)
    def test_parser_invalid_syntax_identifier(self): self._assert_invalid('$')
    def test_parser_invalid_syntax_identifiers(self): self._assert_invalid('$$')
    def test_parser_invalid_syntax_identifier_missing_count(self): self._assert_invalid('($)')
    def test_parser_invalid_syntax_identifier_missing_count_nested(self): self._assert_invalid('($())')
    def test_parser_invalid_syntax_identifier_leading_missing_count(self): self._assert_invalid('$()')
    def test_parser_invalid_syntax_identifier_trailing_missing_count(self): self._assert_invalid('()$')

    # assignment (=)
    def test_parser_invalid_syntax_assignment(self): self._assert_invalid('=')
    def test_parser_invalid_syntax_assignments(self): self._assert_invalid('==')
    def test_parser_invalid_syntax_not_assignments(self): self._assert_invalid('!=')
    def test_parser_invalid_syntax_assignment_missing_count(self): self._assert_invalid('(=)')
    def test_parser_invalid_syntax_assignment_missing_count_nested(self): self._assert_invalid('(=())')
    def test_parser_invalid_syntax_assignment_leading_missing_count(self): self._assert_invalid('=()')
    def test_parser_invalid_syntax_assignment_trailing_missing_count(self): self._assert_invalid('()=')
| 32.115385 | 75 | 0.646183 |
6b8ba50052b452c68ea007cc708e5f6db4cf3445 | 570 | py | Python | dear_petition/petition/migrations/0016_auto_20200209_0226.py | robert-w-gries/dear-petition | 35244afc8e967b41ae5265ae31fd13b26e4e835a | [
"MIT"
] | 4 | 2020-04-01T14:42:45.000Z | 2021-12-12T21:11:11.000Z | dear_petition/petition/migrations/0016_auto_20200209_0226.py | robert-w-gries/dear-petition | 35244afc8e967b41ae5265ae31fd13b26e4e835a | [
"MIT"
] | 142 | 2019-08-12T19:08:34.000Z | 2022-03-29T23:05:35.000Z | dear_petition/petition/migrations/0016_auto_20200209_0226.py | robert-w-gries/dear-petition | 35244afc8e967b41ae5265ae31fd13b26e4e835a | [
"MIT"
] | 8 | 2020-02-04T20:37:00.000Z | 2021-03-28T13:28:32.000Z | # Generated by Django 2.2.4 on 2020-02-09 02:26
from django.db import migrations
def add_user(apps, schema_editor):
    """Data migration: point every existing Batch at the first user in the database."""
    batch_model = apps.get_model("petition", "Batch")
    user_model = apps.get_model("users", "User")
    # The user with the lowest primary key becomes the owner of all batches.
    user = user_model.objects.order_by("pk").first()
    print(f"Setting all Batch.user to {user}")
    # Bulk-assign the user on every existing Batch row.
    batch_model.objects.update(user=user)
class Migration(migrations.Migration):
    # Must run after the preceding petition migration (presumably the one that
    # introduced Batch.user -- confirm against 0015_auto_20200209_0226).
    dependencies = [
        ("petition", "0015_auto_20200209_0226"),
    ]
    # Forward-only data migration; no reverse function is supplied.
    operations = [migrations.RunPython(add_user)]
| 24.782609 | 49 | 0.677193 |
e43752a0e4be33f77f650c631354ca08cd63eb44 | 319 | py | Python | novauniverse/__init__.py | NovaUniverse/NovaUniverse.py | 85e7aea1783ef3e7ca828c86e41fb1d655942656 | [
"MIT"
] | null | null | null | novauniverse/__init__.py | NovaUniverse/NovaUniverse.py | 85e7aea1783ef3e7ca828c86e41fb1d655942656 | [
"MIT"
] | 8 | 2021-12-28T14:45:27.000Z | 2022-02-17T15:38:02.000Z | novauniverse/__init__.py | NovaUniverse/novauniverse.py | 85e7aea1783ef3e7ca828c86e41fb1d655942656 | [
"MIT"
] | null | null | null | """
Nova Universe (Python API Wrapper)
Copyright (c) 2022-present (Dev Goldy)
"""
from . import _find_, info, _server_, _keys_, _license_key_validation_, _mcf_
# Methods
# Public aliases re-exporting attributes of the underscore-prefixed modules.
Player = _find_.player
Session = _find_.session
Server = _server_.server
License = _license_key_validation_.license
Mcf = _mcf_.mcf
KEYS = _keys_ | 19.9375 | 77 | 0.76489 |
65169a4518addba9f4ff0d081443754838970ada | 7,988 | py | Python | tomography/workinglib.py | matteoacrossi/adapt_ic-povm | 1c9a0b4b98fafff478aed66686692ec97c0342ae | [
"MIT"
] | null | null | null | tomography/workinglib.py | matteoacrossi/adapt_ic-povm | 1c9a0b4b98fafff478aed66686692ec97c0342ae | [
"MIT"
] | null | null | null | tomography/workinglib.py | matteoacrossi/adapt_ic-povm | 1c9a0b4b98fafff478aed66686692ec97c0342ae | [
"MIT"
] | null | null | null | import numpy as np
from baseconvert import base
import itertools
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
def label(index, s, t):
    """Return `index` written in base `s`, left-padded with zeros to width `t`.

    Digits above 9 use uppercase letters (bases up to 36). All call sites in
    this module use base 4, where this is a drop-in replacement for the old
    third-party `baseconvert.base(index, 10, s, string=True)` call -- for
    bases > 10 verify the digit alphabet against baseconvert before relying
    on it.
    """
    alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    digits = []
    remaining = index
    while remaining > 0:
        remaining, digit = divmod(remaining, s)
        digits.append(alphabet[digit])
    converted = "".join(reversed(digits)) or "0"
    # zfill reproduces the original '"0" * (t - len(l)) + l' padding.
    return converted.zfill(t)
def create_labels_list(s, n):
    """List every length-`n` outcome label over an `s`-symbol alphabet, in counting order."""
    labels = []
    for index in range(s ** n):
        labels.append(label(index, s, n))
    return labels
def generate_sicpovm_pdf(rho, n):
    """Return the outcome probabilities of the n-qubit tensor-product SIC-POVM on state rho."""
    # Single-qubit SIC-POVM states (tetrahedron of Bloch vectors).
    kets = [
        np.array([1, 0]),
        np.array([1 / np.sqrt(3), np.sqrt(2 / 3)]),
        np.array([1 / np.sqrt(3), np.exp(1j * 2 * np.pi / 3) * np.sqrt(2 / 3)]),
        np.array([1 / np.sqrt(3), np.exp(1j * 4 * np.pi / 3) * np.sqrt(2 / 3)]),
    ]
    # Rank-1 effects |a><a| / 2.
    effects = [np.outer(ket, ket.conj()) / 2 for ket in kets]
    # One probability per n-digit base-4 outcome label: p = Re tr(E rho).
    outcomes = create_labels_list(4, n)
    pdf = np.zeros(4 ** n)
    for idx, outcome in enumerate(outcomes):
        joint_effect = effects[int(outcome[0])]
        for digit in outcome[1:]:
            joint_effect = np.kron(joint_effect, effects[int(digit)])
        pdf[idx] = np.real(np.trace(joint_effect @ rho))
    return pdf
def generate_sicpovm_effects(n, order="Standard"):
    """Return a dict mapping each n-digit outcome label to its tensor-product SIC-POVM effect matrix."""
    # SIC-POVM rank-1 projectors
    a = {}
    a[0] = np.array([1, 0])
    a[1] = np.array([1 / np.sqrt(3), np.sqrt(2 / 3)])
    a[2] = np.array([1 / np.sqrt(3), np.exp(1j * 2 * np.pi / 3) * np.sqrt(2 / 3)])
    a[3] = np.array([1 / np.sqrt(3), np.exp(1j * 4 * np.pi / 3) * np.sqrt(2 / 3)])
    # Effects |a><a| / 2 (so the four single-qubit effects sum to the identity).
    p = {i: np.outer(a[i], a[i].conj()) / 2 for i in range(4)}
    obs = {}
    for ijk in create_labels_list(4, n):
        effect = p[int(ijk[0])]
        for e in ijk[1:]:
            # "Standard" PREPENDS each new factor, i.e. the tensor order is the
            # reverse of the label's digit order; anything else appends instead.
            if order == "Standard":
                effect = np.kron(p[int(e)], effect)
            else:
                effect = np.kron(effect, p[int(e)])
        obs[ijk] = effect
    return obs
def sicpovm_unitary():
    """Return the 4x4 unitary applied to a qubit and its ancilla so that a
    computational-basis measurement realizes the single-qubit SIC-POVM
    (see its use in sicpovm())."""
    inv_sqrt2 = np.sqrt(1 / 2)
    amp = np.sqrt(2 / 3)
    phase = np.exp(-1j * 2 * np.pi / 3)
    # Build the matrix row-block first, then transpose: each list below becomes
    # a column of the final unitary.
    columns = np.array([
        [1, amp * inv_sqrt2, amp * inv_sqrt2, amp * inv_sqrt2],
        [0, amp, amp * phase, amp * phase ** 2],
        [0, amp, amp * phase ** 2, amp * phase],
        [-1, amp * inv_sqrt2, amp * inv_sqrt2, amp * inv_sqrt2],
    ])
    return inv_sqrt2 * columns.T
# Function that adds the measurement unitaries
def sicpovm(qc, q, c, ancilla, ids=[]):
    """
    Prepares the measurement circuit.

    input:
        qc (QuantumCircuit): circuit preparing the state to be measured.
        q (QuantumRegister): qubit register including ancillae.
        c (ClassicalRegister): cbit register for outcomes.
        ancilla (dict): dictionary yielding the ancillary qubit for each logical qubit.
        ids (list): list of qubits in the same order as in the outcome string.
    output:
        A quantum circuit implementing the SIC-POVM on every qubit in 'ancilla'.

    NOTE(review): the mutable default `ids=[]` is shared across calls, but it
    is only rebound (never mutated) below, so it is safe in practice.
    """
    # Add the SIC-POVM unitaries
    for qi in ancilla:
        qc.unitary(sicpovm_unitary(), [q[qi], q[ancilla[qi]]])
    # Add the measurements
    if len(ids) == 0:
        ids = sorted(ancilla)
    for i, qi in enumerate(ids):
        # Each qubit's result goes to an even cbit; its ancilla to the next odd one.
        qc.measure(q[qi], c[2 * i])
        qc.measure(q[ancilla[qi]], c[2 * i + 1])
    return qc
def B(q, i, j, p):
    """
    Returns the two-qubit gate that "splits" the excitation between qubits i and j with weight p
    (qubit i keeps the excitation with probability p; see check_protocol for the weight bookkeeping).
    """
    B = QuantumCircuit(q)
    theta = 2 * np.arcsin(np.sqrt(1.0 - p))
    # Bare gate
    # B.cu3(theta, 0.0, 0.0, q[i], q[j])
    # Proposal by Clement: decomposition using plain u3 rotations and CNOTs.
    thetap = np.arcsin(np.cos(theta / 2.0))
    B.u3(thetap, 0.0, 0.0, q[j])
    B.cx(q[i], q[j])
    B.u3(-thetap, 0.0, 0.0, q[j])
    B.cx(q[j], q[i])
    return B
def B0(q, i, j, p):
    """
    Returns the two-qubit gate that "splits" the excitation between qubits i and j with weight p for the first pair
    (the X gate first creates the excitation on qubit i, presumably from the |0...0> register -- see w_state).
    """
    B0 = QuantumCircuit(q)
    theta = 2 * np.arcsin(np.sqrt(1.0 - p))
    # Create the excitation on qubit i, then share part of it with qubit j.
    B0.x(q[i])
    B0.u3(theta, 0.0, 0.0, q[j])
    B0.cx(q[j], q[i])
    return B0
def w_state(q, c, protocol):
    """
    Construct the circuit with the corresponding parallelisation. The input list protocol contains a set
    of lists with the pairs of B gates to be parallelised at a given time
    (each entry is an (i, j, p) triple, as produced by protocol_from_tree).
    """
    qc = QuantumCircuit(q, c)
    for pair_index, pair_set in enumerate(protocol):
        for i, j, p in pair_set:
            if pair_index == 0:
                # Only the first stage uses B0, which also creates the excitation.
                qc += B0(q, i, j, p)
            else:
                qc += B(q, i, j, p)
        # qc.barrier()
    return qc
def check_protocol(protocol):
    """
    Check if a given protocol works by computing the qubit excitation probabilities
    """
    # The root qubit of the first pair initially carries the full excitation.
    weights = {protocol[0][0][0]: 1.0}
    for pair_set in protocol:
        for src, dst, prob in pair_set:
            # Split the excitation: fraction 'prob' stays on src, the rest moves to dst.
            weights[dst] = weights[src] * (1.0 - prob)
            weights[src] = weights[src] * prob
    return weights
def protocol_from_tree(tree):
    """
    Determine the gate parameters (splitting probabilities) from a given tree.
    """
    # Count descendants bottom-up: every node counts itself, and each parent
    # accumulates the counts of its children.
    descendants = {}
    for level in reversed(tree):
        for parent, child in level:
            descendants.setdefault(parent, 1)
            descendants.setdefault(child, 1)
            descendants[parent] += descendants[child]
    # Assign probabilities to edges: the root starts with one excitation unit
    # per node, and each edge passes on the child's share.
    protocol = []
    excitations = {tree[0][0][0]: len(descendants)}
    for level in tree:
        new_level = []
        for parent, child in level:
            p = 1.0 - float(descendants[child]) / excitations[parent]
            excitations[child] = descendants[child]
            excitations[parent] -= excitations[child]
            new_level.append((parent, child, p))
        protocol.append(new_level)
    return protocol
def simplify(outcome):
    """
    Merge each pair of bits in ``outcome`` into one base-4 digit
    (bit pair "ab" becomes the digit 2*a + b).
    """
    n = len(outcome)
    if n % 2 != 0:
        print("Odd number of qubits")
    digits = []
    for i in range(0, n, 2):
        digits.append(str(2 * int(outcome[i]) + int(outcome[i + 1])))
    return "".join(digits)
def marginalise_outcomes(probs, kple):
    """
    Marginal distribution over the qubits listed in ``kple``: sums the
    probabilities in ``probs`` over all remaining qubits.
    """
    # All outcome strings share the same length; read it off the first key.
    n = len(next(iter(probs)))
    k = len(kple)
    # Initialise every possible k-digit base-4 outcome with probability zero.
    # NOTE(review): create_labels_list is defined elsewhere in this file;
    # presumably it enumerates all 4**k labels — confirm against its definition.
    marginal = {label: 0.0 for label in create_labels_list(4, k)}
    for outcome, prob in probs.items():
        reduced = ""
        for i in range(k):
            reduced += outcome[n - 1 - kple[k - 1 - i]]
        marginal[reduced] += prob
    return marginal
def compute_all_simplified_marginals(counts, ids, k):
    """
    Compute the k-qubit marginal for every combination of k qubits,
    after collapsing raw bit-string counts into base-4 outcome strings.
    """
    probs = {}
    for outcome, value in counts.items():
        probs[simplify(outcome)] = value
    marginals = {}
    for kple in itertools.combinations(range(len(ids)), k):
        key = tuple(ids[i] for i in kple)
        marginals[key] = marginalise_outcomes(probs, kple)
    return marginals
def block_transpose(rho):
    """
    Transpose a matrix in 2x2 blocks: the two diagonal blocks are kept in
    place and the two off-diagonal blocks are swapped. A bare 2x2 matrix is
    transposed directly.
    """
    dim = len(rho)
    if dim == 2:
        return rho.T
    half = dim // 2
    out = np.empty((dim, dim), dtype=complex)
    out[:half, :half] = rho[:half, :half]
    out[half:, half:] = rho[half:, half:]
    out[:half, half:] = rho[half:, :half]
    out[half:, :half] = rho[:half, half:]
    return out
def partial_transpose(rho, qlist, qubit, order="Standard"):
    """
    Partial transpose of ``rho`` with respect to one qubit.
    ``qlist`` lists the qubits in tensor-product order; ``order="Qiskit"``
    reverses that order to match Qiskit's little-endian convention.
    """
    if order == "Qiskit":
        qlist = tuple(reversed(qlist))
    dim = len(rho)
    # Block size corresponding to the position of ``qubit`` in the product.
    pos = qlist.index(qubit)
    block = int(dim / 2 ** pos)
    out = np.zeros((dim, dim), dtype=complex)
    for r in range(0, dim, block):
        for c in range(0, dim, block):
            out[r : r + block, c : c + block] = block_transpose(rho[r : r + block, c : c + block])
    return out
def negativity(rho, qlist, qubit, order="Standard", atol=1e-15):
    """
    Entanglement negativity across the bipartition singled out by ``qubit``:
    twice the absolute sum of the (near-)negative eigenvalues of the partial
    transpose of ``rho``.
    """
    eigvals = np.linalg.eigh(partial_transpose(rho, qlist, qubit, order=order))[0]
    negative_part = eigvals[eigvals < atol].sum()
    return 2 * np.abs(negative_part)
def negativity_list(rho, qlist, qubits, order="Standard", atol=1e-15):
    """
    Negativity after partially transposing ``rho`` on every qubit in
    ``qubits`` in turn.
    """
    transposed = rho
    for q in qubits:
        transposed = partial_transpose(transposed, qlist, q, order=order)
    eigvals = np.linalg.eigh(transposed)[0]
    return 2 * np.abs(eigvals[eigvals < atol].sum())
def p_value(value, distribution):
    """
    One-sided empirical p-value: the fraction of samples in ``distribution``
    that are greater than or equal to ``value``.
    """
    distribution = np.asarray(distribution)
    # Counting directly replaces the original's two O(n log n) sorts
    # (sorting does not change how many elements are >= value).
    return np.count_nonzero(distribution >= value) / len(distribution)
| 27.640138 | 115 | 0.566224 |
c11de30920f37423fa786ddc0e16c6af37ee2c62 | 279 | py | Python | doc/python_study_code/random_time.py | beiliwenxiao/vimrc | eb38fc769f3f5f78000060dac674b5c49d63c24c | [
"MIT"
] | null | null | null | doc/python_study_code/random_time.py | beiliwenxiao/vimrc | eb38fc769f3f5f78000060dac674b5c49d63c24c | [
"MIT"
] | null | null | null | doc/python_study_code/random_time.py | beiliwenxiao/vimrc | eb38fc769f3f5f78000060dac674b5c49d63c24c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""Pick and print a uniformly random timestamp between 2008-01-01 and 2009-01-01."""
from random import uniform
from time import asctime, localtime, mktime

# struct_time tuples: (year, month, day, hour, min, sec, wday, yday, isdst);
# -1 lets mktime derive the unknown weekday/yearday/DST fields.
date1 = (2008, 1, 1, 0, 0, 0, -1, -1, -1)
time1 = mktime(date1)
date2 = (2009, 1, 1, 0, 0, 0, -1, -1, -1)
time2 = mktime(date2)
random_time = uniform(time1, time2)
# print() call form works on both Python 2 and 3; the original bare
# `print expr` statement is a syntax error under Python 3.
print(asctime(localtime(random_time)))
6a154477f36903e061e3c23d722f0709370d8fce | 19,234 | py | Python | 20_Raspberry_Pi_project/DStarLiteView.py | robodhhb/Interactive-D-Star-Lite | 9f3b60631e4c468525d17cce3e0c1e072d876dbd | [
"MIT"
] | 5 | 2021-03-26T21:07:32.000Z | 2022-03-02T07:18:36.000Z | 20_Raspberry_Pi_project/DStarLiteView.py | robodhhb/Interactive-D-Star-Lite | 9f3b60631e4c468525d17cce3e0c1e072d876dbd | [
"MIT"
] | null | null | null | 20_Raspberry_Pi_project/DStarLiteView.py | robodhhb/Interactive-D-Star-Lite | 9f3b60631e4c468525d17cce3e0c1e072d876dbd | [
"MIT"
] | null | null | null | #!/usr/bin/python3
############################################################
# Class DStarLiteView
# The class DStarLiteView implements an interactive view
# for the vertexGrid of the D*Lite algorithm. It implements
# the interactive design of the terrain with start-, goalnode
# and obstacles and the pathplanning and path execution.
#
# File: DStarLiteView.py
# Author: Detlef Heinze
# Version: 1.1 Date: 22.07.2020
###########################################################
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from DStarLitePlanner import *
import enum
# Possible states of the application
class AppState(enum.Enum):
    """Lifecycle phase of the application; gates which UI actions are allowed."""
    inDesign = 0        # terrain, start and goal can be edited
    inPlanning = 1      # the D* Lite planner is running
    planPresent = 2     # a valid plan exists and may be executed
    inExecution = 3     # the plan is being executed (simulation or robot)
    afterExecution = 4  # execution finished
class DStarLiteView(object):
    """Interactive tkinter view for the D* Lite planner.

    Presents three tabs (Design / Planning / Execution) above a canvas that
    renders the planner's vertexGrid. The user designs the terrain by
    clicking start, goal and obstacle cells, then triggers planning and
    plan execution; obstacles may also be added while a plan is executed.
    """

    # Initialize a new DStarLiteView
    def __init__(self, master):
        """Build the main window: the three tabs, their widgets and the grid canvas."""
        self.master = master
        self.master.geometry('700x600')
        self.master.resizable(0, 0)
        master.title("Interactive D* Lite 1.0")
        self.appState = AppState.inDesign
        # Default planning grid size
        self.gridHeight = 4
        self.gridWidth = 5

        # tab control: designTab, row=0
        self.tab_control = ttk.Notebook(master)
        self.designTab = ttk.Frame(self.tab_control)
        self.planTab = ttk.Frame(self.tab_control)
        self.execTab = ttk.Frame(self.tab_control)
        self.tab_control.add(self.designTab, text='Design')
        self.tab_control.add(self.planTab, text='Planning')
        self.tab_control.add(self.execTab, text='Execution')
        self.lblGridWith = Label(self.designTab, text="Grid width:")
        self.lblGridWith.grid(column=0, row=0, pady=5, sticky=W)
        self.gridWidthVal = IntVar()
        self.gridWidthVal.set(self.gridWidth)
        self.spinGridWidth = Spinbox(self.designTab, from_=4, to=11, width=5, state='readonly', textvariable=self.gridWidthVal)
        self.spinGridWidth.grid(column=1, row=0, pady=5, sticky=W)
        self.lblGridHeight = Label(self.designTab, text="Grid height:")
        self.lblGridHeight.grid(column=2, row=0, pady=5, sticky=W)
        self.gridHeightVal = IntVar()
        self.gridHeightVal.set(self.gridHeight)
        self.spinGridHeight = Spinbox(self.designTab, from_=4, to=11, width=5, state='readonly', textvariable=self.gridHeightVal)
        self.spinGridHeight.grid(column=3, row=0, pady=5, sticky=W)
        self.btnRecreate = Button(self.designTab, text="Recreate grid", command=self.btnRecreate_clicked)
        self.btnRecreate.grid(column=4, row=0, pady=5, padx=5, sticky=W)
        self.tab_control.grid(column=0, row=0)

        # tab control: designTab, row=1 — radio buttons choosing what a canvas click means
        self.lblClickMode = Label(self.designTab, text="Click mode:")
        self.lblClickMode.grid(column=0, row=1, pady=5, sticky=W)
        self.clickModeVal = IntVar()
        self.clickModeVal.set(1)
        self.rad1 = Radiobutton(self.designTab, text='Start', value=1, variable=self.clickModeVal)
        self.rad2 = Radiobutton(self.designTab, text='Goal', value=2, variable=self.clickModeVal)
        self.rad3 = Radiobutton(self.designTab, text='Obstacle', value=3, variable=self.clickModeVal)
        self.rad1.grid(column=1, row=1)
        self.rad2.grid(column=2, row=1)
        self.rad3.grid(column=3, row=1)

        # tab control: planningTab
        self.lblMode = Label(self.planTab, text="Planning mode:")
        self.lblMode.grid(column=0, row=0, sticky=W)
        self.cbPlanningMode = ttk.Combobox(self.planTab, state="readonly", values=('Fast', 'Slow step', 'Manual step'),
                                           width=12)
        self.cbPlanningMode.current(0)
        self.cbPlanningMode.grid(column=1, row=0, pady=5, padx=0, sticky=W)
        self.h0_check = BooleanVar()
        self.h0_check.set(FALSE)  # set check state
        self.h0Check = Checkbutton(self.planTab, text='h = 0', state=NORMAL, var=self.h0_check)
        self.h0Check.grid(column=2, row=0, padx=10)
        self.directNeigbors = BooleanVar()
        self.directNeigbors.set(True)  # False= 8 neighbors, True= 4 neighbors
        self.neighbors = Checkbutton(self.planTab, text='Only direct neighbors(4)', var=self.directNeigbors)
        self.neighbors.grid(column=3, row=0, padx=10)
        self.btnPlan = Button(self.planTab, text="Start planning", command=self.btnPlan_clicked)
        self.btnPlan.grid(column=4, row=0, pady=5, padx=20, sticky=W)
        self.lblPlanHint = Label(self.planTab, text="Planning hint:")
        self.lblPlanHint.grid(column=0, row=1, pady=5, sticky=W)
        self.planHint = StringVar()
        self.planHint.set('-')
        self.lblPlanHintText = Label(self.planTab, text="", textvariable=self.planHint)
        self.lblPlanHintText.grid(column=1, row=1, pady=5, columnspan=2, sticky=W)

        # tab control: execTab
        self.lblExecMode = Label(self.execTab, text="Execution mode:")
        self.lblExecMode.grid(column=0, row=0, sticky=W)
        self.cbExecMode = ttk.Combobox(self.execTab, state="readonly", values=('Screen Simulation', 'Lego EV3 Control'),
                                       width=18)
        self.cbExecMode.current(0)
        self.cbExecMode.grid(column=1, row=0, pady=5, padx=0, sticky=W)
        self.lblRoboOrientation = Label(self.execTab, text="Robot start orientation:")
        self.lblRoboOrientation.grid(column=2, row=0, padx=8, sticky=W)
        self.cbRoboOrientation = ttk.Combobox(self.execTab, state="readonly", values=('North', 'East', 'South', 'West'),
                                              width=8)
        self.cbRoboOrientation.current(0)
        self.cbRoboOrientation.grid(column=3, row=0, pady=5, padx=0, sticky=W)
        self.btnExec = Button(self.execTab, text="Execute plan", command=self.btnExec_clicked)
        self.btnExec.grid(column=4, row=0, pady=5, padx=20, sticky=W)
        self.lblExecHint = Label(self.execTab, text="Execution hint:")
        self.lblExecHint.grid(column=0, row=1, pady=5, sticky=W)
        self.execHint = StringVar()
        self.execHint.set('-')
        self.lblExecHintText = Label(self.execTab, text="", textvariable=self.execHint)
        self.lblExecHintText.grid(column=1, row=1, pady=5, columnspan=2, sticky=W)

        # Row = 2 the grid
        self.createGrid()

    #### Eventhandler ####################################################################

    # Button "Recreate" has been clicked.
    def btnRecreate_clicked(self):
        """Recreate the planning grid and return the application to design state."""
        # Recreate the planning grid including all variables
        self.planHint.set('-')
        self.master.update()
        self.createGrid()
        self.h0Check.config(state="normal")
        self.neighbors.config(state="normal")
        self.appState = AppState.inDesign

    # Button "Start Planning" has been clicked. Execute planning
    def btnPlan_clicked(self):
        """Run the D* Lite planner if start and goal are set, reporting success or failure."""
        if self.appState != AppState.inDesign:
            messagebox.showinfo('Hint', 'Plan already created')
            return
        # Lock the other tabs while planning runs.
        self.tab_control.tab(0, state="disabled")
        self.tab_control.tab(2, state="disabled")
        # Check business rules
        if self.planner.areStartAndGoalSet():
            self.planner.hIsZero = self.h0_check.get()
            self.planner.directNeighbors = self.directNeigbors.get()
            self.planHint.set('Planning in progress.......')
            self.appState = AppState.inPlanning
            self.master.update()
            self.planner.mainPlanning(self.cbPlanningMode.get())
            if self.planner.planReady:
                self.appState = AppState.planPresent
                # Heuristic options may not change once a plan exists.
                self.h0Check.config(state="disabled")
                self.neighbors.config(state="disabled")
                self.planHint.set('Planning successful within ' + str(self.planner.planSteps) + ' steps')
                messagebox.showinfo('Hint', 'Plan is ready')
            else:
                self.appState = AppState.inDesign
                self.planHint.set('Planning unsuccessful !!!')
                messagebox.showinfo('Hint', 'No plan exists')
                messagebox.showinfo('Hint', 'Recreating grid')
                self.btnRecreate_clicked()
        else:
            messagebox.showinfo('Hint', 'Start- and/or Goalvertex is not definied')
            self.appState = AppState.inDesign
        self.tab_control.tab(0, state="normal")
        self.tab_control.tab(2, state="normal")

    # Button "Execute" has been clicked
    def btnExec_clicked(self):
        """Execute the current plan in simulation or on the robot; allow obstacle clicks meanwhile."""
        # Check business rules
        if not self.planner.planReady:
            messagebox.showinfo('Hint', 'No plan present. Goto design and planning tab.')
        else:
            self.appState = AppState.inExecution
            self.tab_control.tab(0, state="disabled")
            self.tab_control.tab(1, state="disabled")
            self.clickModeVal.set(3)  # Obstacle Mode
            self.execHint.set('Click to add obstacles during plan execution')
            result = self.planner.executePlan(self.cbExecMode.get())
            # result is a (success, message) pair.
            if result[0]:
                messagebox.showinfo('Hint', 'Plan has been executed!')
                self.appState = AppState.afterExecution
            else:
                messagebox.showinfo('Hint', result[1])
                self.appState = AppState.planPresent
            self.planHint.set('-')
            self.tab_control.tab(0, state="normal")
            self.tab_control.tab(1, state="normal")

    # Calculate the x and y coordinate of vertexGrid which the user has clicked.
    # If a g- or rsh- value was clickes find the rectangle below it.
    # Return 4 values: True, if click was within a rectangle,
    # the x anf y coord of the rectangle in vertexGrid
    # and the rectangle clicked
    def getClickInRectangle(self, current):
        """Resolve a canvas item under the cursor to its grid rectangle, looking below text items."""
        if self.canvGrid.gettags(current)[0] == 'rect':
            return True, self.canvGrid.gettags(current)[1], \
                   self.canvGrid.gettags(current)[2], \
                   current
        if self.canvGrid.gettags(current)[0] == 'gtext':
            # A g-value label was hit; the rectangle is the item directly below.
            below = self.canvGrid.find_below(current)
            if self.canvGrid.gettags(below)[0] == 'rect':
                return True, self.canvGrid.gettags(below)[1], \
                       self.canvGrid.gettags(below)[2], \
                       below
        if self.canvGrid.gettags(current)[0] == 'rshtext':
            # An rsh-value label was hit; the rectangle is two items below.
            below = self.canvGrid.find_below(current)
            below1 = self.canvGrid.find_below(below)
            if self.canvGrid.gettags(below1)[0] == 'rect':
                return True, self.canvGrid.gettags(below1)[1], \
                       self.canvGrid.gettags(below1)[2], \
                       below1
        else:  # no rectangle clicked
            return False, 0, 0, current

    # Handle the click-event in the canvas if appState is inDesing or inExecution
    def canv_clicked(self, event):
        """Dispatch a canvas click to set the start, the goal or toggle an obstacle."""
        print("clicked at", event.x, event.y)
        if (self.appState == AppState.inDesign or self.appState == AppState.inExecution) and \
                self.canvGrid.find_withtag(CURRENT):
            print(self.canvGrid.gettags(CURRENT))
            print(self.getClickInRectangle(CURRENT))
            result = self.getClickInRectangle(CURRENT)
            if result[0]:  # Click within a rectangle of the grid
                x = result[1]  # x coordinate in grid
                y = result[2]  # y coordiante in grid
                clickMode = self.clickModeVal.get()
                if not self.isNodeOccupied(x, y, clickMode):
                    if clickMode == 1:
                        # Set start node; clear the previous start cell first
                        # (inf coordinate means "no start set yet").
                        if self.planner.getStartCoordinates()[0] != float('inf'):
                            tag = str(self.planner.getStartCoordinates()[0]) + '-' + str(self.planner.getStartCoordinates()[1])
                            handle = self.canvGrid.find_withtag(tag)
                            self.canvGrid.itemconfig(handle, fill="white")
                        self.canvGrid.itemconfig(result[3], fill="green")
                        self.planner.setStartCoordinates(x, y)
                    elif clickMode == 2:
                        # Set goal node
                        oldGoal = self.planner.getGoalCoordinates()[0] != float('inf')
                        if oldGoal:
                            oldGoalCoord = self.planner.getGoalCoordinates()
                            tag = str(oldGoalCoord[0]) + '-' + str(oldGoalCoord[1])
                            handle = self.canvGrid.find_withtag(tag)
                            self.canvGrid.itemconfig(handle, fill="white")
                        self.canvGrid.itemconfig(result[3], fill="red")
                        self.planner.setGoalCoordinates(x, y)
                        self.update_rsh(x, y)
                        if oldGoal:
                            self.update_rsh(oldGoalCoord[0], oldGoalCoord[1])
                    elif clickMode == 3:
                        # Set or reset obstacale node
                        node = self.planner.vertexGrid[int(x)][int(y)]
                        if not node.isObstacle:
                            node.isObstacle = True
                            self.canvGrid.itemconfig(result[3], fill="brown")
                            self.planner.obstacles.add(node)
                        elif not self.appState == AppState.inExecution:
                            # Obstacles can only be removed in Design
                            node.isObstacle = False
                            self.canvGrid.itemconfig(result[3], fill="white")
                            self.planner.obstacles.remove(node)
                        self.update_rsh(x, y)
        else:
            self.show('Action not possible in this state of planning. Recreate grid.')

    def show(self, aMessage):
        """Display an informational message box."""
        messagebox.showinfo('Hint', aMessage)

    #### Functions ############################################################

    # Create a new planner and draw the grid
    def createGrid(self):
        """Create a fresh DStarLitePlanner and (re)draw the grid canvas."""
        # Create a planner and initialize it
        print('Creating planner')
        self.planner = DStarLitePlanner(self,
                                        gridWidth=self.gridWidthVal.get(),
                                        gridHeight=self.gridHeightVal.get(),
                                        hIsZero=self.h0_check.get(),
                                        directNeighbors=self.directNeigbors.get())
        horizShift = 30
        self.canvGrid = Canvas(self.master, height=800, width=600 + horizShift)
        self.canvGrid.bind("<Button-1>", self.canv_clicked)
        self.canvGrid.grid(column=0, row=2, pady=10, padx=10, columnspan=6, sticky=W)
        self.drawPlanningGrid(horizShift)

    # Draw the actual status of the planning grid
    def drawPlanningGrid(self, horizShift):
        """Draw the cell rectangles with their g/rsh labels plus the row/column numbers.

        horizShift is the horizontal pixel offset that leaves room for the
        row numbers on the left.
        """
        # Draw planning grid
        self.canvGrid.create_rectangle(0, 0, 680, 430, outline="white", fill="white")
        self.stepX = 600 // self.gridWidthVal.get()
        self.stepY = 400 // self.gridHeightVal.get()
        rowCount = self.gridHeightVal.get() - 1
        columnCount = 0
        # Add rectangles with g and rsh values; canvas y grows downwards while
        # grid row numbers grow upwards, hence the decreasing rowCount.
        for i in range(0, 600 - self.stepX + 1, self.stepX):
            for j in range(0, 400 - self.stepY + 1, self.stepY):
                self.canvGrid.create_rectangle(i + horizShift, j + 2, i + horizShift + self.stepX,
                                               j + self.stepY + 2, fill="white",
                                               tags=('rect', columnCount, rowCount, str(columnCount) + '-' + str(rowCount)))
                self.canvGrid.create_text(i + horizShift + self.stepX // 2, j + 2 + self.stepY // 3,
                                          text='g:' + str(self.planner.vertexGrid[columnCount][rowCount].g),
                                          tags=('gtext', 'g-' + str(columnCount) + '-' + str(rowCount)))
                self.canvGrid.create_text(i + horizShift + self.stepX // 2 - 5, j + 2 + self.stepY // 3 + 15,
                                          text='rsh:' + str(self.planner.vertexGrid[columnCount][rowCount].rsh),
                                          tags=('rshtext', 'rsh-' + str(columnCount) + '-' + str(rowCount)))
                rowCount -= 1
            columnCount += 1
            rowCount = self.gridHeightVal.get() - 1
        # Add row and column numbers
        rowCount = self.gridHeightVal.get() - 1
        for i in range(0, 400 - self.stepY + 1, self.stepY):
            self.canvGrid.create_text(15, i + self.stepY / 2, text=str(rowCount))
            rowCount -= 1
        columnCount = 0
        for i in range(0, 600, self.stepX):
            self.canvGrid.create_text(i + horizShift + self.stepX / 2, 400 + 15, text=str(columnCount))
            columnCount += 1

    # Update rsh-value on screen
    def update_rsh(self, x, y):
        """Refresh the on-canvas rsh label of cell (x, y) from the planner's grid."""
        tag = 'rsh-' + str(x) + '-' + str(y)
        # print('rsh-tag:' + tag)
        handle = self.canvGrid.find_withtag(tag)
        value = round(self.planner.vertexGrid[int(x)][int(y)].rsh, 2)
        self.canvGrid.itemconfig(handle, text='rsh:' + str(value))
        # print(value)

    # Update g-value on screen
    def update_g(self, x, y):
        """Refresh the on-canvas g label of cell (x, y) from the planner's grid."""
        tag = 'g-' + str(x) + '-' + str(y)
        # print('g:' + tag)
        handle = self.canvGrid.find_withtag(tag)
        value = round(self.planner.vertexGrid[int(x)][int(y)].g, 2)
        self.canvGrid.itemconfig(handle, text='g:' + str(value))

    # Update-color of vertex on screen if it is not the start- or goal-node
    def updateColor(self, aVertex, aColor):
        """Fill the rectangle of ``aVertex`` with ``aColor``."""
        tag = str(aVertex.x) + '-' + str(aVertex.y)
        handle = self.canvGrid.find_withtag(tag)
        self.canvGrid.itemconfig(handle, fill=aColor)

    # Check if the clicked rectangle is occupied by other or the same
    # type of node regarding the clickMode
    def isNodeOccupied(self, x, y, clickMode):
        """Return True (and warn the user) if cell (x, y) may not take the clicked node type."""
        start = self.planner.getStartCoordinates()
        if [int(x), int(y)] != start:
            goal = self.planner.getGoalCoordinates()
            if [int(x), int(y)] != goal:
                ob = self.planner.vertexGrid[int(x)][int(y)].isObstacle
                # clickMode <= 2 means start/goal placement, which an obstacle blocks;
                # obstacle clicks (mode 3) on an obstacle are allowed (toggle).
                if ob and clickMode <= 2:
                    messagebox.showwarning('Vertex occupied', 'The vertex is occupied by an obstacle')
                    return True
                else:
                    return False
            else:
                messagebox.showwarning('Vertex occupied', 'The vertex is occupied by a goal')
                return True
        else:
            messagebox.showwarning('Vertex occupied', 'The vertex is occupied by a start node')
            return True

    # Print function for test purpose
    def dumpVertices(self):
        """Print every vertex of the planner's grid (debug helper)."""
        [[self.planner.vertexGrid[x][y].print() for x in range(self.gridWidth)] for y in range(self.gridHeight)]
| 51.290667 | 128 | 0.568317 |
eadfa45f6e3c9b7aa67e8060c948a2ceafd15c9d | 2,620 | py | Python | moztrap/model/core/fixture_gen.py | yifanjiang/moztrap | 2130c7101b7596b19a2697ab5f1c745e93e7c95b | [
"BSD-2-Clause"
] | 1 | 2015-02-10T15:09:42.000Z | 2015-02-10T15:09:42.000Z | moztrap/model/core/fixture_gen.py | yifanjiang/moztrap | 2130c7101b7596b19a2697ab5f1c745e93e7c95b | [
"BSD-2-Clause"
] | null | null | null | moztrap/model/core/fixture_gen.py | yifanjiang/moztrap | 2130c7101b7596b19a2697ab5f1c745e93e7c95b | [
"BSD-2-Clause"
] | null | null | null | """Sample products fixture generator."""
from django.core.management import call_command
from django.contrib.auth.models import User as BaseUser, Group
from .auth import User, Role
from fixture_generator import fixture_generator
from ..environments.models import Profile
from .models import Product, ProductVersion
@fixture_generator(
    Product, ProductVersion, requires=[
        "environments.sample_environments", "core.sample_users"])
def sample_products():
    """Create the Firefox and MozTrap sample products with versioned environments."""
    admin = User.objects.get(username="admin")

    browser_profile = Profile.objects.get(name="Browser Testing Environments")
    firefox = Product.objects.create(name="Firefox", user=admin)
    for version in ["9", "10"]:
        pv = ProductVersion.objects.create(version=version, product=firefox, user=admin)
        pv.environments.add(*browser_profile.environments.all())

    web_profile = Profile.objects.get(name="Website Testing Environments")
    moztrap = Product.objects.create(name="MozTrap", user=admin)
    for version in ["0.6", "0.7"]:
        pv = ProductVersion.objects.create(version=version, product=moztrap, user=admin)
        pv.environments.add(*web_profile.environments.all())
    pv = ProductVersion.objects.create(
        version="0.8", product=moztrap, codename="Django DB", user=admin)
    pv.environments.add(*web_profile.environments.all())
@fixture_generator(Group, BaseUser)
def sample_users():
call_command("create_default_roles", verbosity=0)
tester_role = Role.objects.get(name="Tester")
creator_role = Role.objects.get(name="Test Creator")
manager_role = Role.objects.get(name="Test Manager")
admin_role = Role.objects.get(name="Admin")
# create and delete one user so we avoid using id=1 in the fixture; would
# overwrite the default superuser that may have been created on syncdb.
User.objects.create(username="delete")
User._base_manager.all().delete()
tester = User(username="tester", email="tester@example.com")
creator = User(username="creator", email="creator@example.com")
manager = User(username="manager", email="manager@example.com")
admin = User(username="admin", email="admin@example.com")
for user in [tester, creator, manager, admin]:
user.set_password("testpw")
user.save()
tester.roles.add(tester_role)
creator.roles.add(creator_role)
manager.roles.add(manager_role)
admin.roles.add(admin_role)
| 38.529412 | 78 | 0.730916 |
82d3fc172f7d73c1b868ec7fd408d4729723f4c0 | 33,004 | py | Python | tests/test_dataset_dict.py | aajanki/datasets | 65e224cc11c2c7da09788b903cb028c5629d7f95 | [
"Apache-2.0"
] | 6 | 2021-05-02T17:08:55.000Z | 2022-03-12T14:02:09.000Z | tests/test_dataset_dict.py | albertyumol/datasets | e58ce4b119e08ea1a19873232757ca52f77fc4ac | [
"Apache-2.0"
] | null | null | null | tests/test_dataset_dict.py | albertyumol/datasets | e58ce4b119e08ea1a19873232757ca52f77fc4ac | [
"Apache-2.0"
] | null | null | null | import os
import tempfile
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
from datasets import load_from_disk
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict
from datasets.features import ClassLabel, Features, Sequence, Value
from datasets.splits import NamedSplit
from .conftest import s3_test_bucket_name
from .utils import (
assert_arrow_memory_doesnt_increase,
assert_arrow_memory_increases,
require_s3,
require_tf,
require_torch,
)
class DatasetDictTest(TestCase):
def _create_dummy_dataset(self, multiple_columns=False):
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
dset = Dataset.from_dict(data)
else:
dset = Dataset.from_dict(
{"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]}
)
return dset
def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict:
return DatasetDict(
{
"train": self._create_dummy_dataset(multiple_columns=multiple_columns),
"test": self._create_dummy_dataset(multiple_columns=multiple_columns),
}
)
    def test_flatten_in_place(self):
        """flatten_ should replace nested struct columns with dotted flat columns on every split."""
        dset_split = Dataset.from_dict(
            {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
            features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
        )
        dset = DatasetDict({"train": dset_split, "test": dset_split})
        dset.flatten_()
        self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]})
        self.assertListEqual(list(dset["train"].features.keys()), ["a.b.c", "foo"])
        self.assertDictEqual(
            dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
        )
        del dset
    def test_flatten(self):
        """flatten (out of place) should produce the same dotted flat columns as flatten_."""
        dset_split = Dataset.from_dict(
            {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
            features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
        )
        dset = DatasetDict({"train": dset_split, "test": dset_split})
        dset = dset.flatten()
        self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]})
        self.assertListEqual(list(dset["train"].features.keys()), ["a.b.c", "foo"])
        self.assertDictEqual(
            dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
        )
        del dset
    def test_set_format_numpy(self):
        """set_format("numpy") should return numpy values, honor column selection,
        be restored by reset_format, and be scoped inside formatted_as."""
        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        dset.set_format(type="numpy", columns=["col_1"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 1)
            self.assertIsInstance(dset_split[0]["col_1"], np.int64)
            self.assertEqual(dset_split[0]["col_1"].item(), 3)

        # After reset_format the numpy formatting must be gone again …
        dset.reset_format()
        # … but formatted_as applies it temporarily inside the with-block.
        with dset.formatted_as(type="numpy", columns=["col_1"]):
            for dset_split in dset.values():
                self.assertEqual(len(dset_split[0]), 1)
                self.assertIsInstance(dset_split[0]["col_1"], np.int64)
                self.assertEqual(dset_split[0]["col_1"].item(), 3)

        # Outside the with-block every split is back to the default format.
        for dset_split in dset.values():
            self.assertEqual(dset_split.format["type"], None)
            self.assertEqual(dset_split.format["format_kwargs"], {})
            self.assertEqual(dset_split.format["columns"], dset_split.column_names)
            self.assertEqual(dset_split.format["output_all_columns"], False)

        # output_all_columns=True keeps unformatted columns as plain Python values.
        dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 2)
            self.assertIsInstance(dset_split[0]["col_2"], str)
            self.assertEqual(dset_split[0]["col_2"], "a")

        # Formatting a string column yields numpy string scalars.
        dset.set_format(type="numpy", columns=["col_1", "col_2"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 2)
            self.assertIsInstance(dset_split[0]["col_2"], np.str_)
            self.assertEqual(dset_split[0]["col_2"].item(), "a")
        del dset
    @require_torch
    def test_set_format_torch(self):
        """set_format("torch") should return scalar tensors and raise on non-tensorable columns."""
        import torch

        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        dset.set_format(type="torch", columns=["col_1"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 1)
            self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor)
            self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
            self.assertEqual(dset_split[0]["col_1"].item(), 3)

        # Unformatted columns stay plain Python when output_all_columns=True.
        dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 2)
            self.assertIsInstance(dset_split[0]["col_2"], str)
            self.assertEqual(dset_split[0]["col_2"], "a")

        # Strings cannot be converted to tensors, so accessing a row must raise.
        dset.set_format(type="torch", columns=["col_1", "col_2"])
        for dset_split in dset.values():
            with self.assertRaises(TypeError):
                dset_split[0]
        del dset
    @require_tf
    def test_set_format_tf(self):
        """set_format("tensorflow") should return tf tensors, including utf-8 string tensors."""
        import tensorflow as tf

        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        dset.set_format(type="tensorflow", columns=["col_1"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 1)
            self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor)
            self.assertListEqual(list(dset_split[0]["col_1"].shape), [])
            self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3)

        # Unformatted columns stay plain Python when output_all_columns=True.
        dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 2)
            self.assertIsInstance(dset_split[0]["col_2"], str)
            self.assertEqual(dset_split[0]["col_2"], "a")

        # Unlike torch, tensorflow supports string tensors (stored as utf-8 bytes).
        dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0]), 2)
            self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a")
        del dset
    def test_set_format_pandas(self):
        """set_format("pandas") should return one-row DataFrames restricted to the chosen columns."""
        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        dset.set_format(type="pandas", columns=["col_1"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0].columns), 1)
            self.assertIsInstance(dset_split[0], pd.DataFrame)
            self.assertListEqual(list(dset_split[0].shape), [1, 1])
            self.assertEqual(dset_split[0]["col_1"].item(), 3)

        dset.set_format(type="pandas", columns=["col_1", "col_2"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0].columns), 2)
            self.assertEqual(dset_split[0]["col_2"].item(), "a")
        del dset
    def test_set_transform(self):
        """set_transform should apply the transform lazily on access and survive a
        set_format(**format) round-trip unchanged."""

        def transform(batch):
            return {k: [str(i).upper() for i in v] for k, v in batch.items()}

        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        dset.set_transform(transform=transform, columns=["col_1"])
        for dset_split in dset.values():
            self.assertEqual(dset_split.format["type"], "custom")
            self.assertEqual(len(dset_split[0].keys()), 1)
            self.assertEqual(dset_split[0]["col_1"], "3")
            self.assertEqual(dset_split[:2]["col_1"], ["3", "2"])
            self.assertEqual(dset_split["col_1"][:2], ["3", "2"])

        # Re-applying a split's own format dict must be a no-op.
        prev_format = dset[list(dset.keys())[0]].format
        for dset_split in dset.values():
            dset_split.set_format(**dset_split.format)
            self.assertEqual(prev_format, dset_split.format)

        dset.set_transform(transform=transform, columns=["col_1", "col_2"])
        for dset_split in dset.values():
            self.assertEqual(len(dset_split[0].keys()), 2)
            self.assertEqual(dset_split[0]["col_2"], "A")
        del dset
def test_with_format(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_format("numpy", columns=["col_1"])
dset.set_format("numpy", columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
def test_with_transform(self):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset2 = dset.with_transform(transform, columns=["col_1"])
dset.set_transform(transform, columns=["col_1"])
for dset_split, dset_split2 in zip(dset.values(), dset2.values()):
self.assertDictEqual(dset_split.format, dset_split2.format)
del dset, dset2
    def test_cast_in_place(self):
        """cast_ should change a column's feature type (and Python value type) on every split."""
        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        features = dset["train"].features
        features["col_1"] = Value("float64")
        dset.cast_(features)
        for dset_split in dset.values():
            self.assertEqual(dset_split.num_columns, 2)
            self.assertEqual(dset_split.features["col_1"], Value("float64"))
            self.assertIsInstance(dset_split[0]["col_1"], float)
        del dset
    def test_cast(self):
        """cast (out of place) should change a column's feature type on every split."""
        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        features = dset["train"].features
        features["col_1"] = Value("float64")
        dset = dset.cast(features)
        for dset_split in dset.values():
            self.assertEqual(dset_split.num_columns, 2)
            self.assertEqual(dset_split.features["col_1"], Value("float64"))
            self.assertIsInstance(dset_split[0]["col_1"], float)
        del dset
    def test_remove_columns_in_place(self):
        """remove_columns_ should drop one or several columns in place on every split."""
        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        dset.remove_columns_(column_names="col_1")
        for dset_split in dset.values():
            self.assertEqual(dset_split.num_columns, 1)
            self.assertListEqual(list(dset_split.column_names), ["col_2"])

        dset = self._create_dummy_dataset_dict(multiple_columns=True)
        dset.remove_columns_(column_names=["col_1", "col_2"])
        for dset_split in dset.values():
            self.assertEqual(dset_split.num_columns, 0)
        del dset
def test_remove_columns(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names="col_1")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 1)
self.assertListEqual(list(dset_split.column_names), ["col_2"])
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.remove_columns(column_names=["col_1", "col_2"])
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 0)
del dset
def test_rename_column_in_place(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset.rename_column_(original_column_name="col_1", new_column_name="new_name")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"])
del dset
def test_rename_column(self):
dset = self._create_dummy_dataset_dict(multiple_columns=True)
dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name")
for dset_split in dset.values():
self.assertEqual(dset_split.num_columns, 2)
self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"])
del dset
def test_map(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys()))
self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"])
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
mapped_dsets_2: DatasetDict = mapped_dsets_1.map(
lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys()))
self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"]))
del dsets, mapped_dsets_1, mapped_dsets_2
def test_filter(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys()))
self.assertEqual(len(filtered_dsets_1["train"]), 10)
cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
filtered_dsets_2: DatasetDict = filtered_dsets_1.filter(
lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys()))
self.assertEqual(len(filtered_dsets_2["train"]), 5)
filtered_dsets_3: DatasetDict = dsets.filter(
lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True
)
self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys()))
self.assertEqual(len(filtered_dsets_3["train"]), 10)
del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3
def test_sort(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
sorted_dsets_1: DatasetDict = dsets.sort("filename")
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]],
sorted(f"{x:03d}" for x in range(30)),
)
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
sorted_dsets_2: DatasetDict = sorted_dsets_1.sort(
"filename", indices_cache_file_names=indices_cache_file_names, reverse=True
)
self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys()))
self.assertListEqual(
[f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]],
sorted((f"{x:03d}" for x in range(30)), reverse=True),
)
del dsets, sorted_dsets_1, sorted_dsets_2
def test_shuffle(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
indices_cache_file_names = {
"train": os.path.join(tmp_dir, "train.arrow"),
"test": os.path.join(tmp_dir, "test.arrow"),
}
seeds = {
"train": 1234,
"test": 1234,
}
dsets_shuffled = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"])
self.assertEqual(len(dsets_shuffled["train"]), 30)
self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028")
self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010")
self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")}))
self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")}))
# Reproducibility
indices_cache_file_names_2 = {
"train": os.path.join(tmp_dir, "train_2.arrow"),
"test": os.path.join(tmp_dir, "test_2.arrow"),
}
dsets_shuffled_2 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False
)
self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"])
seeds = {
"train": 1234,
"test": 1,
}
indices_cache_file_names_3 = {
"train": os.path.join(tmp_dir, "train_3.arrow"),
"test": os.path.join(tmp_dir, "test_3.arrow"),
}
dsets_shuffled_3 = dsets.shuffle(
seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False
)
self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"])
# other input types
dsets_shuffled_int = dsets.shuffle(42)
dsets_shuffled_alias = dsets.shuffle(seed=42)
dsets_shuffled_none = dsets.shuffle()
self.assertEqual(len(dsets_shuffled_int["train"]), 30)
self.assertEqual(len(dsets_shuffled_alias["train"]), 30)
self.assertEqual(len(dsets_shuffled_none["train"]), 30)
del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3
del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none
def test_check_values_type(self):
dsets = self._create_dummy_dataset_dict()
dsets["bad_split"] = None
self.assertRaises(TypeError, dsets.map, lambda x: x)
self.assertRaises(TypeError, dsets.filter, lambda x: True)
self.assertRaises(TypeError, dsets.shuffle)
self.assertRaises(TypeError, dsets.sort, "filename")
del dsets
def test_serialization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["test", "train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
self.assertEqual(len(reloaded_dsets["test"]), 30)
self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"])
del reloaded_dsets
del dsets["test"]
dsets.save_to_disk(tmp_dir)
reloaded_dsets = DatasetDict.load_from_disk(tmp_dir)
self.assertListEqual(sorted(reloaded_dsets), ["train"])
self.assertEqual(len(reloaded_dsets["train"]), 30)
self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"])
del dsets, reloaded_dsets
def test_load_from_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dsets = self._create_dummy_dataset_dict()
dsets.save_to_disk(tmp_dir)
del dsets
dsets = load_from_disk(tmp_dir)
self.assertListEqual(sorted(dsets), ["test", "train"])
self.assertEqual(len(dsets["train"]), 30)
self.assertListEqual(dsets["train"].column_names, ["filename"])
self.assertEqual(len(dsets["test"]), 30)
self.assertListEqual(dsets["test"].column_names, ["filename"])
del dsets
def test_align_labels_with_mapping(self):
train_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
}
)
test_features = Features(
{
"input_text": Value("string"),
"input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]),
}
)
train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]}
test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]}
label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1}
id2label = {v: k for k, v in label2id.items()}
train_expected_labels = [2, 2, 1, 1, 0, 0]
test_expected_labels = [2, 2, 0, 0, 1, 1]
train_expected_label_names = [id2label[idx] for idx in train_expected_labels]
test_expected_label_names = [id2label[idx] for idx in test_expected_labels]
dsets = DatasetDict(
{
"train": Dataset.from_dict(train_data, features=train_features),
"test": Dataset.from_dict(test_data, features=test_features),
}
)
dsets = dsets.align_labels_with_mapping(label2id, "input_labels")
self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"])
self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"])
train_aligned_label_names = [
dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"]
]
test_aligned_label_names = [
dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"]
]
self.assertListEqual(train_expected_label_names, train_aligned_label_names)
self.assertListEqual(test_expected_label_names, test_aligned_label_names)
def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert shape, column names and dtypes of a DatasetDict loaded from CSV."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert (dataset.num_rows, dataset.num_columns) == (4, 3)
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for name, expected_dtype in expected_features.items():
            assert dataset.features[name].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
    """`from_csv` honors `keep_in_memory`: arrow memory grows iff it is True."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    memory_check = assert_arrow_memory_increases if keep_in_memory else assert_arrow_memory_doesnt_increase
    with memory_check():
        dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_csv_features(features, csv_path, tmp_path):
    """Explicit `features` override the dtypes inferred from the CSV file."""
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    expected_features = dict(features) if features else default_expected_features
    if features is not None:
        features = Features({name: Value(dtype) for name, dtype in features.items()})
    dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir)
    _check_csv_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_csv_split(split, csv_path, tmp_path):
    """The requested split name(s) are propagated to the loaded datasets."""
    path = {split: csv_path} if split else {"train": csv_path, "test": csv_path}
    split = split or "train"
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
    dataset = DatasetDict.from_csv(path, cache_dir=cache_dir)
    _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert shape, column names and dtypes of a DatasetDict loaded from JSON lines."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert (dataset.num_rows, dataset.num_columns) == (4, 3)
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for name, expected_dtype in expected_features.items():
            assert dataset.features[name].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """`from_json` honors `keep_in_memory`: arrow memory grows iff it is True."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    memory_check = assert_arrow_memory_increases if keep_in_memory else assert_arrow_memory_doesnt_increase
    with memory_check():
        dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` override the dtypes inferred from the JSON lines file."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = dict(features) if features else default_expected_features
    if features is not None:
        features = Features({name: Value(dtype) for name, dtype in features.items()})
    dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir)
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """The requested split name(s) are propagated to the loaded datasets."""
    path = {split: jsonl_path} if split else {"train": jsonl_path, "test": jsonl_path}
    split = split or "train"
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = DatasetDict.from_json(path, cache_dir=cache_dir)
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert shape, column names and dtypes of a DatasetDict loaded from Parquet."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert (dataset.num_rows, dataset.num_columns) == (4, 3)
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for name, expected_dtype in expected_features.items():
            assert dataset.features[name].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """`from_parquet` honors `keep_in_memory`: arrow memory grows iff it is True."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    memory_check = assert_arrow_memory_increases if keep_in_memory else assert_arrow_memory_doesnt_increase
    with memory_check():
        dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path):
    """Explicit `features` override the dtypes read from the Parquet schema."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = dict(features) if features else default_expected_features
    if features is not None:
        features = Features({name: Value(dtype) for name, dtype in features.items()})
    dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir)
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split name(s) are propagated to the loaded datasets."""
    path = {split: parquet_path} if split else {"train": parquet_path, "test": parquet_path}
    split = split or "train"
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir)
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert shape, column names and dtypes of a DatasetDict loaded from text."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert (dataset.num_rows, dataset.num_columns) == (4, 1)
        assert dataset.column_names == ["text"]
        for name, expected_dtype in expected_features.items():
            assert dataset.features[name].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """`from_text` honors `keep_in_memory`: arrow memory grows iff it is True."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    memory_check = assert_arrow_memory_increases if keep_in_memory else assert_arrow_memory_doesnt_increase
    with memory_check():
        dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """Explicit `features` override the default string dtype of the text column."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = dict(features) if features else default_expected_features
    if features is not None:
        features = Features({name: Value(dtype) for name, dtype in features.items()})
    dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir)
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """The requested split name(s) are propagated to the loaded datasets."""
    path = {split: text_path} if split else {"train": text_path, "test": text_path}
    split = split or "train"
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = DatasetDict.from_text(path, cache_dir=cache_dir)
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
@require_s3
def test_dummy_dataset_serialize_s3(s3, dataset):
    """Round-trip a DatasetDict through (mock) S3 and check splits survive.

    Bug fix: the previous code saved and reloaded the single `dataset`
    instead of the `dsets` DatasetDict, so the assertions below checked the
    in-memory dict and never exercised the S3 round-trip.
    """
    dsets = DatasetDict({"train": dataset, "test": dataset.select(range(2))})
    mock_bucket = s3_test_bucket_name
    dataset_path = f"s3://{mock_bucket}/datasets/dict"
    column_names = dsets["train"].column_names
    lengths = [len(dset) for dset in dsets.values()]
    dsets.save_to_disk(dataset_path, s3)
    dsets = DatasetDict.load_from_disk(dataset_path, s3)
    assert sorted(dsets) == ["test", "train"]
    assert [len(dset) for dset in dsets.values()] == lengths
    assert dsets["train"].column_names == column_names
    assert dsets["test"].column_names == column_names
| 45.966574 | 119 | 0.641528 |
35567d19376495f9f731ab96cf9259a2e4519d64 | 18,757 | py | Python | official/vision/beta/configs/semantic_segmentation.py | e10101/models | 5c3e08b7697f0035b8731607277dc4e47e18317c | [
"Apache-2.0"
] | 2 | 2017-10-26T06:23:51.000Z | 2020-09-11T21:09:41.000Z | official/vision/beta/configs/semantic_segmentation.py | e10101/models | 5c3e08b7697f0035b8731607277dc4e47e18317c | [
"Apache-2.0"
] | 2 | 2018-06-18T17:08:12.000Z | 2021-04-12T05:39:04.000Z | official/vision/beta/configs/semantic_segmentation.py | e10101/models | 5c3e08b7697f0035b8731607277dc4e47e18317c | [
"Apache-2.0"
] | 2 | 2020-04-11T19:31:17.000Z | 2021-04-07T12:53:28.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Semantic segmentation configuration definition."""
import math
import os
from typing import List, Optional, Union
import dataclasses
import numpy as np
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling.hyperparams import config_definitions as cfg
from official.vision.beta.configs import backbones
from official.vision.beta.configs import common
from official.vision.beta.configs import decoders
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for semantic segmentation training/evaluation data."""
  # Height/width of the model inputs (and labels) after preprocessing.
  output_size: List[int] = dataclasses.field(default_factory=list)
  # If train_on_crops is set to True, a patch of size output_size is cropped
  # from the input image.
  train_on_crops: bool = False
  input_path: str = ''
  global_batch_size: int = 0
  is_training: bool = True
  dtype: str = 'float32'
  shuffle_buffer_size: int = 1000
  cycle_length: int = 10
  # If resize_eval_groundtruth is set to False, original image sizes are used
  # for eval. In that case, groundtruth_padded_size has to be specified too to
  # allow for batching the variable input sizes of images.
  resize_eval_groundtruth: bool = True
  groundtruth_padded_size: List[int] = dataclasses.field(default_factory=list)
  # Random scale-jitter range for training augmentation (min <= scale <= max).
  aug_scale_min: float = 1.0
  aug_scale_max: float = 1.0
  # Random horizontal flip augmentation.
  aug_rand_hflip: bool = True
  drop_remainder: bool = True
  file_type: str = 'tfrecord'
@dataclasses.dataclass
class SegmentationHead(hyperparams.Config):
  """Config for the segmentation prediction head."""
  # Decoder output level the head reads its features from.
  level: int = 3
  num_convs: int = 2
  num_filters: int = 256
  upsample_factor: int = 1
  feature_fusion: Optional[str] = None  # None, deeplabv3plus, or pyramid_fusion
  # deeplabv3plus feature fusion params
  low_level: int = 2
  low_level_num_filters: int = 48
@dataclasses.dataclass
class SemanticSegmentationModel(hyperparams.Config):
  """Semantic segmentation model config: backbone + decoder + head."""
  num_classes: int = 0
  input_size: List[int] = dataclasses.field(default_factory=list)
  # Feature pyramid levels consumed by the decoder/head.
  min_level: int = 3
  max_level: int = 6
  head: SegmentationHead = SegmentationHead()
  backbone: backbones.Backbone = backbones.Backbone(
      type='resnet', resnet=backbones.ResNet())
  # The 'identity' decoder passes backbone features straight to the head.
  decoder: decoders.Decoder = decoders.Decoder(type='identity')
  norm_activation: common.NormActivation = common.NormActivation()
@dataclasses.dataclass
class Losses(hyperparams.Config):
  """Loss config for semantic segmentation."""
  label_smoothing: float = 0.0
  # Pixels with this label value are excluded from the loss.
  ignore_label: int = 255
  class_weights: List[float] = dataclasses.field(default_factory=list)
  l2_weight_decay: float = 0.0
  use_groundtruth_dimension: bool = True
  # NOTE(review): presumably the fraction of hardest pixels kept in the loss
  # (1.0 keeps all pixels) — confirm against the loss implementation.
  top_k_percent_pixels: float = 1.0
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
  """Config for metric reporting during evaluation/training."""
  report_per_class_iou: bool = True
  report_train_mean_iou: bool = True  # Turning this off can speed up training.
@dataclasses.dataclass
class SemanticSegmentationTask(cfg.TaskConfig):
  """Task config bundling model, data, losses and checkpoint options."""
  model: SemanticSegmentationModel = SemanticSegmentationModel()
  train_data: DataConfig = DataConfig(is_training=True)
  validation_data: DataConfig = DataConfig(is_training=False)
  losses: Losses = Losses()
  evaluation: Evaluation = Evaluation()
  # Input partition dims for spatial partitioning (empty = no partitioning).
  train_input_partition_dims: List[int] = dataclasses.field(
      default_factory=list)
  eval_input_partition_dims: List[int] = dataclasses.field(
      default_factory=list)
  init_checkpoint: Optional[str] = None
  # Which modules to restore from init_checkpoint.
  init_checkpoint_modules: Union[
      str, List[str]] = 'all'  # all, backbone, and/or decoder
@exp_factory.register_config_factory('semantic_segmentation')
def semantic_segmentation() -> cfg.ExperimentConfig:
  """Semantic segmentation general experiment config.

  Returns:
    An `ExperimentConfig` with a default `SemanticSegmentationTask`.
  """
  return cfg.ExperimentConfig(
      # Fix: `task` must be a TaskConfig. The previous code passed the model
      # config (`SemanticSegmentationModel`), which has no `train_data` /
      # `validation_data` fields referenced by the restrictions below.
      task=SemanticSegmentationTask(),
      trainer=cfg.TrainerConfig(),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
# PASCAL VOC 2012 Dataset
# Example counts for the augmented train split and the val split.
PASCAL_TRAIN_EXAMPLES = 10582
PASCAL_VAL_EXAMPLES = 1449
# Base path of the PASCAL VOC TFRecord shards.
PASCAL_INPUT_PATH_BASE = 'pascal_voc_seg'
@exp_factory.register_config_factory('seg_deeplabv3_pascal')
def seg_deeplabv3_pascal() -> cfg.ExperimentConfig:
"""Image segmentation on imagenet with resnet deeplabv3."""
train_batch_size = 16
eval_batch_size = 8
steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
aspp_dilation_rates = [12, 24, 36] # [6, 12, 18] if output_stride = 16
multigrid = [1, 2, 4]
stem_type = 'v1'
level = int(np.math.log2(output_stride))
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=21,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
model_id=101, output_stride=output_stride,
multigrid=multigrid, stem_type=stem_type)),
decoder=decoders.Decoder(
type='aspp', aspp=decoders.ASPP(
level=level, dilation_rates=aspp_dilation_rates)),
head=SegmentationHead(level=level, num_convs=0),
norm_activation=common.NormActivation(
activation='swish',
norm_momentum=0.9997,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
# TODO(arashwan): test changing size to 513 to match deeplab.
output_size=[512, 512],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
output_size=[512, 512],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=False,
groundtruth_padded_size=[512, 512],
drop_remainder=False),
# resnet101
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
init_checkpoint_modules='backbone'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=45 * steps_per_epoch,
validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.007,
'decay_steps': 45 * steps_per_epoch,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('seg_deeplabv3plus_pascal')
def seg_deeplabv3plus_pascal() -> cfg.ExperimentConfig:
"""Image segmentation on imagenet with resnet deeplabv3+."""
train_batch_size = 16
eval_batch_size = 8
steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
aspp_dilation_rates = [6, 12, 18]
multigrid = [1, 2, 4]
stem_type = 'v1'
level = int(np.math.log2(output_stride))
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=21,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
model_id=101, output_stride=output_stride,
stem_type=stem_type, multigrid=multigrid)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level, dilation_rates=aspp_dilation_rates)),
head=SegmentationHead(
level=level,
num_convs=2,
feature_fusion='deeplabv3plus',
low_level=2,
low_level_num_filters=48),
norm_activation=common.NormActivation(
activation='swish',
norm_momentum=0.9997,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
output_size=[512, 512],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
output_size=[512, 512],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=False,
groundtruth_padded_size=[512, 512],
drop_remainder=False),
# resnet101
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
init_checkpoint_modules='backbone'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=45 * steps_per_epoch,
validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.007,
'decay_steps': 45 * steps_per_epoch,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('seg_resnetfpn_pascal')
def seg_resnetfpn_pascal() -> cfg.ExperimentConfig:
  """Image segmentation on imagenet with resnet-fpn.

  Returns:
    An `ExperimentConfig` for a ResNet-50 + FPN segmentation model trained
    on PASCAL VOC (despite the one-line summary above mentioning imagenet).
  """
  train_batch_size = 256
  eval_batch_size = 32
  steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              num_classes=21,
              input_size=[512, 512, 3],
              # FPN pyramid levels.
              min_level=3,
              max_level=7,
              backbone=backbones.Backbone(
                  type='resnet', resnet=backbones.ResNet(model_id=50)),
              decoder=decoders.Decoder(type='fpn', fpn=decoders.FPN()),
              head=SegmentationHead(level=3, num_convs=3),
              norm_activation=common.NormActivation(
                  activation='swish',
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.2,
              aug_scale_max=1.5),
          validation_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=False,
              groundtruth_padded_size=[512, 512],
              drop_remainder=False),
      ),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          # Trained from scratch, hence the long (450 epoch) schedule.
          train_steps=450 * steps_per_epoch,
          validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.007,
                      'decay_steps': 450 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
# Cityscapes Dataset (Download and process the dataset yourself)
# Example counts for the fine-annotation train and val splits.
CITYSCAPES_TRAIN_EXAMPLES = 2975
CITYSCAPES_VAL_EXAMPLES = 500
# Base path of the Cityscapes TFRecord shards.
CITYSCAPES_INPUT_PATH_BASE = 'cityscapes'
@exp_factory.register_config_factory('seg_deeplabv3plus_cityscapes')
def seg_deeplabv3plus_cityscapes() -> cfg.ExperimentConfig:
  """Image segmentation on Cityscapes with ResNet-101 DeepLabV3+.

  Returns:
    An `ExperimentConfig` wiring together the segmentation task (model,
    losses, train/eval data pipelines) and a 500-epoch trainer using SGD
    with a polynomial learning-rate decay and linear warmup.
  """
  train_batch_size = 16
  eval_batch_size = 16
  steps_per_epoch = CITYSCAPES_TRAIN_EXAMPLES // train_batch_size
  output_stride = 16
  aspp_dilation_rates = [6, 12, 18]
  multigrid = [1, 2, 4]
  stem_type = 'v1'
  # Feature level that matches the backbone output stride.
  # Fix: use np.log2 directly — the `np.math` alias is deprecated and was
  # removed in NumPy 2.0, so `np.math.log2` raises AttributeError there.
  level = int(np.log2(output_stride))
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              # Cityscapes uses only 19 semantic classes for train/evaluation.
              # The void (background) class is ignored in train and evaluation.
              num_classes=19,
              input_size=[None, None, 3],
              backbone=backbones.Backbone(
                  type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
                      model_id=101, output_stride=output_stride,
                      stem_type=stem_type, multigrid=multigrid)),
              decoder=decoders.Decoder(
                  type='aspp',
                  aspp=decoders.ASPP(
                      level=level, dilation_rates=aspp_dilation_rates,
                      pool_kernel_size=[512, 1024])),
              head=SegmentationHead(
                  level=level,
                  num_convs=2,
                  feature_fusion='deeplabv3plus',
                  low_level=2,
                  low_level_num_filters=48),
              norm_activation=common.NormActivation(
                  activation='swish',
                  norm_momentum=0.99,
                  norm_epsilon=1e-3,
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE,
                                      'train_fine**'),
              output_size=[512, 1024],
              train_on_crops=True,
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.5,
              aug_scale_max=2.0),
          validation_data=DataConfig(
              input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE, 'val_fine*'),
              output_size=[1024, 2048],
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=True,
              drop_remainder=False),
          # resnet101 pretrained on ImageNet; only the backbone is restored.
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
          init_checkpoint_modules='backbone'),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=500 * steps_per_epoch,
          validation_steps=CITYSCAPES_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.01,
                      'decay_steps': 500 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
| 38.357873 | 112 | 0.602175 |
8492b3dcf65ba96d8a4aa81ba0302dff18f25cef | 4,072 | py | Python | test/tomography/test_state_tomography.py | imaihal/qiskit-ignis | ca97a7482d648e09d6951544565f51c7573e6b72 | [
"Apache-2.0"
] | null | null | null | test/tomography/test_state_tomography.py | imaihal/qiskit-ignis | ca97a7482d648e09d6951544565f51c7573e6b72 | [
"Apache-2.0"
] | null | null | null | test/tomography/test_state_tomography.py | imaihal/qiskit-ignis | ca97a7482d648e09d6951544565f51c7573e6b72 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring
import unittest
import numpy
import qiskit
from qiskit import QuantumRegister, QuantumCircuit, Aer
from qiskit.quantum_info import state_fidelity
import qiskit.ignis.verification.tomography as tomo
import qiskit.ignis.verification.tomography.fitters.cvx_fit as cvx_fit
def run_circuit_and_tomography(circuit, qubits):
    """Simulate *circuit* exactly and reconstruct its state via tomography.

    Returns:
        Tuple ``(rho_cvx, rho_mle, psi)``: the CVX fit, the least-squares
        fit, and the ideal statevector used as the fidelity reference.
    """
    # Ideal (noise-free) statevector for later fidelity comparison.
    sv_backend = Aer.get_backend('statevector_simulator')
    psi = qiskit.execute(circuit, sv_backend).result().get_statevector(circuit)

    # Sample the full set of tomography circuits on the QASM simulator.
    tomo_circuits = tomo.state_tomography_circuits(circuit, qubits)
    counts_job = qiskit.execute(tomo_circuits,
                                Aer.get_backend('qasm_simulator'),
                                shots=5000)

    fitter = tomo.StateTomographyFitter(counts_job.result(), tomo_circuits)
    rho_cvx = fitter.fit(method='cvx')
    rho_mle = fitter.fit(method='lstsq')
    return (rho_cvx, rho_mle, psi)
class TestFitter(unittest.TestCase):
    """Unit tests for the CVXPY-based tomography fitter."""

    def test_trace_constraint(self):
        """Check cvx_fit honours an explicit trace constraint.

        ``p`` and ``A`` describe a 1-qubit measurement in the Pauli basis;
        for each requested trace value the fitted density matrix must
        reproduce that trace (to 3 decimal places).
        """
        p = numpy.array([1/2, 1/2, 1/2, 1/2, 1/2, 1/2])

        # the basis matrix for 1-qubit measurement in the Pauli basis
        A = numpy.array([
            [0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j],
            [0.5 + 0.j, -0.5 + 0.j, -0.5 + 0.j, 0.5 + 0.j],
            [0.5 + 0.j, 0. - 0.5j, 0. + 0.5j, 0.5 + 0.j],
            [0.5 + 0.j, 0. + 0.5j, 0. - 0.5j, 0.5 + 0.j],
            [1. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],
            [0. + 0.j, 0. + 0.j, 0. + 0.j, 1. + 0.j]
        ])

        # Includes trace values outside [0, 1] to exercise the constraint
        # itself, not just physical density matrices.
        for trace_value in [1, 0.3, 2, 0, 42]:
            rho = cvx_fit.cvx_fit(p, A, trace=trace_value)
            self.assertAlmostEqual(numpy.trace(rho), trace_value, places=3)
class TestStateTomography(unittest.TestCase):
    """End-to-end state tomography checks against the ideal statevector."""

    def test_bell_2_qubits(self):
        """2-qubit Bell state: H on q0 followed by CNOT."""
        q2 = QuantumRegister(2)
        bell = QuantumCircuit(q2)
        bell.h(q2[0])
        bell.cx(q2[0], q2[1])

        rho_cvx, rho_mle, psi = run_circuit_and_tomography(bell, q2)
        # Both fitters should reconstruct the state with near-unit fidelity.
        F_bell_cvx = state_fidelity(psi, rho_cvx)
        self.assertAlmostEqual(F_bell_cvx, 1, places=1)
        F_bell_mle = state_fidelity(psi, rho_mle)
        self.assertAlmostEqual(F_bell_mle, 1, places=1)

    def test_bell_3_qubits(self):
        """3-qubit GHZ-style entangled state."""
        q3 = QuantumRegister(3)
        bell = QuantumCircuit(q3)
        bell.h(q3[0])
        bell.cx(q3[0], q3[1])
        bell.cx(q3[1], q3[2])

        rho_cvx, rho_mle, psi = run_circuit_and_tomography(bell, q3)
        F_bell_cvx = state_fidelity(psi, rho_cvx)
        self.assertAlmostEqual(F_bell_cvx, 1, places=1)
        F_bell_mle = state_fidelity(psi, rho_mle)
        self.assertAlmostEqual(F_bell_mle, 1, places=1)

    def test_complex_1_qubit_circuit(self):
        """Generic single-qubit rotation u3(1, 1, 1)."""
        q = QuantumRegister(1)
        circ = QuantumCircuit(q)
        circ.u3(1, 1, 1, q[0])

        rho_cvx, rho_mle, psi = run_circuit_and_tomography(circ, q)
        F_bell_cvx = state_fidelity(psi, rho_cvx)
        self.assertAlmostEqual(F_bell_cvx, 1, places=1)
        F_bell_mle = state_fidelity(psi, rho_mle)
        self.assertAlmostEqual(F_bell_mle, 1, places=1)

    def test_complex_3_qubit_circuit(self):
        """Three independent random single-qubit rotations."""
        def rand_angles():
            # Three angles uniformly drawn from (-pi, pi].
            return tuple(2 * numpy.pi * numpy.random.random(3) - numpy.pi)
        q = QuantumRegister(3)
        circ = QuantumCircuit(q)
        for j in range(3):
            circ.u3(*rand_angles(), q[j])

        rho_cvx, rho_mle, psi = run_circuit_and_tomography(circ, q)
        F_bell_cvx = state_fidelity(psi, rho_cvx)
        self.assertAlmostEqual(F_bell_cvx, 1, places=1)
        F_bell_mle = state_fidelity(psi, rho_mle)
        self.assertAlmostEqual(F_bell_mle, 1, places=1)
if __name__ == '__main__':
unittest.main()
| 35.103448 | 77 | 0.638998 |
8669320cb6895c25422a0f00c59ed6cbc58f3ce2 | 2,597 | py | Python | pykinect_azure/k4a/calibration.py | necoxt/pyKinectAzure | 1e1fa845bd8299b7534a647f12ca0b49c5bc57d4 | [
"MIT"
] | null | null | null | pykinect_azure/k4a/calibration.py | necoxt/pyKinectAzure | 1e1fa845bd8299b7534a647f12ca0b49c5bc57d4 | [
"MIT"
] | null | null | null | pykinect_azure/k4a/calibration.py | necoxt/pyKinectAzure | 1e1fa845bd8299b7534a647f12ca0b49c5bc57d4 | [
"MIT"
] | null | null | null | import ctypes
from pykinect_azure.k4a import _k4a
class Calibration:
    """Thin wrapper around an Azure Kinect (k4a) calibration handle.

    Exposes the SDK's coordinate conversions (3D<->3D, 2D<->3D, 2D<->2D
    between the depth and color cameras) and a readable dump of the color
    camera intrinsics.
    """

    def __init__(self, calibration_handle):
        self._handle = calibration_handle

    def __del__(self):
        # Drop the handle when the wrapper is garbage collected.
        self.reset()

    def __str__(self):
        """Return a human-readable dump of the color-camera intrinsics."""
        params = self._handle.color_camera_calibration.intrinsics.parameters.param
        message = (
            "Rgb Intrinsic parameters: \n"
            f"\tcx: {params.cx}\n"
            f"\tcy: {params.cy}\n"
            f"\tfx: {params.fx}\n"
            f"\tfy: {params.fy}\n"
            f"\tk1: {params.k1}\n"
            f"\tk2: {params.k2}\n"
            f"\tk3: {params.k3}\n"
            f"\tk4: {params.k4}\n"
            f"\tk5: {params.k5}\n"
            f"\tk6: {params.k6}\n"
            f"\tcodx: {params.codx}\n"
            f"\tcody: {params.cody}\n"
            f"\tp2: {params.p2}\n"
            f"\tp1: {params.p1}\n"
            f"\tmetric_radius: {params.metric_radius}\n"
        )
        return message

    def is_valid(self):
        """Return a truthy value while the handle is held."""
        return self._handle

    def handle(self):
        """Return the raw SDK calibration handle."""
        return self._handle

    def reset(self):
        """Release the handle (idempotent)."""
        if self.is_valid():
            self._handle = None

    def convert_3d_to_3d(self, source_point3d, source_camera, target_camera):
        """Transform a 3D point between camera coordinate systems."""
        target_point3d = _k4a.k4a_float3_t()
        _k4a.VERIFY(_k4a.k4a_calibration_3d_to_3d(self._handle, source_point3d, source_camera, target_camera, target_point3d), "Failed to convert from 3D to 3D")
        return target_point3d

    def convert_2d_to_3d(self, source_point2d, source_depth, source_camera, target_camera):
        """Unproject a 2D pixel (plus depth in mm) to a 3D point."""
        target_point3d = _k4a.k4a_float3_t()
        valid = ctypes.c_int()
        _k4a.VERIFY(_k4a.k4a_calibration_2d_to_3d(self._handle, source_point2d, source_depth, source_camera, target_camera, target_point3d, valid), "Failed to convert from 2D to 3D")
        return target_point3d

    def convert_3d_to_2d(self, source_point3d, source_camera, target_camera):
        """Project a 3D point to a 2D pixel of the target camera."""
        target_point2d = _k4a.k4a_float2_t()
        valid = ctypes.c_int()
        _k4a.VERIFY(_k4a.k4a_calibration_3d_to_2d(self._handle, source_point3d, source_camera, target_camera, target_point2d, valid), "Failed to convert from 3D to 2D")
        return target_point2d

    def convert_2d_to_2d(self, source_point2d, source_depth, source_camera, target_camera):
        """Map a 2D pixel (plus depth) to the corresponding pixel of another camera."""
        target_point2d = _k4a.k4a_float2_t()
        valid = ctypes.c_int()
        _k4a.VERIFY(_k4a.k4a_calibration_2d_to_2d(self._handle, source_point2d, source_depth, source_camera, target_camera, target_point2d, valid), "Failed to convert from 2D to 2D")
        return target_point2d

    def convert_color_2d_to_depth_2d(self, source_point2d, depth_image):
        """Map a color-image pixel to the corresponding depth-image pixel."""
        target_point2d = _k4a.k4a_float2_t()
        valid = ctypes.c_int()
        # Bug fix: the binding lives directly on the _k4a module like its
        # siblings above; the original `_k4a.k4a._k4a_calibration_color_2d_to_depth_2d`
        # raised AttributeError at call time.
        _k4a.VERIFY(_k4a.k4a_calibration_color_2d_to_depth_2d(self._handle, source_point2d, depth_image, target_point2d, valid), "Failed to convert from Color 2D to Depth 2D")
        return target_point2d
| 27.924731 | 173 | 0.747016 |
3807fb7f779c2b300d15bf5d4ca12d9beb9c9c74 | 4,756 | py | Python | tutorials_for_myself/my_learn2learn/learn2learn_cherry_distributed.py | pestun/ultimate-utils | 676002e80422067256c43172a78825ed12954bcb | [
"MIT"
] | 5 | 2021-03-13T16:07:26.000Z | 2021-09-09T17:00:36.000Z | tutorials_for_myself/my_learn2learn/learn2learn_cherry_distributed.py | pestun/ultimate-utils | 676002e80422067256c43172a78825ed12954bcb | [
"MIT"
] | 8 | 2021-03-09T21:52:09.000Z | 2021-12-02T17:23:33.000Z | tutorials_for_myself/my_learn2learn/learn2learn_cherry_distributed.py | pestun/ultimate-utils | 676002e80422067256c43172a78825ed12954bcb | [
"MIT"
] | 5 | 2021-03-24T20:38:43.000Z | 2022-03-17T07:54:12.000Z | #%%
"""
test a basic distributed example for learn2learn
refs:
- l2l:
- https://github.com/learnables/learn2learn/issues/197
- https://pytorch.org/docs/stable/distributed.html#launch-utility
- torchmeta
- https://stackoverflow.com/questions/69730835/how-does-one-create-a-distributed-data-loader-with-pytorchs-torchmeta
- https://github.com/tristandeleu/pytorch-meta/issues/116
"""
from argparse import Namespace
import torch
from torch import nn
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
# from meta_learning.base_models.learner_from_opt_as_few_shot_paper import get_learner_from_args
from uutils.torch_uu.models.learner_from_opt_as_few_shot_paper import get_learner_from_args
from uutils.torch_uu import process_meta_batch
from uutils.torch_uu.dataloaders import get_distributed_dataloader_miniimagenet_torchmeta, get_args_for_mini_imagenet
from uutils.torch_uu.distributed import print_process_info, print_gpu_info, setup_process, move_model_to_ddp, \
cleanup, find_free_port
def get_dist_dataloader_torch_meta_mini_imagenet(args) -> dict[str, DataLoader]:
    """Return distributed torchmeta mini-ImageNet dataloaders keyed by split."""
    return get_distributed_dataloader_miniimagenet_torchmeta(args)
def run_parallel_training_loop(rank: int, args: Namespace):
    """
    Run torchmeta examples with a distributed dataloader.

    Executed once per spawned worker process; ``rank`` identifies the worker
    and is also used as the GPU index.  The order below matters: process
    group setup must precede DDP model wrapping and dataloader creation.

    This should distribute the following loop:
        for batch_idx, batch in enumerate(dataloader['train']):
            print(f'{batch_idx=}')
            spt_x, spt_y, qry_x, qry_y = process_meta_batch(args, batch)
            print(f'Train inputs shape: {spt_x.size()}')  # (2, 25, 3, 28, 28)
            print(f'Train targets shape: {spt_y.size()}'.format(spt_y.shape))  # (2, 25)
            print(f'Test inputs shape: {qry_x.size()}')  # (2, 75, 3, 28, 28)
            print(f'Test targets shape: {qry_y.size()}')  # (2, 75)
            break

    Note:
        usual loop for ddp looks as follows:
            for i, batch in enumerate(train_loader):
                # Forward pass
                outputs = model(images)
                loss = criterion(outputs, labels)
                if rank == 0:
                    print(f'{loss=}')

                # Backward and optimize
                optimizer.zero_grad()
                loss.backward()  # When the backward() returns, param.grad already contains the synchronized gradient tensor.
                optimizer.step()
    """
    print(f'-> started ps with {rank=}')
    # Record this worker's identity; args.gpu maps rank -> CUDA device.
    args.rank = rank
    print_process_info(args.rank)
    print_gpu_info()
    args.gpu = rank
    # Join the process group before any collective communication happens.
    setup_process(args, rank, master_port=args.master_port, world_size=args.world_size)

    # get ddp model
    print('about to create model')
    # args.Din, args.Dout = 10, 10
    # model = nn.Linear(args.Din, args.Dout)
    model = get_learner_from_args(args)
    model = move_model_to_ddp(rank, args, model)
    criterion = nn.CrossEntropyLoss().to(args.gpu)
    print('done creating ddp model')

    # can distributed dataloader
    print('about to create torch meta data loader')
    dataloaders: dict[str, DataLoader] = get_distributed_dataloader_miniimagenet_torchmeta(args)
    print('done created distributed data loaders')

    optimizer = torch.optim.SGD(model.parameters(), 1e-4)

    # do training: train on the support set of each meta-batch.
    print('about to train')
    for batch_idx, batch in enumerate(dataloaders['train']):
        print(f'{batch_idx=}')
        spt_x, spt_y, qry_x, qry_y = process_meta_batch(args, batch)
        outputs = model(spt_x)
        loss = criterion(outputs, spt_y)
        if rank == 0:
            print(f'{loss=}')

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()  # When the backward() returns, param.grad already contains the synchronized gradient tensor.
        optimizer.step()

    # Destroy a given process group, and deinitialize the distributed package
    cleanup(rank)
def hello(rank: int, args):
    """Minimal spawn target: report which worker rank is running."""
    print('hello rank={}'.format(rank))
def ddp_example_torchmeta_dataloader_test():
    """
    Spawn one worker per device (or 4 on CPU) and run the distributed
    torchmeta-dataloader training loop in each.

    Useful links:
    - https://github.com/yangkky/distributed_tutorial/blob/master/src/mnist-distributed.py
    - https://pytorch.org/tutorials/intermediate/ddp_tutorial.html
    """
    print('test_basic_ddp_example')
    # args = Namespace(epochs=3, batch_size=8)
    args = get_args_for_mini_imagenet()
    # World size: one process per GPU, or a fixed 4 workers on CPU-only hosts.
    if torch.cuda.is_available():
        args.world_size = torch.cuda.device_count()
    else:
        args.world_size = 4
    # Pick an unused port so concurrent runs don't collide on rendezvous.
    args.master_port = find_free_port()
    print('\nABOUT TO SPAWN WORKERS (via mp.spawn)')
    # mp.spawn(hello, args=(args,), nprocs=args.world_size)
    # mp.spawn blocks until every worker process exits.
    mp.spawn(run_parallel_training_loop, args=(args,), nprocs=args.world_size)
    print('mp.spawn finished\a')
if __name__ == '__main__':
print('')
ddp_example_torchmeta_dataloader_test()
print('Done\a') | 36.305344 | 124 | 0.697855 |
da988436bc389cc5e7037b64be850da52ef4f83f | 1,016 | py | Python | ca_on_school_boards_english_public/__init__.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 19 | 2015-05-26T03:18:50.000Z | 2022-01-31T03:27:41.000Z | ca_on_school_boards_english_public/__init__.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 119 | 2015-01-09T06:09:35.000Z | 2022-01-20T23:05:05.000Z | ca_on_school_boards_english_public/__init__.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 17 | 2015-11-23T05:00:10.000Z | 2021-09-15T16:03:33.000Z | from utils import CanadianJurisdiction
from opencivicdata.divisions import Division
from pupa.scrape import Organization
class OntarioEnglishPublicSchoolBoards(CanadianJurisdiction):
    """Scraper jurisdiction for Ontario's English public school boards."""

    classification = 'school'  # just to avoid clash
    division_id = 'ocd-division/country:ca/province:on'
    # Bug fix: removed the stray trailing double-quote that was embedded
    # inside the original division name string.
    division_name = 'Ontario English Public School Board boundary'
    name = 'Ontario English Public School Boards'
    url = 'http://www.edu.gov.on.ca/eng/sbinfo/boardList.html'

    def get_organizations(self):
        """Yield one organization holding a Chair post and 22 Trustee posts
        for each Ontario school district."""
        organization = Organization(self.name, classification='committee')
        organization.add_source(self.url)

        for division in Division.get(self.division_id).children('school_district'):
            organization.add_post(role='Chair', label=division.name, division_id=division.id)
            for i in range(0, 22):  # XXX made-up number
                organization.add_post(role='Trustee', label='{} (seat {})'.format(division.name, i), division_id=division.id)

        yield organization
| 44.173913 | 125 | 0.722441 |
7568b64ccf6f876cd7b049a79cce42f18837f97c | 5,149 | py | Python | Python/anim.py | JoshuaSimon/N-Body-Problem | 1eb403f31d2708cf6828a61f8aac9b6fad5c5251 | [
"MIT"
] | 13 | 2019-07-04T11:04:20.000Z | 2022-03-09T11:00:53.000Z | Python/anim.py | JoshuaSimon/N-Body-Problem | 1eb403f31d2708cf6828a61f8aac9b6fad5c5251 | [
"MIT"
] | 1 | 2021-06-28T23:26:34.000Z | 2021-07-19T14:43:00.000Z | Python/anim.py | JoshuaSimon/N-Body-Problem | 1eb403f31d2708cf6828a61f8aac9b6fad5c5251 | [
"MIT"
] | 1 | 2020-10-14T14:40:58.000Z | 2020-10-14T14:40:58.000Z | import mayavi
import numpy as np
import matplotlib.pyplot as plt
from odes import *
from numpy.random import default_rng
from mayavi import mlab
from numba import jit
gravitational_constant = 1.0
num_bodies = 20
radius = 10
angles = np.linspace(0, 2*np.pi, num=num_bodies+1)[:-1]
masses = np.matrix(np.ones(angles.shape)) / num_bodies
initial_positions = radius * np.array([np.cos(angles), np.sin(angles)]).T
initial_velocities = np.array([-np.sin(angles), np.cos(angles)]).T
plt.scatter(initial_positions[:, 0], initial_positions[:, 1])
plt.quiver(initial_positions[:, 0], initial_positions[:, 1],
initial_velocities[:, 0], initial_velocities[:, 1])
plt.show()
y_0 = np.vstack([initial_positions, initial_velocities])
print(y_0)
# Adaptive Dormand-Prince 5(4) integrator wrapping the ODE right-hand side.
# @solve_ivp(explicit_rk4, t_end=10, step_size=0.01)
@ solve_ivp(dopri54, t_end=100, step_size_0=0.05, eps_target=1e-7)
@ ivp(t_0=0.0,
      y_0=y_0,
      gravitational_constant=gravitational_constant,
      masses=masses,
      dimension=initial_positions[0].size)
def n_body_problem(t: np.array, y: np.array, gravitational_constant: float, masses: np.matrix, dimension: int):
    """ d-dimensional n body problem

    Args:
        t: time
        y: matrix of size 2n × d such that the first n rows are the positions r
            and the second n rows are the velocities v for the n bodies in order
        m: vector with mass of each particle; e.g. m = np.matrix([1,2,3])
    Returns:
        dy/dt (flattened: velocities followed by accelerations)
    """
    d = dimension
    n = y.shape[0] // d
    # d = y.shape[1]
    r = y[:n, :]  # current positions
    v = y[n:, :]  # current velocities

    # calculate forces on each particle
    # find matrix with all possible products between masses
    m = masses.transpose() @ masses
    # print(f"{m=}")
    # find all distance differences; dimension of diff1 is n × n × d
    # print(f"{r=}")
    diff = np.array([r - r[k, :] for k in range(n)])
    # print(f"{diff=}")
    # remove zero rows / self interactions
    # diff = diff1[np.all(diff1 == 0, axis=2)].reshape((n, n-1, d))
    # calculate |r_i - r_j|³
    denom = np.sum(diff**2, axis=2) ** (3/2)
    # note that these are elementwise operations
    # denom will always contain zeros (on self interation)
    # we ignore the divide by zero
    with np.errstate(divide='ignore'):
        scalar_factor = gravitational_constant * m / denom
    # set the zero divisions to zero
    scalar_factor[np.isinf(scalar_factor)] = 0
    # actually calculate force vectors
    # print(f"{scalar_factor=}")
    # print(f"{diff[:, :, 0]=}")
    force = np.array([scalar_factor * diff[:, :, k] for k in range(d)])
    # print(f"{force=}")
    # find total force on each particle in this step; the dimension is d × n
    cum_force = np.sum(force, axis=2)
    # the acceleration of the particle is given by a = F/m  (Newton's 2nd law)
    acceleration = cum_force / masses
    # Flatten back to the 1-D layout the integrator expects.
    return np.vstack([v, acceleration.transpose()]).flatten()
def get_body_color(i_body, n_bodies, cmap=plt.get_cmap('jet_r')):
    """Return an RGBA colour for body *i_body* sampled evenly from *cmap*."""
    fraction = i_body / n_bodies
    return cmap(fraction)
# Bounding box of the initial configuration (used for axis limits).
max_x = np.amax(initial_positions[:, 0])
max_y = np.amax(initial_positions[:, 1])
min_x = np.amin(initial_positions[:, 0])
min_y = np.amin(initial_positions[:, 1])

# Template sphere (radius 0.2) in spherical coordinates; each body's mesh
# is this template translated to its position.
phi = np.linspace(0, 2*np.pi, num=10)
theta = np.linspace(0, np.pi, num=10)
P, T = np.meshgrid(phi, theta)
unit_sphere = (
    0.2 * np.cos(P)*np.sin(T),
    0.2 * np.sin(P)*np.sin(T),
    0.2 * np.cos(T))

# One mayavi mesh per body, placed at its initial position in the z=0 plane.
meshes = []
for i in range(num_bodies):
    x = unit_sphere[0] + initial_positions[i, 0]
    y = unit_sphere[1] + initial_positions[i, 1]
    z = unit_sphere[2]  # + initial_positions[i, 2]
    c = get_body_color(i, num_bodies)[:3]  # RGB only; drop alpha for mlab
    meshes.append(mlab.mesh(x, y, z, color=c))
@mlab.animate
@jit
def anim():
    """Mayavi animation generator: integrate the ODE once, then move each
    body's sphere mesh to its position at successive solution times."""
    solution = n_body_problem()
    for i in range(1, len(solution.ts)):
        # plt.axis([min_x * 1.2, max_x * 1.2, min_y * 1.2, max_y * 1.2])
        # Trajectory of every body up to (and including) step i.
        positions_to_i = solution.ys[:i, :len(initial_positions), :]
        # print(positions_to_i)
        delta_t = solution.ts[i] - solution.ts[i-1]
        for j, mesh in enumerate(meshes):
            xs = positions_to_i[:, j, 0]
            ys = positions_to_i[:, j, 1]
            # Translate the template sphere to the body's latest position.
            mesh.mlab_source.trait_set(
                x=unit_sphere[0] + xs[-1],
                y=unit_sphere[1] + ys[-1]
            )
            # c = get_body_color(j, num_bodies)
            # plt.plot(xs, ys, color=c)
            # plt.plot([xs[-1]], [ys[-1]], "o", color=c)
        # Hand control back to the mayavi event loop for one frame.
        yield
anim()
mlab.show()
"""
solution = n_body_problem()
plt.plot(np.array(range(0, len(solution.ts))), solution.ts)
plt.show()
for i in range(1, len(solution.ts)):
plt.cla()
plt.axis([min_x * 1.2, max_x * 1.2, min_y * 1.2, max_y * 1.2])
positions_to_i = solution.ys[:i, :len(initial_positions), :]
_, n_bodies, _ = positions_to_i.shape
# print(positions_to_i)
delta_t = solution.ts[i] - solution.ts[i-1]
for j in range(n_bodies):
xs = positions_to_i[:, j, 0]
ys = positions_to_i[:, j, 1]
c = get_body_color(j, n_bodies)
plt.plot(xs, ys, color=c)
plt.plot([xs[-1]], [ys[-1]], "o", color=c)
plt.pause(delta_t/10)
plt.show()
"""
| 34.790541 | 111 | 0.620897 |
59777e2e6bbc3d31348e26e1e9baaaf722599462 | 15,669 | py | Python | tests/test_config_entries.py | dannyqwertz/home-assistant | 688bdc6532e514afbdc8efd1f574a7b5c9e8d280 | [
"Apache-2.0"
] | 4 | 2019-01-10T14:47:54.000Z | 2021-04-22T02:06:27.000Z | tests/test_config_entries.py | au190/home-assistant | e87ecbd5007acad7468d7118d02b21f6d783c8bc | [
"Apache-2.0"
] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | tests/test_config_entries.py | au190/home-assistant | e87ecbd5007acad7468d7118d02b21f6d783c8bc | [
"Apache-2.0"
] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z | """Test the config manager."""
import asyncio
from datetime import timedelta
from unittest.mock import MagicMock, patch
import pytest
from homeassistant import config_entries, loader, data_entry_flow
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from tests.common import (
MockModule, mock_coro, MockConfigEntry, async_fire_time_changed,
MockPlatform, MockEntity)
@pytest.fixture
def manager(hass):
    """Fixture of a loaded config manager."""
    manager = config_entries.ConfigEntries(hass, {})
    # Start empty and disable the store's stop listener so the fixture
    # doesn't register shutdown hooks on the test hass instance.
    manager._entries = []
    manager._store._async_ensure_stop_listener = lambda: None
    hass.config_entries = manager
    return manager
@asyncio.coroutine
def test_call_setup_entry(hass):
    """Test we call <component>.setup_entry."""
    MockConfigEntry(domain='comp').add_to_hass(hass)

    mock_setup_entry = MagicMock(return_value=mock_coro(True))

    loader.set_component(
        hass, 'comp',
        MockModule('comp', async_setup_entry=mock_setup_entry))

    # Setting up the component must forward the stored entry to it once.
    result = yield from async_setup_component(hass, 'comp', {})
    assert result
    assert len(mock_setup_entry.mock_calls) == 1
async def test_remove_entry(hass, manager):
    """Test that we can remove an entry.

    Full lifecycle: set up an entry that forwards to the light platform,
    verify the entity lands in states and the entity registry, then remove
    the entry and verify everything is cleaned up again.
    """
    async def mock_setup_entry(hass, entry):
        """Mock setting up entry."""
        hass.loop.create_task(hass.config_entries.async_forward_entry_setup(
            entry, 'light'))
        return True

    async def mock_unload_entry(hass, entry):
        """Mock unloading an entry."""
        result = await hass.config_entries.async_forward_entry_unload(
            entry, 'light')
        assert result
        return result

    entity = MockEntity(
        unique_id='1234',
        name='Test Entity',
    )

    async def mock_setup_entry_platform(hass, entry, async_add_entities):
        """Mock setting up platform."""
        async_add_entities([entity])

    loader.set_component(hass, 'test', MockModule(
        'test',
        async_setup_entry=mock_setup_entry,
        async_unload_entry=mock_unload_entry
    ))
    loader.set_component(
        hass, 'light.test',
        MockPlatform(async_setup_entry=mock_setup_entry_platform))

    # Three entries so removal ordering/filtering can be asserted.
    MockConfigEntry(domain='test', entry_id='test1').add_to_manager(manager)
    entry = MockConfigEntry(
        domain='test',
        entry_id='test2',
    )
    entry.add_to_manager(manager)
    MockConfigEntry(domain='test', entry_id='test3').add_to_manager(manager)

    # Check all config entries exist
    assert [item.entry_id for item in manager.async_entries()] == \
        ['test1', 'test2', 'test3']

    # Setup entry
    await entry.async_setup(hass)
    await hass.async_block_till_done()

    # Check entity state got added
    assert hass.states.get('light.test_entity') is not None
    # Group all_lights, light.test_entity
    assert len(hass.states.async_all()) == 2

    # Check entity got added to entity registry
    ent_reg = await hass.helpers.entity_registry.async_get_registry()
    assert len(ent_reg.entities) == 1
    entity_entry = list(ent_reg.entities.values())[0]
    assert entity_entry.config_entry_id == entry.entry_id

    # Remove entry
    result = await manager.async_remove('test2')
    await hass.async_block_till_done()

    # Check that unload went well and so no need to restart
    assert result == {
        'require_restart': False
    }

    # Check that config entry was removed.
    assert [item.entry_id for item in manager.async_entries()] == \
        ['test1', 'test3']

    # Check that entity state has been removed
    assert hass.states.get('light.test_entity') is None
    # Just Group all_lights
    assert len(hass.states.async_all()) == 1

    # Check that entity registry entry no longer references config_entry_id
    entity_entry = list(ent_reg.entities.values())[0]
    assert entity_entry.config_entry_id is None
@asyncio.coroutine
def test_remove_entry_raises(hass, manager):
    """Test if a component raises while removing entry."""
    @asyncio.coroutine
    def mock_unload_entry(hass, entry):
        """Mock unload entry function."""
        raise Exception("BROKEN")

    loader.set_component(
        hass, 'test',
        MockModule('comp', async_unload_entry=mock_unload_entry))

    MockConfigEntry(domain='test', entry_id='test1').add_to_manager(manager)
    # Only a LOADED entry actually invokes the (broken) unload handler.
    MockConfigEntry(
        domain='test',
        entry_id='test2',
        state=config_entries.ENTRY_STATE_LOADED
    ).add_to_manager(manager)
    MockConfigEntry(domain='test', entry_id='test3').add_to_manager(manager)

    assert [item.entry_id for item in manager.async_entries()] == \
        ['test1', 'test2', 'test3']

    result = yield from manager.async_remove('test2')

    # A failing unload still removes the entry but flags that a restart
    # is required to fully clean up.
    assert result == {
        'require_restart': True
    }
    assert [item.entry_id for item in manager.async_entries()] == \
        ['test1', 'test3']
@asyncio.coroutine
def test_remove_entry_if_not_loaded(hass, manager):
    """Test that we can remove an entry."""
    mock_unload_entry = MagicMock(return_value=mock_coro(True))

    loader.set_component(
        hass, 'test',
        MockModule('comp', async_unload_entry=mock_unload_entry))

    MockConfigEntry(domain='test', entry_id='test1').add_to_manager(manager)
    MockConfigEntry(domain='test', entry_id='test2').add_to_manager(manager)
    MockConfigEntry(domain='test', entry_id='test3').add_to_manager(manager)

    assert [item.entry_id for item in manager.async_entries()] == \
        ['test1', 'test2', 'test3']

    result = yield from manager.async_remove('test2')

    # Nothing was loaded, so removal never requires a restart.
    assert result == {
        'require_restart': False
    }
    assert [item.entry_id for item in manager.async_entries()] == \
        ['test1', 'test3']

    assert len(mock_unload_entry.mock_calls) == 1
@asyncio.coroutine
def test_add_entry_calls_setup_entry(hass, manager):
    """Test we call setup_config_entry."""
    mock_setup_entry = MagicMock(return_value=mock_coro(True))

    loader.set_component(
        hass, 'comp',
        MockModule('comp', async_setup_entry=mock_setup_entry))

    class TestFlow(config_entries.ConfigFlow):
        # Flow that immediately creates an entry without asking the user.

        VERSION = 1

        @asyncio.coroutine
        def async_step_user(self, user_input=None):
            return self.async_create_entry(
                title='title',
                data={
                    'token': 'supersecret'
                })

    with patch.dict(config_entries.HANDLERS, {'comp': TestFlow, 'beer': 5}):
        yield from manager.flow.async_init(
            'comp', context={'source': config_entries.SOURCE_USER})
        yield from hass.async_block_till_done()

    # The finished flow must set up the new entry with its flow data.
    assert len(mock_setup_entry.mock_calls) == 1
    p_hass, p_entry = mock_setup_entry.mock_calls[0][1]

    assert p_hass is hass
    assert p_entry.data == {
        'token': 'supersecret'
    }
@asyncio.coroutine
def test_entries_gets_entries(manager):
    """Test entries are filtered by domain."""
    MockConfigEntry(domain='test').add_to_manager(manager)
    entry1 = MockConfigEntry(domain='test2')
    entry1.add_to_manager(manager)
    entry2 = MockConfigEntry(domain='test2')
    entry2.add_to_manager(manager)

    # Only the 'test2' entries come back, in insertion order.
    assert manager.async_entries('test2') == [entry1, entry2]
@asyncio.coroutine
def test_domains_gets_uniques(manager):
    """Test we only return each domain once."""
    MockConfigEntry(domain='test').add_to_manager(manager)
    MockConfigEntry(domain='test2').add_to_manager(manager)
    MockConfigEntry(domain='test2').add_to_manager(manager)
    MockConfigEntry(domain='test').add_to_manager(manager)
    MockConfigEntry(domain='test3').add_to_manager(manager)

    # Duplicates collapse; first-seen order is preserved.
    assert manager.async_domains() == ['test', 'test2', 'test3']
async def test_saving_and_loading(hass):
    """Test that we're saving and loading correctly.

    Creates two entries via two different flows, lets the delayed save
    fire, then loads a fresh manager and compares every persisted field.
    """
    loader.set_component(
        hass, 'test',
        MockModule('test', async_setup_entry=lambda *args: mock_coro(True)))

    class TestFlow(config_entries.ConfigFlow):
        VERSION = 5
        CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

        @asyncio.coroutine
        def async_step_user(self, user_input=None):
            return self.async_create_entry(
                title='Test Title',
                data={
                    'token': 'abcd'
                }
            )

    with patch.dict(config_entries.HANDLERS, {'test': TestFlow}):
        await hass.config_entries.flow.async_init(
            'test', context={'source': config_entries.SOURCE_USER})

    class Test2Flow(config_entries.ConfigFlow):
        VERSION = 3
        CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH

        @asyncio.coroutine
        def async_step_user(self, user_input=None):
            return self.async_create_entry(
                title='Test 2 Title',
                data={
                    'username': 'bla'
                }
            )

    with patch('homeassistant.config_entries.HANDLERS.get',
               return_value=Test2Flow):
        await hass.config_entries.flow.async_init(
            'test', context={'source': config_entries.SOURCE_USER})

    # To trigger the call_later
    async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=1))
    # To execute the save
    await hass.async_block_till_done()

    # Now load written data in new config manager
    manager = config_entries.ConfigEntries(hass, {})
    await manager.async_load()

    # Ensure same order
    for orig, loaded in zip(hass.config_entries.async_entries(),
                            manager.async_entries()):
        assert orig.version == loaded.version
        assert orig.domain == loaded.domain
        assert orig.title == loaded.title
        assert orig.data == loaded.data
        assert orig.source == loaded.source
        assert orig.connection_class == loaded.connection_class
async def test_forward_entry_sets_up_component(hass):
    """Test we setup the component entry is forwarded to."""
    entry = MockConfigEntry(domain='original')

    mock_original_setup_entry = MagicMock(return_value=mock_coro(True))
    loader.set_component(
        hass, 'original',
        MockModule('original', async_setup_entry=mock_original_setup_entry))

    mock_forwarded_setup_entry = MagicMock(return_value=mock_coro(True))
    loader.set_component(
        hass, 'forwarded',
        MockModule('forwarded', async_setup_entry=mock_forwarded_setup_entry))

    await hass.config_entries.async_forward_entry_setup(entry, 'forwarded')
    # Only the forwarded-to component gets the entry, not the entry's own
    # domain component.
    assert len(mock_original_setup_entry.mock_calls) == 0
    assert len(mock_forwarded_setup_entry.mock_calls) == 1
async def test_forward_entry_does_not_setup_entry_if_setup_fails(hass):
    """Test we do not set up entry if component setup fails."""
    entry = MockConfigEntry(domain='original')

    # Component-level setup fails, so the entry setup must never run.
    mock_setup = MagicMock(return_value=mock_coro(False))
    mock_setup_entry = MagicMock()

    # Bug fix: removed the stray leading `hass,` that turned this call into
    # a pointless (hass, <result>) tuple expression.
    loader.set_component(hass, 'forwarded', MockModule(
        'forwarded',
        async_setup=mock_setup,
        async_setup_entry=mock_setup_entry,
    ))

    await hass.config_entries.async_forward_entry_setup(entry, 'forwarded')
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 0
async def test_discovery_notification(hass):
    """Test that we create/dismiss a notification when source is discovery."""
    loader.set_component(hass, 'test', MockModule('test'))
    await async_setup_component(hass, 'persistent_notification', {})

    class TestFlow(config_entries.ConfigFlow):
        # First pass shows a form (notification appears); second pass with
        # user input creates the entry (notification is dismissed).
        VERSION = 5

        async def async_step_discovery(self, user_input=None):
            if user_input is not None:
                return self.async_create_entry(
                    title='Test Title',
                    data={
                        'token': 'abcd'
                    }
                )
            return self.async_show_form(
                step_id='discovery',
            )

    with patch.dict(config_entries.HANDLERS, {'test': TestFlow}):
        result = await hass.config_entries.flow.async_init(
            'test', context={'source': config_entries.SOURCE_DISCOVERY})

        await hass.async_block_till_done()
        state = hass.states.get('persistent_notification.config_entry_discovery')
        assert state is not None

        result = await hass.config_entries.flow.async_configure(
            result['flow_id'], {})
        assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY

        await hass.async_block_till_done()
        state = hass.states.get('persistent_notification.config_entry_discovery')
        assert state is None
async def test_discovery_notification_not_created(hass):
    """Test that we not create a notification when discovery is aborted."""
    loader.set_component(hass, 'test', MockModule('test'))
    await async_setup_component(hass, 'persistent_notification', {})

    class TestFlow(config_entries.ConfigFlow):
        VERSION = 5

        async def async_step_discovery(self, user_input=None):
            # Abort straight away: no entry, and no notification either.
            return self.async_abort(reason='test')

    with patch.dict(config_entries.HANDLERS, {'test': TestFlow}):
        await hass.config_entries.flow.async_init(
            'test', context={'source': config_entries.SOURCE_DISCOVERY})

        await hass.async_block_till_done()
        notification = hass.states.get(
            'persistent_notification.config_entry_discovery')
        assert notification is None
async def test_loading_default_config(hass):
    """Test loading the default config."""
    manager = config_entries.ConfigEntries(hass, {})

    # Pretend the storage file is absent so the manager falls back to defaults.
    with patch('homeassistant.util.json.open',
               side_effect=FileNotFoundError):
        await manager.async_load()

    assert not manager.async_entries()
async def test_updating_entry_data(manager):
    """Test that we can update an entry data."""
    entry = MockConfigEntry(domain='test', data={'first': True})
    entry.add_to_manager(manager)

    # Updating replaces the data dict wholesale rather than merging it.
    manager.async_update_entry(entry, data={'second': True})
    assert entry.data == {'second': True}
async def test_setup_raise_not_ready(hass, caplog):
    """Test a setup raising not ready."""
    entry = MockConfigEntry(domain='test')

    setup_entry_mock = MagicMock(side_effect=ConfigEntryNotReady)
    loader.set_component(
        hass, 'test', MockModule('test', async_setup_entry=setup_entry_mock))

    with patch('homeassistant.helpers.event.async_call_later') as mock_call:
        await entry.async_setup(hass)

    assert len(mock_call.mock_calls) == 1
    assert 'Config entry for test not ready yet' in caplog.text

    # A retry was scheduled: capture the (hass, delay, callback) it was given.
    retry_hass, retry_delay, retry_setup = mock_call.mock_calls[0][1]
    assert retry_hass is hass
    assert retry_delay == 5
    assert entry.state == config_entries.ENTRY_STATE_SETUP_RETRY

    # Let the next attempt succeed and fire the scheduled retry by hand.
    setup_entry_mock.side_effect = None
    setup_entry_mock.return_value = mock_coro(True)
    await retry_setup(None)
    assert entry.state == config_entries.ENTRY_STATE_LOADED
async def test_setup_retrying_during_unload(hass):
    """Test if we unload an entry that is in retry mode."""
    entry = MockConfigEntry(domain='test')

    setup_entry_mock = MagicMock(side_effect=ConfigEntryNotReady)
    loader.set_component(
        hass, 'test', MockModule('test', async_setup_entry=setup_entry_mock))

    with patch('homeassistant.helpers.event.async_call_later') as delayed_call:
        await entry.async_setup(hass)
        assert entry.state == config_entries.ENTRY_STATE_SETUP_RETRY
        assert len(delayed_call.return_value.mock_calls) == 0

        # Unloading a retrying entry must cancel the scheduled retry.
        await entry.async_unload(hass)
        assert entry.state == config_entries.ENTRY_STATE_NOT_LOADED
        assert len(delayed_call.return_value.mock_calls) == 1
| 33.338298 | 78 | 0.686579 |
bae3b970b78d228ef04b57759af0aa97b14100d4 | 9,605 | py | Python | .history/classes/Menu_20171107224812.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107224812.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107224812.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | # DADSA - Assignment 1
# Reece Benson
from functools import partial
from os import system as call
from collections import OrderedDict
class Menu():
    """Console menu system for the tennis season application.

    The whole menu is one flat dict (``_menu``) in which each key maps
    either to another dict (a submenu of key -> display label) or to a
    callable (an action).  ``_current`` is a breadcrumb of visited menu
    keys, used both for the title bar and for walking back up the tree.
    """

    # Define the variables we will be using
    _app = None                # owning application; provides .handler and .exit()
    _menu = None               # flat dict: menu key -> submenu dict or callable
    _current = ["main"]        # breadcrumb of menu keys, root first
    _current_menu = "main"     # key of the menu currently shown
    just_called_back = False   # skip the "press return" pause right after going back

    def __init__(self, app):
        """Store the owning application."""
        self._app = app

    def load(self):
        """(Re)build the entire menu tree from application data, then show it."""
        # Reset menu state.
        self._menu = {}
        self._current = ["main"]
        self._current_menu = "main"
        self.just_called_back = False

        # Static top-level menus.
        self._menu['main'] = { "new_season_name": "New Season", "load_season": "Load Season" }
        self._menu['new_season_name'] = lambda: self.new_season()
        self._menu['back'] = lambda: self.go_back()
        self._menu['new_season'] = { "ns_players": "Players", "ns_tournaments": "Tournaments", "ns_prizemoney": "Prize Money", "ns_difficulty": "Difficulty", "back": "Back" }
        self._menu['ns_players'] = { "ns_viewplayers": "View Players", "ns_viewplayer": "View Player", "back": "Back" }
        self._menu['ns_tournaments'] = { "ns_viewtournaments": "Example Tournament 1", "back": "Back" }
        self._menu['ns_prizemoney'] = { "ns_setprizemoney": "Set Prize Money", "ns_viewprizemoney": "View Prize Money", "back": "Back" }
        self._menu['ns_difficulty'] = { "ns_setdifficulty": "Set Difficulty", "ns_viewdifficulty": "View Difficulty", "back": "Back" }
        self._menu['load_season'] = {}

        # Append our Seasons to the "Load Season" Menu.
        for seasonId in self._app.handler.get_seasons():
            season = self._app.handler.get_season(seasonId)
            seasonVar = 'ls_' + str(seasonId)
            self._menu['load_season'].update({ seasonVar: season.name() })

            # Per-season submenu.
            self._menu[seasonVar] = { seasonVar+"_select": "Select Tournament", seasonVar+"_players": "View Players", seasonVar+"_details": "View Details", "back": "Back" }
            self._menu[seasonVar+"_select"] = {}
            self._menu[seasonVar+"_players"] = {}
            # Bug fix: bind ``season`` as a default argument.  A bare lambda
            # late-binds the loop variable, so every season's "View Details"
            # would have shown the *last* season.
            self._menu[seasonVar+"_details"] = lambda season=season: print(season.display("details"))

            # > "Select Tournament"
            for tournament_name in season.tournaments():
                tournamentVar = seasonVar+"_select_"+tournament_name
                self._menu[seasonVar+"_select"].update({ tournamentVar: "Select {0}".format(tournament_name) })

                # Gender-specific round listings within the tournament.
                self._menu[tournamentVar] = {}
                for gdr in season.rounds():
                    self._menu[tournamentVar].update({ tournamentVar+"_"+gdr: "View {0} Rounds".format(gdr) })
                    self._menu[tournamentVar+"_"+gdr] = {}
                    for r, rnd in enumerate(season.rounds()[gdr], 1):
                        self._menu[tournamentVar+"_"+gdr].update({ tournamentVar+"-"+gdr+"-"+rnd: "Round {0}".format(r) })
                        # partial() evaluates its arguments now, so each round
                        # entry prints its own pre-rendered match summary.
                        self._menu[tournamentVar+"-"+gdr+"-"+rnd] = partial(print, "\n".join([ "{0} — Winner: {1}, updated score: {2}".format(m.versuses(True), m.winner()[0].name(), season.round(gdr, rnd).get_rank()) for m in season.round(gdr, rnd).matches() ]))
                    self._menu[tournamentVar+"_"+gdr].update({ "back": "Back" })

                # Tournament-specific info pages.
                self._menu[tournamentVar].update({ tournamentVar+"_difficulty": "View Difficulty", tournamentVar+"_prizemoney": "View Prize Money" })
                self._menu[tournamentVar+"_difficulty"] = partial(print, season.tournament(tournament_name).display("difficulty"))
                self._menu[tournamentVar+"_prizemoney"] = partial(print, season.tournament(tournament_name).display("prize_money"))
                self._menu[tournamentVar].update({ "back": "Back" })

            # > "View Players": one listing per gender.
            for gdr in season.players():
                self._menu[seasonVar+"_players"].update({ seasonVar+"_players_"+gdr: "List {0}s".format(gdr) })
                self._menu[seasonVar+"_players_"+gdr] = partial(print, season.display("players", gdr))

            # > Add the back options to each submenu.
            self._menu[seasonVar+"_select"].update({ "back": "Back" })
            self._menu[seasonVar+"_players"].update({ "back": "Back" })

        self._menu["load_season"].update({ "back": "Back" })

        # Display our Menu.
        self.display("main")

    def new_season(self):
        """Prompt for a season name and create it; empty input cancels."""
        call("cls")
        print("Please enter a season name (leave empty to cancel): ")
        season_name = input("\n> ")

        if season_name != "":
            self._app.handler.add_season(season_name)
            print("creating {}".format(season_name))
            self.load()
        else:
            self._current_menu = "main"
            self.display()

    def go_back(self):
        """Step one level up the menu tree."""
        # Flag so get_input() skips the "press return" pause after this action.
        self.just_called_back = True
        # Drop the current menu and return to the previous one.
        self._current.pop()
        self._current_menu = self._current[-1]

    def strike(self, text):
        """Return *text* with a combining strikethrough after each character."""
        return ''.join(c + '\u0336' for c in text)

    def display(self, index=None, error=None):
        """Render the menu named *index* (defaults to "main").

        ``error``, when given, is shown above the menu; afterwards control
        passes to :meth:`get_input`.
        """
        call("cls")

        menu_item = self.get_menu(index or "main")

        # Error Handling
        if error is not None:
            print("\n", "Error!", error, "\n")

        # Menu Title with breadcrumb trail.
        print("Please select an option: ({})".format("/".join(self._current)))
        menu_counter = 0
        for m in menu_item:
            menu_name = menu_item[m]
            menu_counter += 1
            if m in self._menu:
                # Submenus get a "->" marker; actions get none.
                m_type = "" if callable(self._menu[m]) else "->"
                print("{0}. {1} {2}".format(menu_counter, menu_name, m_type))
            else:
                # The entry points nowhere yet: render it struck through.
                print(self.strike("{0}. {1} [?]".format(menu_counter, menu_name)))

        # Get User Input
        self.get_input()

    def validate_menu(self, index):
        """Return the *index*-th key of the menu dict, or None if out of range."""
        try:
            return [ v for k, v in enumerate(self._menu) if k == index ][0]
        except IndexError:
            return None

    def get_menu(self, menu_name):
        """Return the menu entry for *menu_name*, or None if it doesn't exist."""
        if menu_name not in self._menu:
            return None
        return self._menu[menu_name]

    def menu_exists(self, index):
        """Map a 1-based on-screen option number to its target.

        Returns the action callable for an action entry, the submenu key for
        a submenu entry, or None when *index* matches nothing selectable.
        Bug fix: the original kept scanning after a match and reset the
        result to None on every later non-matching entry.
        """
        menu_item = self.get_menu(self._current_menu)
        menu_counter = 0
        for m in menu_item:
            # Counter advances for every entry, matching display() numbering.
            menu_counter += 1
            # Only entries that actually exist in the tree are selectable.
            if m in self._menu and menu_counter == index:
                if callable(self._menu[m]):
                    return self._menu[m]
                return m
        return None

    def get_input(self):
        """Read the user's choice and act on it (descend, run action, or error)."""
        # Wrap this in a try/except to validate any errors with input.
        try:
            # Bug fix: read the raw string first.  The original converted to
            # int immediately, so the "exit" and empty-input checks below
            # could never match and non-numeric input skipped them entirely.
            raw = input('\n>>> ')

            if raw == "exit":
                raise KeyboardInterrupt
            elif raw == "":
                return self.display(self._current_menu, "Please select a valid option!")

            resp = int(raw)

            # Validate input against the current menu.
            menu_selected = self.menu_exists(resp)
            if menu_selected is not None and not callable(menu_selected):
                # Descend into a submenu.
                self._current.append(menu_selected)
                self._current_menu = menu_selected
                self.display(menu_selected)
            elif callable(menu_selected):
                # Clear our screen and run the action.
                call("cls")
                menu_selected()
                # Hold the user so they can see the result (if back hasn't
                # just been called).
                if self.just_called_back == False:
                    input(">>> Press <Return> to continue...")
                else:
                    self.just_called_back = False
                # Display our menu again to stop the program terminating.
                self.display(self._current_menu)
            else:
                self.display(self._current_menu, "Please select a valid option!")
        except KeyboardInterrupt:
            self._app.exit()
        except ValueError:
            self.display(self._current_menu, "Please select a valid option!")
| 38.729839 | 262 | 0.554086 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.