| id | content |
|---|---|
11515322
|
import os
import logging
from typing import Union # noqa: F401
from docutils import nodes  # noqa: E501
from docutils.parsers import rst
from ipypublish.utils import handle_error
from ipypublish.sphinx.utils import import_sphinx
from ipypublish.convert.main import IpyPubMain
# TODO should inherit from sphinx.parsers.RSTParser
# https://www.sphinx-doc.org/en/master/extdev/parserapi.html
# however, sphinx is an optional dependency
class NBParser(rst.Parser):
"""Sphinx source parser for Jupyter notebooks.
adapted from nbsphinx
"""
supported = ("jupyter_notebook",)
def __init__(self, *args, **kwargs):
self.app = None
self.config = None
self.env = None
try:
sphinx = import_sphinx()
class NotebookError(sphinx.errors.SphinxError):
"""Error during notebook parsing."""
category = "Notebook error"
self.error_nb = NotebookError
self.error_config = sphinx.errors.ConfigError
self.logger = sphinx.util.logging.getLogger("nbparser")
except (ImportError, AttributeError):
self.error_nb = IOError
self.error_config = TypeError
self.logger = logging.getLogger("nbparser")
super(NBParser, self).__init__(*args, **kwargs)
def set_application(self, app):
"""set_application will be called from Sphinx to set app
and other instance variables
Parameters
----------
app: sphinx.application.Sphinx
Sphinx application object
"""
self.app = app
self.config = app.config
self.env = app.env
def parse(self, inputstring, document):
# type: (Union[str, list[str]], nodes.document) -> None
"""Parse text and generate a document tree."""
# fix for when calling on readthedocs
self.env = self.env or document.settings.env
self.config = self.config or document.settings.env.config
# get file for conversion
filepath = self.env.doc2path(self.env.docname)
filedir = os.path.dirname(filepath)
self.logger.info("ipypublish: converting {}".format(filepath))
config = {
"IpyPubMain": {
"conversion": self.config.ipysphinx_export_config,
"plugin_folder_paths": self.config.ipysphinx_config_folders,
"outpath": filedir,
"folder_suffix": self.config.ipysphinx_folder_suffix,
"log_to_stdout": False,
"log_to_file": False,
"default_pporder_kwargs": dict(clear_existing=False, dump_files=True),
}
}
if self.config.ipysphinx_preconverters:
# NB: jupytext is already a default for .Rmd
config["IpyPubMain"][
"pre_conversion_funcs"
] = self.config.ipysphinx_preconverters
publish = IpyPubMain(config=config)
outdata = publish(filepath)
self.logger.info("ipypublish: successful conversion")
# check we got back restructuredtext
exporter = outdata["exporter"]
        if exporter.output_mimetype != "text/restructuredtext":
handle_error(
"ipypublish: the output content is not of type "
"text/restructuredtext: {}".format(exporter.output_mimetype),
TypeError,
self.logger,
)
# TODO document use of orphan
if outdata["resources"].get("ipub", {}).get("orphan", False):
rst.Parser.parse(self, ":orphan:", document)
# parse a prolog
if self.env.config.ipysphinx_prolog:
prolog = exporter.environment.from_string(
self.env.config.ipysphinx_prolog
).render(env=self.env)
rst.Parser.parse(self, prolog, document)
# parse the main body of the file
rst.Parser.parse(self, outdata["stream"], document)
# parse an epilog
if self.env.config.ipysphinx_epilog:
            epilog = exporter.environment.from_string(
self.env.config.ipysphinx_epilog
).render(env=self.env)
            rst.Parser.parse(self, epilog, document)
# TODO is there a better way to parse data back from the parser?
# record if the notebook contains ipywidgets
if outdata["resources"].get("contains_ipywidgets", False):
if not hasattr(self.env, "ipysphinx_widgets"):
self.env.ipysphinx_widgets = set()
self.env.ipysphinx_widgets.add(self.env.docname)
# record that the document was created from a notebook
if not hasattr(self.env, "ipysphinx_created_from_nb"):
self.env.ipysphinx_created_from_nb = set()
self.env.ipysphinx_created_from_nb.add(self.env.docname)
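# A rough sketch (not part of this module) of how a parser like this is typically
# registered from a Sphinx extension's setup(); the suffix mapping is illustrative:
#   def setup(app):
#       app.add_source_suffix(".ipynb", "jupyter_notebook")
#       app.add_source_parser(NBParser)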
|
11515344
|
from typing import Any, Callable, Dict, List, Optional, Union, cast
from ..language import print_ast, StringValueNode
from ..language.block_string import print_block_string
from ..pyutils import inspect
from ..type import (
DEFAULT_DEPRECATION_REASON,
GraphQLArgument,
GraphQLDirective,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLInputObjectType,
GraphQLInputType,
GraphQLInterfaceType,
GraphQLNamedType,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLUnionType,
is_enum_type,
is_input_object_type,
is_interface_type,
is_introspection_type,
is_object_type,
is_scalar_type,
is_specified_directive,
is_specified_scalar_type,
is_union_type,
)
from .ast_from_value import ast_from_value
__all__ = ["print_schema", "print_introspection_schema", "print_type", "print_value"]
def print_schema(schema: GraphQLSchema) -> str:
return print_filtered_schema(
schema, lambda n: not is_specified_directive(n), is_defined_type
)
def print_introspection_schema(schema: GraphQLSchema) -> str:
return print_filtered_schema(schema, is_specified_directive, is_introspection_type)
def is_defined_type(type_: GraphQLNamedType) -> bool:
return not is_specified_scalar_type(type_) and not is_introspection_type(type_)
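# print_schema() emits SDL for user-defined parts of the schema only (specified
# directives, built-in scalars and introspection types are filtered out), while
# print_introspection_schema() emits only the introspection portion. A minimal
# usage sketch, assuming an existing GraphQLSchema instance named `schema`:
#   sdl = print_schema(schema)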
def print_filtered_schema(
schema: GraphQLSchema,
directive_filter: Callable[[GraphQLDirective], bool],
type_filter: Callable[[GraphQLNamedType], bool],
) -> str:
directives = filter(directive_filter, schema.directives)
types = filter(type_filter, schema.type_map.values())
return "\n\n".join(
(
*filter(None, (print_schema_definition(schema),)),
*map(print_directive, directives),
*map(print_type, types),
)
)
def print_schema_definition(schema: GraphQLSchema) -> Optional[str]:
if schema.description is None and is_schema_of_common_names(schema):
return None
operation_types = []
query_type = schema.query_type
if query_type:
operation_types.append(f" query: {query_type.name}")
mutation_type = schema.mutation_type
if mutation_type:
operation_types.append(f" mutation: {mutation_type.name}")
subscription_type = schema.subscription_type
if subscription_type:
operation_types.append(f" subscription: {subscription_type.name}")
return print_description(schema) + "schema {\n" + "\n".join(operation_types) + "\n}"
def is_schema_of_common_names(schema: GraphQLSchema) -> bool:
"""Check whether this schema uses the common naming convention.
    A GraphQL schema defines root types for each kind of operation. These types
    are the same as any other type and can be named in any manner; however,
    there is a common naming convention:
schema {
query: Query
mutation: Mutation
subscription: Subscription
}
When using this naming convention, the schema description can be omitted.
"""
query_type = schema.query_type
if query_type and query_type.name != "Query":
return False
mutation_type = schema.mutation_type
if mutation_type and mutation_type.name != "Mutation":
return False
subscription_type = schema.subscription_type
if subscription_type and subscription_type.name != "Subscription":
return False
return True
def print_type(type_: GraphQLNamedType) -> str:
if is_scalar_type(type_):
type_ = cast(GraphQLScalarType, type_)
return print_scalar(type_)
if is_object_type(type_):
type_ = cast(GraphQLObjectType, type_)
return print_object(type_)
if is_interface_type(type_):
type_ = cast(GraphQLInterfaceType, type_)
return print_interface(type_)
if is_union_type(type_):
type_ = cast(GraphQLUnionType, type_)
return print_union(type_)
if is_enum_type(type_):
type_ = cast(GraphQLEnumType, type_)
return print_enum(type_)
if is_input_object_type(type_):
type_ = cast(GraphQLInputObjectType, type_)
return print_input_object(type_)
# Not reachable. All possible types have been considered.
raise TypeError(f"Unexpected type: {inspect(type_)}.")
def print_scalar(type_: GraphQLScalarType) -> str:
return (
print_description(type_)
+ f"scalar {type_.name}"
+ print_specified_by_url(type_)
)
def print_implemented_interfaces(
type_: Union[GraphQLObjectType, GraphQLInterfaceType]
) -> str:
interfaces = type_.interfaces
return " implements " + " & ".join(i.name for i in interfaces) if interfaces else ""
def print_object(type_: GraphQLObjectType) -> str:
return (
print_description(type_)
+ f"type {type_.name}"
+ print_implemented_interfaces(type_)
+ print_fields(type_)
)
def print_interface(type_: GraphQLInterfaceType) -> str:
return (
print_description(type_)
+ f"interface {type_.name}"
+ print_implemented_interfaces(type_)
+ print_fields(type_)
)
def print_union(type_: GraphQLUnionType) -> str:
types = type_.types
possible_types = " = " + " | ".join(t.name for t in types) if types else ""
return print_description(type_) + f"union {type_.name}" + possible_types
def print_enum(type_: GraphQLEnumType) -> str:
values = [
print_description(value, " ", not i)
+ f" {name}"
+ print_deprecated(value.deprecation_reason)
for i, (name, value) in enumerate(type_.values.items())
]
return print_description(type_) + f"enum {type_.name}" + print_block(values)
def print_input_object(type_: GraphQLInputObjectType) -> str:
fields = [
print_description(field, " ", not i) + " " + print_input_value(name, field)
for i, (name, field) in enumerate(type_.fields.items())
]
return print_description(type_) + f"input {type_.name}" + print_block(fields)
def print_fields(type_: Union[GraphQLObjectType, GraphQLInterfaceType]) -> str:
fields = [
print_description(field, " ", not i)
+ f" {name}"
+ print_args(field.args, " ")
+ f": {field.type}"
+ print_deprecated(field.deprecation_reason)
for i, (name, field) in enumerate(type_.fields.items())
]
return print_block(fields)
def print_block(items: List[str]) -> str:
return " {\n" + "\n".join(items) + "\n}" if items else ""
def print_args(args: Dict[str, GraphQLArgument], indentation: str = "") -> str:
if not args:
return ""
    # If none of the args has a description, print them all on one line.
if not any(arg.description for arg in args.values()):
return (
"("
+ ", ".join(print_input_value(name, arg) for name, arg in args.items())
+ ")"
)
return (
"(\n"
+ "\n".join(
print_description(arg, f" {indentation}", not i)
+ f" {indentation}"
+ print_input_value(name, arg)
for i, (name, arg) in enumerate(args.items())
)
+ f"\n{indentation})"
)
def print_input_value(name: str, arg: GraphQLArgument) -> str:
default_ast = ast_from_value(arg.default_value, arg.type)
arg_decl = f"{name}: {arg.type}"
if default_ast:
arg_decl += f" = {print_ast(default_ast)}"
return arg_decl + print_deprecated(arg.deprecation_reason)
def print_directive(directive: GraphQLDirective) -> str:
return (
print_description(directive)
+ f"directive @{directive.name}"
+ print_args(directive.args)
+ (" repeatable" if directive.is_repeatable else "")
+ " on "
+ " | ".join(location.name for location in directive.locations)
)
def print_deprecated(reason: Optional[str]) -> str:
if reason is None:
return ""
if reason != DEFAULT_DEPRECATION_REASON:
ast_value = print_ast(StringValueNode(value=reason))
return f" @deprecated(reason: {ast_value})"
return " @deprecated"
def print_specified_by_url(scalar: GraphQLScalarType) -> str:
if scalar.specified_by_url is None:
return ""
ast_value = print_ast(StringValueNode(value=scalar.specified_by_url))
return f" @specifiedBy(url: {ast_value})"
def print_description(
def_: Union[
GraphQLArgument,
GraphQLDirective,
GraphQLEnumValue,
GraphQLNamedType,
GraphQLSchema,
],
indentation: str = "",
first_in_block: bool = True,
) -> str:
description = def_.description
if description is None:
return ""
prefer_multiple_lines = len(description) > 70
block_string = print_block_string(description, prefer_multiple_lines)
prefix = "\n" + indentation if indentation and not first_in_block else indentation
return prefix + block_string.replace("\n", "\n" + indentation) + "\n"
def print_value(value: Any, type_: GraphQLInputType) -> str:
"""@deprecated: Convenience function for printing a Python value"""
return print_ast(ast_from_value(value, type_)) # type: ignore
|
11515361
|
from .external_optimizer import ExternalOptimizer
from .ga import GeneticAlgorithmContinue, GeneticAlgorithmInit
from .search_util import convert_scale, initialize_search_param
|
11515363
|
Import("env")
def before_upload(source, target, env):
print("************* pio_fix_app_address.py(before_upload)")
print("*** Fixing incorrect app address for OTA based deployments in UPLOADCMD env variable.")
print("*** See: https://github.com/platformio/platform-espressif32/issues/403")
print("*** hard coded address 0x10000 in ~/.platformio/platforms/espressif32/builder/main.py")
print("Current value: ", env.Dump("UPLOADCMD"))
env.Replace(UPLOADCMD = '"$PYTHONEXE" "$UPLOADER" $UPLOADERFLAGS 0x20000 $SOURCE')
print("Updated value: ", env.Dump("UPLOADCMD"))
print("************** done.")
env.AddPreAction("upload", before_upload)
|
11515387
|
import asyncio
import cProfile
import logging
import pathlib
from seno.util.path import mkdir, path_from_root
# To use the profiler, enable it in the config file ("enable_profiler").
# The output will be written under your seno root path, e.g. ~/.seno2/mainnet/profile/
# To analyze the profile, run:
# python seno/utils/profiler.py ~/.seno2/mainnet/profile | less -r
# This will print CPU usage of the seno full node main thread in 1-second increments.
# Find a time window of interest and analyze the profile files (which are in pstats format).
# For example:
# python seno/utils/profiler.py ~/.seno2/mainnet/profile 10 20
async def profile_task(root_path: pathlib.Path, log: logging.Logger) -> None:
profile_dir = path_from_root(root_path, "profile")
log.info("Starting profiler. saving to %s" % profile_dir)
mkdir(profile_dir)
counter = 0
while True:
pr = cProfile.Profile()
pr.enable()
# this will throw CancelledError when we're exiting
await asyncio.sleep(1)
pr.create_stats()
pr.dump_stats(profile_dir / ("slot-%05d.profile" % counter))
log.debug("saving profile %05d" % counter)
counter += 1
if __name__ == "__main__":
import sys
import pstats
import io
from colorama import init, Fore, Back, Style
from subprocess import check_call
profile_dir = pathlib.Path(sys.argv[1])
init(strip=False)
def analyze_cpu_usage(profile_dir: pathlib.Path):
counter = 0
try:
while True:
f = io.StringIO()
st = pstats.Stats(str(profile_dir / ("slot-%05d.profile" % counter)), stream=f)
st.strip_dirs()
st.sort_stats(pstats.SortKey.CUMULATIVE)
st.print_stats()
f.seek(0)
total = 0.0
sleep = 0.0
# output looks like this:
# ncalls tottime percall cumtime percall filename:lineno(function)
# 1 0.000 0.000 0.000 0.000 <function>
for line in f:
                    if " function calls " in line and " in " in line and " seconds" in line:
# 304307 function calls (291692 primitive calls) in 1.031 seconds
assert total == 0
total = float(line.split()[-2])
continue
columns = line.split(None, 5)
if len(columns) < 6 or columns[0] == "ncalls":
continue
                    # TODO: to support Windows and macOS, extend this to a list of functions known to sleep the process
# e.g. WaitForMultipleObjects or kqueue
if "{method 'poll' of 'select.epoll' objects}" in columns[5]:
# cumulative time
sleep += float(columns[3])
if sleep < 0.000001:
percent = 100.0
else:
percent = 100.0 * (total - sleep) / total
if percent > 90:
color = Fore.RED + Style.BRIGHT
elif percent > 80:
color = Fore.MAGENTA + Style.BRIGHT
elif percent > 70:
color = Fore.YELLOW + Style.BRIGHT
elif percent > 60:
color = Style.BRIGHT
elif percent < 10:
color = Fore.GREEN
else:
color = ""
quantized = int(percent // 2)
print(
("%05d: " + color + "%3.0f%% CPU " + Back.WHITE + "%s" + Style.RESET_ALL + "%s|")
% (counter, percent, " " * quantized, " " * (50 - quantized))
)
counter += 1
except Exception as e:
print(e)
def analyze_slot_range(profile_dir: pathlib.Path, first: int, last: int):
if last < first:
print("ERROR: first must be <= last when specifying slot range")
return
files = []
for i in range(first, last + 1):
files.append(str(profile_dir / ("slot-%05d.profile" % i)))
output_file = "seno-hotspot-%d" % first
if first < last:
output_file += "-%d" % last
print("generating call tree for slot(s) [%d, %d]" % (first, last))
check_call(["gprof2dot", "-f", "pstats", "-o", output_file + ".dot"] + files)
with open(output_file + ".png", "w+") as f:
check_call(["dot", "-T", "png", output_file + ".dot"], stdout=f)
print("output written to: %s.png" % output_file)
if len(sys.argv) == 2:
# this analyzes the CPU usage at all slots saved to the profiler directory
analyze_cpu_usage(profile_dir)
elif len(sys.argv) in [3, 4]:
# the additional arguments are interpreted as either one slot, or a
# slot range (first and last) to analyze
first = int(sys.argv[2])
last = int(sys.argv[3]) if len(sys.argv) == 4 else first
analyze_slot_range(profile_dir, first, last)
else:
print(
"""USAGE:
profiler.py <profile-directory>
Analyze CPU usage at each 1 second interval from the profiles in the specified
directory. Print colored timeline to stdout
profiler.py <profile-directory> <slot>
profiler.py <profile-directory> <first-slot> <last-slot>
Analyze a single slot, or a range of time slots, from the profile directory
"""
)
|
11515414
|
from struct import Struct, calcsize, pack, pack_into, unpack_from
from collections import OrderedDict
from ixypy.virtio.types import VRING_AVAIL_F_NO_INTERRUPT, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC
from ixypy.virtio.exception import VirtioException, BufferSizeException
from ixypy.ixy import IxyStruct, IxyQueue
MAX_QUEUE_SIZE = 32768
def align(offset, alignment=4096):
return (offset + (alignment-1)) & -alignment
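# For example: align(5000) == 8192 and align(4096) == 4096, i.e. the offset is
# rounded up to the next multiple of `alignment` (which must be a power of two).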
class VQueue(IxyQueue):
def __init__(self, memory, size, identifier, notification_offset, mempool=None):
super().__init__(memory, size, identifier, mempool)
self.vring = VRing(memory, size)
self.notification_offset = notification_offset
self.used_last_index = 0
def disable_interrupts(self):
self.vring.available.flags = VRING_AVAIL_F_NO_INTERRUPT
self.vring.used.flags = 0
def get_free_descriptor(self, index=0):
for i in range(index, len(self.vring.descriptors)):
desc = self.vring.descriptors[i]
if desc.address == 0:
return i, desc
raise VirtioException('Queue overflow')
def free_descriptors(self):
for index, desc in enumerate(self.vring.descriptors):
if desc.address == 0:
yield index, desc
class VirtioNetworkHeader(object):
data_format = 'B B H H H H'
def __init__(self, flags=0, gso_type=0, header_len=0, gso_size=0, csum_start=0, csum_offset=0):
self.flags = flags
self.gso_type = gso_type
self.header_len = header_len
self.gso_size = gso_size
self.csum_start = csum_start
self.csum_offset = csum_offset
self.struct = Struct(self.data_format)
def to_buffer(self, buffer, offset=0):
self.struct.pack_into(buffer,
offset,
self.flags,
self.gso_type,
self.header_len,
self.gso_size,
self.csum_start,
self.csum_offset)
def __len__(self):
return self.byte_size()
@staticmethod
def byte_size():
# B B H H H H ==> 10
return 10
# return calcsize(VirtioNetworkHeader.data_format)
class VCommand(object):
def __init__(self, class_, id_):
self.class_ = class_
self.id = id_
def bytes(self):
pass
class PromiscuousModeCommand(VCommand):
def __init__(self, on=True):
self.on = on
super().__init__(VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC)
def bytes(self):
return pack('B', self.on)
def __len__(self):
return 1
class VirtioNetworkControl(object):
"""
u8 class
u8 command
u8 command-specific-data[]
u8 ack
"""
data_format = 'B B {:d}B B'
def __init__(self, command, ack=0):
self.command = command
self.ack = ack
def to_buffer(self, buffer, offset=0):
fmt = self.data_format.format(len(self.command))
pack_into(fmt, buffer, offset, self.command.class_, self.command.id, *self.command.bytes(), self.ack)
@staticmethod
def from_bytes(byte_sequence):
pass
@property
def command_class(self):
return self.command.class_
@property
def command_id(self):
return self.command.id
def __len__(self):
return calcsize(self.data_format.format(len(self.command)))
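# A rough worked example of the control layout documented above (values are
# illustrative): wrapping a PromiscuousModeCommand gives len(command) == 1, so
# data_format becomes 'B B 1B B' and to_buffer() packs 4 bytes:
#   ctrl = VirtioNetworkControl(PromiscuousModeCommand(on=True))
#   buf = bytearray(len(ctrl))   # len(ctrl) == calcsize('B B 1B B') == 4
#   ctrl.to_buffer(buf)          # -> [class, command, data=1, ack=0]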
class VRing(object):
dump_count = 0
def __init__(self, buffer, size, alignment=4096):
if size > MAX_QUEUE_SIZE:
raise VirtioException("Size[{}] exceeded maximum size[{}]".format(size, MAX_QUEUE_SIZE))
if len(buffer) < self.byte_size(size, alignment):
            raise BufferSizeException("Required: {}, Received: {}".format(self.byte_size(size, alignment), len(buffer)))
self.size = size
self.buffer = buffer
self.alignment = alignment
self.descriptors = self._descriptors()
self.available = self._available()
self.used = self._used()
def _descriptors(self):
item_size = VRingDescriptor.byte_size()
descriptor_tbl_size = VRing.descriptor_table_size(self.size)
sub_buff = self.buffer[:descriptor_tbl_size]
descriptor_buffers = [sub_buff[i*item_size:item_size*(i+1)] for i in range(self.size)]
return [VRingDescriptor.create_descriptor(dsc_buff) for dsc_buff in descriptor_buffers]
def _available(self):
descriptor_tbl_size = VRing.descriptor_table_size(self.size)
available_queue_size = VRing.available_queue_size(self.size)
sub_buff = self.buffer[descriptor_tbl_size:(descriptor_tbl_size + available_queue_size)]
avail = Available(sub_buff, self.size)
avail.index = 0
for i in range(len(avail.rings)):
avail.rings[i] = 0
        return avail
def _used(self):
buffer_start = len(self) - VRing.used_queue_size(self.size)
sub_buff = self.buffer[buffer_start:len(self)]
used = VRingUsed(sub_buff, self.size)
used.index = 0
for ring in used.rings:
ring.id = 0
ring.len = 0
return used
def __len__(self):
return VRing.byte_size(self.size, self.alignment)
def dump(self):
with open('dumps/vring/vring_{:d}'.format(self.dump_count), 'wb') as f:
f.write(self.buffer)
self.dump_count += 1
@staticmethod
def used_queue_size(queue_size):
return VRingUsed.byte_size(queue_size)
@staticmethod
def descriptor_table_size(queue_size):
return VRingDescriptor.byte_size() * queue_size
@staticmethod
def available_queue_size(queue_size):
return Available.byte_size(queue_size)
@staticmethod
def padding(queue_size, alignment=4096):
dsc_tbl_sz = VRing.descriptor_table_size(queue_size)
avail_sz = VRing.available_queue_size(queue_size)
offset = dsc_tbl_sz + avail_sz
return -offset & (alignment - 1)
@staticmethod
def byte_size(queue_size, alignment=4096):
# see 2.4.2
dsc_tbl_sz = VRing.descriptor_table_size(queue_size)
avail_qsz = VRing.available_queue_size(queue_size)
used_qsz = VRing.used_queue_size(queue_size)
return align(dsc_tbl_sz + avail_qsz, alignment) + used_qsz
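    # Worked example (assuming the 16-byte 'Q I H H' descriptor above): for
    # queue_size=256 and alignment=4096, the descriptor table is 16*256 = 4096
    # bytes, the available ring is 4 + 2*256 = 516 bytes, align(4096 + 516) ==
    # 8192, and the used ring is 4 + 8*256 = 2052 bytes, so byte_size(256) == 10244.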
class VRingDescriptor(IxyStruct):
data_format = 'Q I H H'
def __init__(self, mem):
super().__init__(mem)
@staticmethod
def create_descriptor(mem):
"""
Creates a new descriptor around a buffer
and sets all the fields to zero
"""
descriptor = VRingDescriptor(mem)
descriptor.length = 0
descriptor.address = 0
descriptor.flags = 0
descriptor.next_descriptor = 0
return descriptor
@property
def address(self):
return unpack_from('Q', self.mem, 0)[0]
@address.setter
def address(self, address):
pack_into('Q', self.mem, 0, address)
@property
def length(self):
return unpack_from('I', self.mem, 8)[0]
@length.setter
def length(self, length):
# Q ==> 8
pack_into('I', self.mem, 8, length)
@property
def flags(self):
return unpack_from('H', self.mem, 12)[0]
@flags.setter
def flags(self, flags):
# Q I ==> 12
pack_into('H', self.mem, 12, flags)
@property
def next_descriptor(self):
return unpack_from('H', self.mem, 14)[0]
@next_descriptor.setter
def next_descriptor(self, next_descriptor):
# Q I H ==> 14
pack_into('H', self.mem, 14, next_descriptor)
def reset(self):
self.write(0, 0, 0, 0)
def write(self, length, addr, flags, next_descriptor):
self.data_struct.pack_into(self.mem, 0, length, addr, flags, next_descriptor)
class Available(object):
data_format = 'H H'
def __init__(self, buffer, size):
self.size = size
self.struct = Struct(Available.data_format.format(size))
self.buffer = buffer[:self.struct.size]
self.rings = RingList(buffer[self.struct.size:], size)
@property
def flags(self):
return unpack_from('H', self.buffer, 0)[0]
@flags.setter
def flags(self, flags):
pack_into('H', self.buffer, 0, flags)
@property
def index(self):
return unpack_from('H', self.buffer, 2)[0]
@index.setter
def index(self, index):
        index = index % 0x10000  # the index is free-running and wraps at 2**16
pack_into('H', self.buffer, 2, index)
@staticmethod
def byte_size(queue_size):
"""
H H + queue_sizeH(RingList) + xx
uint16_t avail_flags;
uint16_t avail_idx;
uint16_t available[num];
uint16_t used_event_idx;
"""
return 4 + 2 * queue_size
def __str__(self):
return 'size={} buffer_size={}'.format(self.size, len(self.buffer))
def _unpack(self):
return self.struct.unpack(self.buffer)
def _pack_into_buffer(self, value, field_format, prefix=''):
offset = calcsize(prefix)
pack_into(field_format, self.buffer, offset, value)
class RingList(object):
def __init__(self, buffer, size):
self.struct = Struct('{:d}H'.format(size))
self.buffer = buffer
self.size = size
def __getitem__(self, index):
return unpack_from('H', self.buffer, self._get_offset(index))[0]
def __setitem__(self, index, value):
pack_into('H', self.buffer, self._get_offset(index), value)
def __len__(self):
return self.size
def __iter__(self):
return RingListIterator(self)
@staticmethod
def _get_offset(index):
# H ==> 2
return 2 * index
class RingListIterator(object):
def __init__(self, ring_list):
self.i = 0
self.ring_list = ring_list
def __iter__(self):
return self
def __next__(self):
if self.i < len(self.ring_list):
ring = self.ring_list[self.i]
self.i += 1
return ring
else:
raise StopIteration()
class VRingUsedElement(object):
data_format = 'I I'
def __init__(self, buffer):
self.buffer = buffer
self.struct = Struct(self.data_format)
@staticmethod
def create_used_element(buff):
used_element = VRingUsedElement(buff)
used_element.id = 0
used_element.length = 0
return used_element
@property
def id(self):
# return self._unpack()[0]
return unpack_from('I', self.buffer, 0)[0]
@id.setter
def id(self, _id):
pack_into('I', self.buffer, 0, _id)
@property
def length(self):
return self._unpack()[1]
@length.setter
def length(self, length):
# I ==> 4
pack_into('I', self.buffer, 4, length)
@staticmethod
def byte_size():
# I I ==> 8
return 8
def _unpack(self):
return self.struct.unpack(self.buffer)
def _pack_into_buffer(self, value, field_format, prefix=''):
offset = calcsize(prefix)
pack_into(field_format, self.buffer, offset, value)
def __str__(self):
return 'id={}, length={}'.format(self.id, self.length)
class VRingUsed(object):
data_format = 'H H'
def __init__(self, buffer, size):
self.buffer = buffer
self.self_buffer = buffer[:4]
self.size = size
used_elem_size = VRingUsedElement.byte_size()
self.struct = Struct(VRingUsed.data_format.format(used_elem_size*size))
elem_buff = buffer[4:]
self.rings = [VRingUsedElement.create_used_element(elem_buff[i*used_elem_size:used_elem_size*(i + 1)]) for i in range(size)]
@property
def flags(self):
return self._unpack()[0]
@flags.setter
def flags(self, flags):
pack_into('H', self.buffer, 0, flags)
@property
def index(self):
return self._unpack()[1]
@index.setter
def index(self, index):
# H ==> 2
pack_into('H', self.buffer, 2, index)
def _pack_into_buffer(self, value, field_format, prefix=''):
offset = calcsize(prefix)
pack_into(field_format, self.buffer, offset, value)
def __str__(self):
return 'size={} buffer_size={} format={}'.format(self.size, len(self.buffer), self.struct.format)
def _unpack(self):
return self.struct.unpack(self.self_buffer)
@staticmethod
def byte_size(queue_size):
        # H H header (flags, idx) plus queue_size used elements of 8 bytes each
return 4 + VRingUsedElement.byte_size() * queue_size
|
11515427
|
from okcupyd import User
from . import util
@util.use_cassette
def test_question_answer_id_for_user_question():
user = User()
user_question = user.profile.questions[0]
assert isinstance(user_question.answer_id, int)
assert user_question.answer_id == user.get_question_answer_id(user_question)
@util.use_cassette
def test_question_answer_id_for_profile_question():
user = User()
assert isinstance(user.get_question_answer_id(user.quickmatch().questions[0]), int)
|
11515447
|
from abc import ABC
import numpy as np
from ray.rllib.models.modelv2 import restore_original_dimensions
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
def convert_to_tensor(arr):
tensor = torch.from_numpy(np.asarray(arr))
if tensor.dtype == torch.double:
tensor = tensor.float()
return tensor
class ActorCriticModel(TorchModelV2, nn.Module, ABC):
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
TorchModelV2.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
self.preprocessor = get_preprocessor(obs_space.original_space)(
obs_space.original_space
)
self.shared_layers = None
self.actor_layers = None
self.critic_layers = None
self._value_out = None
def forward(self, input_dict, state, seq_lens):
x = input_dict["obs"]
x = self.shared_layers(x)
# actor outputs
logits = self.actor_layers(x)
# compute value
self._value_out = self.critic_layers(x)
return logits, None
def value_function(self):
return self._value_out
def compute_priors_and_value(self, obs):
obs = convert_to_tensor([self.preprocessor.transform(obs)])
input_dict = restore_original_dimensions(obs, self.obs_space, "torch")
with torch.no_grad():
model_out = self.forward(input_dict, None, [1])
logits, _ = model_out
value = self.value_function()
logits, value = torch.squeeze(logits), torch.squeeze(value)
priors = nn.Softmax(dim=-1)(logits)
priors = priors.cpu().numpy()
value = value.cpu().numpy()
return priors, value
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class ConvNetModel(ActorCriticModel):
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
ActorCriticModel.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
in_channels = model_config["custom_model_config"]["in_channels"]
feature_dim = model_config["custom_model_config"]["feature_dim"]
self.shared_layers = nn.Sequential(
nn.Conv2d(in_channels, 32, kernel_size=4, stride=2),
nn.Conv2d(32, 64, kernel_size=2, stride=1),
nn.Conv2d(64, 64, kernel_size=2, stride=1),
Flatten(),
nn.Linear(1024, feature_dim),
)
self.actor_layers = nn.Sequential(
nn.Linear(in_features=feature_dim, out_features=action_space.n)
)
self.critic_layers = nn.Sequential(
nn.Linear(in_features=feature_dim, out_features=1)
)
self._value_out = None
class DenseModel(ActorCriticModel):
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
ActorCriticModel.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
self.shared_layers = nn.Sequential(
nn.Linear(
in_features=obs_space.original_space["obs"].shape[0], out_features=256
),
nn.Linear(in_features=256, out_features=256),
)
self.actor_layers = nn.Sequential(
nn.Linear(in_features=256, out_features=action_space.n)
)
self.critic_layers = nn.Sequential(nn.Linear(in_features=256, out_features=1))
self._value_out = None
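# A rough sketch (not part of this module) of how these models are typically made
# available to RLlib; the registered name and config values are illustrative:
#   from ray.rllib.models import ModelCatalog
#   ModelCatalog.register_custom_model("conv_model", ConvNetModel)
#   # then in the trainer config:
#   #   "model": {"custom_model": "conv_model",
#   #             "custom_model_config": {"in_channels": 4, "feature_dim": 512}}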
|
11515460
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import os
import shutil
import argparse
import time
import logging
import numpy as np
import json
import models
from data import *
from functools import reduce
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__')
and callable(models.__dict__[name])
)
def parse_args():
# hyper-parameters are from ResNet paper
parser = argparse.ArgumentParser(
description='FracTrain on CIFAR')
parser.add_argument('--dir', help='annotate the working directory')
parser.add_argument('--cmd', choices=['train', 'test'], default='train')
parser.add_argument('--arch', metavar='ARCH',
default='cifar10_rnn_gate_38',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
                        ' (default: cifar10_rnn_gate_38)')
parser.add_argument('--gate_type', type=str, default='ff',
choices=['ff', 'rnn'], help='gate type')
parser.add_argument('--dataset', '-d', default='cifar10', type=str,
choices=['cifar10', 'cifar100'],
help='dataset type')
parser.add_argument('--datadir', default='/home/yf22/dataset', type=str,
help='path to dataset')
parser.add_argument('--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
parser.add_argument('--iters', default=64000, type=int,
help='number of total iterations (default: 64,000)')
parser.add_argument('--start_iter', default=0, type=int,
help='manual iter number (useful on restarts)')
parser.add_argument('--batch_size', default=128, type=int,
help='mini-batch size (default: 128)')
parser.add_argument('--lr_schedule', default='piecewise', type=str,
help='learning rate schedule')
parser.add_argument('--lr', default=0.1, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
help='momentum')
parser.add_argument('--weight_decay', default=1e-4, type=float,
help='weight decay (default: 1e-4)')
parser.add_argument('--print_freq', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: None)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pretrained model')
parser.add_argument('--step_ratio', default=0.1, type=float,
help='ratio for learning rate deduction')
parser.add_argument('--warm_up', action='store_true',
help='for n = 18, the model needs to warm up for 400 '
'iterations')
parser.add_argument('--save_folder', default='save_checkpoints',
type=str,
help='folder to save the checkpoints')
parser.add_argument('--eval_every', default=390, type=int,
                        help='evaluate model every N iterations (default: 390)')
parser.add_argument('--verbose', action="store_true",
help='print layer skipping ratio at training')
parser.add_argument('--target_ratio', default=100, type=float,
help='target ratio')
parser.add_argument('--target_ratio_range', default=0, type=float,
help='target ratio range')
parser.add_argument('--target_ratio_step', default=0.5, type=float,
help='target ratio step when changed')
parser.add_argument('--computation_loss', default=True, type=bool,
help='using computation loss as regularization term')
parser.add_argument('--proceed', default='False',
help='whether this experiment continues from a checkpoint')
parser.add_argument('--beta', default=1e-3, type=float,
help='coefficient')
parser.add_argument('--beta_decay', default=1, type=float,
help='decay of beta')
parser.add_argument('--ada_beta', default=False, action='store_true',
help='adaptively change beta')
parser.add_argument('--rnn_initial', default=False, action='store_true',
                        help='whether to initialize rnn to choose full precision')
parser.add_argument('--act_fw', default=0, type=int,
help='precision of activation during forward, -1 means dynamic, 0 means no quantize')
parser.add_argument('--act_bw', default=0, type=int,
help='precision of activation during backward, -1 means dynamic, 0 means no quantize')
parser.add_argument('--grad_act_error', default=0, type=int,
help='precision of activation gradient during error backward, -1 means dynamic, 0 means no quantize')
parser.add_argument('--grad_act_gc', default=0, type=int,
help='precision of activation gradient during weight gradient computation, -1 means dynamic, 0 means no quantize')
parser.add_argument('--weight_bits', default=0, type=int,
help='precision of weight')
parser.add_argument('--schedule', default=None, type=int, nargs='*',
help='target ratio schedule')
parser.add_argument('--weight_bits_schedule',default=None,type=float,nargs='*',
help='schedule for weight precision')
parser.add_argument('--momentum_act', default=0.1, type=float,
help='momentum for act min/max')
parser.add_argument('--finetune_step', default=0, type=int,
help='num steps to finetune with full precision')
parser.add_argument('--conv_info', default='', type=str,
help='load the layerwise flops information')
parser.add_argument('--dws_bits', default=8, type=int,
help='precision for dws conv weight and activation')
parser.add_argument('--dws_grad_bits', default=16, type=int,
help='precision for dws conv error and gradient')
parser.add_argument('--num_turning_point', type=int, default=3)
parser.add_argument('--initial_threshold', type=float, default=0.15)
parser.add_argument('--decay', type=float, default=0.4)
args = parser.parse_args()
return args
args = parse_args()
# indicator
class loss_diff_indicator():
def __init__(self, threshold, decay, epoch_keep=5):
self.threshold = threshold
self.decay = decay
self.epoch_keep = epoch_keep
self.loss = []
self.scale_loss = 1
self.loss_diff = [1 for i in range(1, self.epoch_keep)]
def reset(self):
self.loss = []
self.loss_diff = [1 for i in range(1, self.epoch_keep)]
def adaptive_threshold(self, turning_point_count):
decay_1 = self.decay
decay_2 = self.decay
if turning_point_count == 1:
self.threshold *= decay_1
if turning_point_count == 2:
self.threshold *= decay_2
print('threshold decay to {}'.format(self.threshold))
def get_loss(self, current_epoch_loss):
if len(self.loss) < self.epoch_keep:
self.loss.append(current_epoch_loss)
else:
self.loss.pop(0)
self.loss.append(current_epoch_loss)
def cal_loss_diff(self):
if len(self.loss) == self.epoch_keep:
for i in range(len(self.loss)-1):
loss_now = self.loss[-1]
loss_pre = self.loss[i]
self.loss_diff[i] = np.abs(loss_pre - loss_now) / self.scale_loss
return True
else:
return False
def turning_point_emerge(self):
flag = self.cal_loss_diff()
if flag == True:
print(self.loss_diff)
for i in range(len(self.loss_diff)):
if self.loss_diff[i] > self.threshold:
return False
return True
else:
return False
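# A minimal sketch of how the indicator is driven (hypothetical loss values):
#   ind = loss_diff_indicator(threshold=0.15, decay=0.4)
#   for epoch_loss in [2.1, 1.3, 0.9, 0.88, 0.87, 0.865]:
#       ind.get_loss(epoch_loss)
#       if ind.turning_point_emerge():  # True once all recent diffs fall below threshold
#           ...  # bump the target ratio, decay the threshold, then ind.reset()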
def main():
models.ACT_FW = args.act_fw
models.ACT_BW = args.act_bw
models.GRAD_ACT_ERROR = args.grad_act_error
models.GRAD_ACT_GC = args.grad_act_gc
models.WEIGHT_BITS = args.weight_bits
models.MOMENTUM = args.momentum_act
models.DWS_BITS = args.dws_bits
models.DWS_GRAD_BITS = args.dws_grad_bits
save_path = args.save_path = os.path.join(args.save_folder, args.arch)
os.makedirs(save_path, exist_ok=True)
# config logger file
args.logger_file = os.path.join(save_path, 'log_{}.txt'.format(args.cmd))
handlers = [logging.FileHandler(args.logger_file, mode='w'),
logging.StreamHandler()]
logging.basicConfig(level=logging.INFO,
datefmt='%m-%d-%y %H:%M',
format='%(asctime)s:%(message)s',
handlers=handlers)
global history_score
history_score = np.zeros((args.iters // args.eval_every, 3))
# initialize indicator
# initial_threshold=0.15
global scale_loss
scale_loss = 0
global my_loss_diff_indicator
my_loss_diff_indicator = loss_diff_indicator(threshold=args.initial_threshold,
decay=args.decay)
global turning_point_count
turning_point_count = 0
if args.cmd == 'train':
logging.info('start training {}'.format(args.arch))
run_training(args)
elif args.cmd == 'test':
logging.info('start evaluating {} with checkpoints from {}'.format(
args.arch, args.resume))
test_model(args)
def fix_rnn(model):
for param in model.control.parameters():
param.requires_grad = False
for param in model.control_grad.parameters():
param.requires_grad = False
for g in range(3):
for i in range(model.num_layers[g]):
gate_layer = getattr(model,'group{}_gate{}'.format(g + 1,i))
for param in gate_layer.parameters():
param.requires_grad = False
bits = [3, 4, 4, 6, 6]
grad_bits = [6, 6, 8, 8, 12]
if args.conv_info:
conv_info = np.load(args.conv_info, allow_pickle=True).item()['conv']
dws_info = np.load(args.conv_info, allow_pickle=True).item()['dws']
dws_flops_fw = sum(dws_info) * args.dws_bits * args.dws_bits /32 /32
dws_flops_gc = dws_flops_eb = sum(dws_info) * args.dws_bits * args.dws_grad_bits /32 /32
dws_flops_total = dws_flops_fw + dws_flops_eb + dws_flops_gc
else:
conv_info = None
dws_flops_total = dws_flops_fw = dws_flops_gc = dws_flops_eb = 0
def run_training(args):
training_loss = 0
training_acc = 0
global conv_info
cost_fw = []
for bit in bits:
cost_fw.append(bit/32)
cost_fw = np.array(cost_fw) * args.weight_bits/32
cost_eb = []
for bit in grad_bits:
cost_eb.append(bit/32)
cost_eb = np.array(cost_eb) * args.weight_bits/32
cost_gc = []
for i in range(len(bits)):
cost_gc.append(bits[i] * grad_bits[i]/32/32)
cost_gc = np.array(cost_gc)
# create model
model = models.__dict__[args.arch](args.pretrained, proj_dim=len(bits))
model = torch.nn.DataParallel(model).cuda()
best_prec1 = 0
best_iter = 0
# best_full_prec = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
logging.info('=> loading checkpoint `{}`'.format(args.resume))
checkpoint = torch.load(args.resume)
if args.proceed == 'True':
args.start_iter = checkpoint['iter']
else:
args.start_iter = 0
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'],strict=True)
logging.info('=> loaded checkpoint `{}` (iter: {})'.format(
args.resume, checkpoint['iter']
))
else:
logging.info('=> no checkpoint found at `{}`'.format(args.resume))
cudnn.benchmark = True
train_loader = prepare_train_data(dataset=args.dataset,
datadir=args.datadir,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers)
test_loader = prepare_test_data(dataset=args.dataset,
datadir=args.datadir,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers)
if args.rnn_initial:
for param in model.parameters():
param.requires_grad = False
for param in model.control.parameters():
param.requires_grad = True
for param in model.control_grad.parameters():
param.requires_grad = True
for g in range(3):
for i in range(model.num_layers[g]):
gate_layer = getattr(model,'group{}_gate{}'.format(g + 1,i))
for param in gate_layer.parameters():
param.requires_grad = True
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
skip_ratios = ListAverageMeter()
cp_record = AverageMeter()
cp_record_fw = AverageMeter()
cp_record_eb = AverageMeter()
cp_record_gc = AverageMeter()
network_depth = sum(model.module.num_layers)
if conv_info is None:
conv_info = [1 for _ in range(network_depth)]
layerwise_decision_statistics = []
for k in range(network_depth):
layerwise_decision_statistics.append([])
for j in range(len(cost_fw)):
ratio = AverageMeter()
layerwise_decision_statistics[k].append(ratio)
end = time.time()
global scale_loss
global turning_point_count
global my_loss_diff_indicator
i = args.start_iter
while i < args.iters + args.finetune_step:
for input, target in train_loader:
# measuring data loading time
data_time.update(time.time() - end)
model.train()
# adjust_learning_rate(args, optimizer1, optimizer2, i)
adjust_learning_rate(args, optimizer, i)
adjust_target_ratio(args, turning_point_count)
i += 1
target = target.cuda()
input_var = Variable(input).cuda()
target_var = Variable(target).cuda()
if i > args.iters:
output, _ = model(input_var, np.zeros(len(bits)), np.zeros(len(grad_bits)))
computation_cost = 0
cp_ratio = 1
cp_ratio_fw = 1
cp_ratio_eb = 1
cp_ratio_gc = 1
else:
output, masks = model(input_var, bits, grad_bits)
computation_cost_fw = 0
computation_cost_eb = 0
computation_cost_gc = 0
for layer in range(network_depth):
full_layer = reduce((lambda x, y: x * y), masks[layer][0].shape)
for k in range(len(cost_fw)):
dynamic_choice = masks[layer][k].sum()
ratio = dynamic_choice / full_layer
layerwise_decision_statistics[layer][k].update(ratio.data, 1)
computation_cost_fw += masks[layer][k].sum() * cost_fw[k] * conv_info[layer]
computation_cost_eb += masks[layer][k].sum() * cost_eb[k] * conv_info[layer]
computation_cost_gc += masks[layer][k].sum() * cost_gc[k] * conv_info[layer]
computation_cost_fw += dws_flops_fw * args.batch_size
computation_cost_eb += dws_flops_eb * args.batch_size
computation_cost_gc += dws_flops_gc * args.batch_size
computation_cost = computation_cost_fw + computation_cost_eb + computation_cost_gc
cp_ratio_fw = float(computation_cost_fw) / args.batch_size / (sum(conv_info) + dws_flops_fw) * 100
cp_ratio_eb = float(computation_cost_eb) / args.batch_size / (sum(conv_info) + dws_flops_eb) * 100
cp_ratio_gc = float(computation_cost_gc) / args.batch_size / (sum(conv_info) + dws_flops_gc) * 100
cp_ratio = float(computation_cost) / args.batch_size / (sum(conv_info)*3 + dws_flops_total) * 100
computation_loss = computation_cost / np.mean(conv_info) * args.beta
if cp_ratio < args.target_ratio:
reg = -1
elif cp_ratio >= args.target_ratio + args.target_ratio_range:
reg = 1
else:
reg = 0
loss_cls = criterion(output, target_var)
if computation_loss > loss_cls/10 and args.ada_beta:
computation_loss *= loss_cls.detach()/10/computation_loss.detach()
if args.computation_loss:
loss = loss_cls + computation_loss * reg
else:
loss = loss_cls
# measure accuracy and record loss
prec1, = accuracy(output.data, target, topk=(1,))
losses.update(loss.item(), input.size(0))
training_loss += loss.item()
top1.update(prec1.item(), input.size(0))
training_acc += prec1.item()
# skip_ratios.update(skips, input.size(0))
cp_record.update(cp_ratio,1)
cp_record_fw.update(cp_ratio_fw,1)
cp_record_eb.update(cp_ratio_eb,1)
cp_record_gc.update(cp_ratio_gc,1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# repackage hidden units for RNN Gate
if args.gate_type == 'rnn':
model.module.control.repackage_hidden()
batch_time.update(time.time() - end)
end = time.time()
# print log
if i % args.print_freq == 0 or i == (args.iters - 1):
logging.info("Iter: [{0}/{1}]\t"
"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
"Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
"Loss {loss.val:.3f} ({loss.avg:.3f})\t"
"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t"
"Computation_Percentage: {cp_record.val:.3f}({cp_record.avg:.3f})\t"
"Computation_Percentage_FW: {cp_record_fw.val:.3f}({cp_record_fw.avg:.3f})\t"
"Computation_Percentage_EB: {cp_record_eb.val:.3f}({cp_record_eb.avg:.3f})\t"
"Computation_Percentage_GC: {cp_record_gc.val:.3f}({cp_record_gc.avg:.3f})\t".format(
i,
args.iters,
batch_time=batch_time,
data_time=data_time,
loss=losses,
top1=top1,
cp_record=cp_record,
cp_record_fw=cp_record_fw,
cp_record_eb=cp_record_eb,
cp_record_gc=cp_record_gc)
)
# evaluate every 1000 steps
if (i % args.eval_every == 0 and i > 0) or (i == args.iters):
global history_score
epoch = i // args.eval_every
epoch_loss = training_loss / len(train_loader)
with torch.no_grad():
prec1 = validate(args, test_loader, model, criterion, i)
# prec_full = validate_full_prec(args, test_loader, model, criterion, i)
history_score[epoch-1][0] = epoch_loss
history_score[epoch-1][1] = np.round(training_acc / len(train_loader), 2)
history_score[epoch-1][2] = prec1
training_loss = 0
training_acc = 0
np.savetxt(os.path.join(args.save_path, 'record.txt'), history_score, fmt = '%10.5f', delimiter=',')
if epoch <= 10:
scale_loss += epoch_loss
logging.info('scale_loss at epoch {}: {}'.format(epoch, scale_loss / epoch))
my_loss_diff_indicator.scale_loss = scale_loss / epoch
if turning_point_count < args.num_turning_point:
my_loss_diff_indicator.get_loss(epoch_loss)
flag = my_loss_diff_indicator.turning_point_emerge()
if flag == True:
turning_point_count += 1
logging.info('find {}-th turning point at {}-th epoch'.format(turning_point_count, epoch))
# print('find {}-th turning point at {}-th epoch'.format(turning_point_count, epoch))
my_loss_diff_indicator.adaptive_threshold(turning_point_count=turning_point_count)
my_loss_diff_indicator.reset()
logging.info('Epoch [{}], target_ratio=[{},{}]'.format(epoch, args.target_ratio, args.target_ratio+args.target_ratio_range))
is_best = prec1 > best_prec1
if is_best:
best_prec1 = prec1
best_iter = i
# best_full_prec = max(prec_full, best_full_prec)
print("Current Best Prec@1: ", best_prec1)
print("Current Best Iteration: ", best_iter)
checkpoint_path = os.path.join(args.save_path, 'checkpoint_{:05d}_{:.2f}.pth.tar'.format(i, prec1))
save_checkpoint({
'iter': i,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
},
is_best = is_best, filename=checkpoint_path)
shutil.copyfile(checkpoint_path, os.path.join(args.save_path,
'checkpoint_latest'
'.pth.tar'))
if i == args.iters:
print("Best accuracy: "+str(best_prec1))
history_score[-1][0] = best_prec1
np.savetxt(os.path.join(args.save_path, 'record.txt'), history_score, fmt = '%10.5f', delimiter=',')
break
if i >= args.iters + args.finetune_step:
break
def validate(args, test_loader, model, criterion, step):
global conv_info
cost_fw = []
for bit in bits:
cost_fw.append(bit/32)
cost_fw = np.array(cost_fw) * args.weight_bits/32
cost_eb = []
for bit in grad_bits:
cost_eb.append(bit/32)
cost_eb = np.array(cost_eb) * args.weight_bits/32
cost_gc = []
for i in range(len(bits)):
cost_gc.append(bits[i] * grad_bits[i]/32/32)
cost_gc = np.array(cost_gc)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
skip_ratios = ListAverageMeter()
cp_record = AverageMeter()
cp_record_fw = AverageMeter()
cp_record_eb = AverageMeter()
cp_record_gc = AverageMeter()
network_depth = sum(model.module.num_layers)
layerwise_decision_statistics = []
for k in range(network_depth):
layerwise_decision_statistics.append([])
for j in range(len(cost_fw)):
ratio = AverageMeter()
layerwise_decision_statistics[k].append(ratio)
model.eval()
end = time.time()
for i, (input, target) in enumerate(test_loader):
data_time.update(time.time() - end)
target = target.cuda()
input_var = Variable(input).cuda()
target_var = Variable(target).cuda()
output, masks = model(input_var, bits, grad_bits)
computation_cost_fw = 0
computation_cost_eb = 0
computation_cost_gc = 0
computation_all = 0
for layer in range(network_depth):
full_layer = reduce((lambda x, y: x * y), masks[layer][0].shape)
for k in range(len(cost_fw)):
dynamic_choice = masks[layer][k].sum()
ratio = dynamic_choice / full_layer
layerwise_decision_statistics[layer][k].update(ratio.data, 1)
computation_cost_fw += masks[layer][k].sum() * cost_fw[k] * conv_info[layer]
computation_cost_eb += masks[layer][k].sum() * cost_eb[k] * conv_info[layer]
computation_cost_gc += masks[layer][k].sum() * cost_gc[k] * conv_info[layer]
computation_cost_fw += dws_flops_fw * args.batch_size
computation_cost_eb += dws_flops_eb * args.batch_size
computation_cost_gc += dws_flops_gc * args.batch_size
computation_cost = computation_cost_fw + computation_cost_eb + computation_cost_gc
cp_ratio_fw = float(computation_cost_fw) / args.batch_size / (sum(conv_info) + dws_flops_fw) * 100
cp_ratio_eb = float(computation_cost_eb) / args.batch_size / (sum(conv_info) + dws_flops_eb) * 100
cp_ratio_gc = float(computation_cost_gc) / args.batch_size / (sum(conv_info) + dws_flops_gc) * 100
cp_ratio = float(computation_cost) / args.batch_size / (sum(conv_info)*3 + dws_flops_total) * 100
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, = accuracy(output.data, target, topk=(1,))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# skip_ratios.update(skips, input.size(0))
cp_record.update(cp_ratio,1)
cp_record_fw.update(cp_ratio_fw,1)
cp_record_eb.update(cp_ratio_eb,1)
cp_record_gc.update(cp_ratio_gc,1)
# repackage hidden units for RNN Gate
if args.gate_type == 'rnn':
model.module.control.repackage_hidden()
batch_time.update(time.time() - end)
end = time.time()
# print log
if i % args.print_freq == 0 or (i == (len(test_loader) - 1)):
logging.info("Iter: [{0}/{1}]\t"
"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
"Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
"Loss {loss.val:.3f} ({loss.avg:.3f})\t"
"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t"
"Computation_Percentage: {cp_record.val:.3f}({cp_record.avg:.3f})\t"
"Computation_Percentage_FW: {cp_record_fw.val:.3f}({cp_record_fw.avg:.3f})\t"
"Computation_Percentage_EB: {cp_record_eb.val:.3f}({cp_record_eb.avg:.3f})\t"
"Computation_Percentage_GC: {cp_record_gc.val:.3f}({cp_record_gc.avg:.3f})\t".format(
i,
len(test_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
top1=top1,
cp_record=cp_record,
cp_record_fw=cp_record_fw,
cp_record_eb=cp_record_eb,
cp_record_gc=cp_record_gc)
)
logging.info('Step {} * Prec@1 {top1.avg:.3f}, Loss {loss.avg:.3f}'.format(step, top1=top1, loss=losses))
for layer in range(network_depth):
print('layer{}_decision'.format(layer + 2))
for g in range(len(cost_fw)):
print('{}_ratio{}'.format(g,layerwise_decision_statistics[layer][g].avg))
return top1.avg
def validate_full_prec(args, test_loader, model, criterion, step):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
bits_full = np.zeros(len(bits))
grad_bits_full = np.zeros(len(grad_bits))
# switch to evaluation mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(test_loader):
target = target.cuda()
input_var = Variable(input, volatile=True).cuda()
target_var = Variable(target, volatile=True).cuda()
# compute output
output, _ = model(input_var, bits_full, grad_bits_full)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, = accuracy(output.data, target, topk=(1,))
top1.update(prec1.item(), input.size(0))
# skip_ratios.update(skips, input.size(0))
losses.update(loss.item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if args.gate_type == 'rnn':
model.module.control.repackage_hidden()
logging.info('Step {} * Full Prec@1 {top1.avg:.3f}, Loss {loss.avg:.3f}'.format(step, top1=top1, loss=losses))
return top1.avg
def test_model(args):
global conv_info
model = models.__dict__[args.arch](args.pretrained, proj_dim=len(bits))
model = torch.nn.DataParallel(model).cuda()
if args.resume:
if os.path.isfile(args.resume):
logging.info('=> loading checkpoint `{}`'.format(args.resume))
checkpoint = torch.load(args.resume)
args.start_iter = checkpoint['iter']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'],strict=True)
logging.info('=> loaded checkpoint `{}` (iter: {})'.format(
args.resume, checkpoint['iter']
))
else:
logging.info('=> no checkpoint found at `{}`'.format(args.resume))
network_depth = sum(model.module.num_layers)
if conv_info is None:
conv_info = [1 for _ in range(network_depth)]
cudnn.benchmark = False
test_loader = prepare_test_data(dataset=args.dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers)
criterion = nn.CrossEntropyLoss().cuda()
with torch.no_grad():
validate(args, test_loader, model, criterion, args.start_iter)
# validate_full_prec(args, test_loader, model, criterion, args.start_iter)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
save_path = os.path.dirname(filename)
shutil.copyfile(filename, os.path.join(save_path,
'model_best.pth.tar'))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ListAverageMeter(object):
"""Computes and stores the average and current values of a list"""
def __init__(self):
self.len = 10000 # set up the maximum length
self.reset()
def reset(self):
self.val = [0] * self.len
self.avg = [0] * self.len
self.sum = [0] * self.len
self.count = 0
def set_len(self, n):
self.len = n
self.reset()
def update(self, vals, n=1):
assert len(vals) == self.len, 'length of vals not equal to self.len'
self.val = vals
for i in range(self.len):
self.sum[i] += self.val[i] * n
self.count += n
for i in range(self.len):
self.avg[i] = self.sum[i] / self.count
target_ratio_list = [(args.target_ratio + i*args.target_ratio_step) for i in range(args.num_turning_point+1)]
def adjust_target_ratio(args, turning_point_count):
args.target_ratio = target_ratio_list[turning_point_count]
def adjust_learning_rate(args, optimizer, _iter):
if args.lr_schedule == 'piecewise':
if args.warm_up and (_iter < 400):
lr = 0.01
elif 32000 <= _iter < 48000:
lr = args.lr * (args.step_ratio ** 1)
elif _iter >= 48000:
lr = args.lr * (args.step_ratio ** 2)
else:
lr = args.lr
elif args.lr_schedule == 'linear':
t = _iter / args.iters
lr_ratio = 0.01
if args.warm_up and (_iter < 400):
lr = 0.01
elif t < 0.25:
lr = args.lr
elif t < 0.75:
lr = args.lr * (1 - (1-lr_ratio)*(t-0.25)/0.5)
else:
lr = args.lr * lr_ratio
elif args.lr_schedule == 'anneal_cosine':
lr_min = args.lr * (args.step_ratio ** 2)
lr_max = args.lr
lr = lr_min + 1/2 * (lr_max - lr_min) * (1 + np.cos(_iter/args.iters * 3.141592653))
if _iter % args.eval_every == 0:
logging.info('Iter [{}] learning rate = {}'.format(_iter, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
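# Example (illustrative): accuracy(output, target, topk=(1, 5)) returns the
# batch top-1 and top-5 precision as percentages.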
if __name__ == '__main__':
main()
|
11515478
|
from parsuite.core.argument import Argument,DefaultArguments
from parsuite import helpers
from parsuite.core.suffix_printer import *
import xml.etree.ElementTree as ET
import argparse
import os
import sqlite3
import csv
from sys import exit,stdout,stderr
from datetime import datetime
import pdb
help = 'Accept a Firefox cookie file (SQLite3) and dump each record' \
' in CSV format. strLastAccessed and strCreationTime are added' \
' to each record to help find the freshest cookies. The final' \
' column contains the constructed cookie.'
FIELDS = ['id','baseDomain','name','value','host','path','expiry',
'lastAccessed','creationTime','strExpiry','strLastAccessed',
'strCreationTime','cookie']
args = [
DefaultArguments.input_files,
Argument('--delimiter','-d',
default=',',
help='Delimiter that separates values'),
Argument('--fields','-fs',
default=FIELDS,
help='Which fields to return. Valid Values: %(default)s',
nargs='+')
]
# https://linuxfreelancer.com/decoding-firefox-cookies-sqlite-cookies-viewer
MAXDATE=2049840000
def convert(epoch):
mydate=epoch[:10]
if int(mydate)>MAXDATE: mydate=str(MAXDATE)
if len(epoch)>10: mytime=epoch[11:]
else: mytime='0'
fulldate=float(mydate+'.'+mytime)
x=datetime.fromtimestamp(fulldate)
return x.ctime()
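# Example (illustrative): Firefox stores lastAccessed/creationTime as
# microsecond epochs, so convert('1650000000123456') treats the first ten
# digits as seconds and the remaining digits as a fractional part before
# formatting with ctime().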
def parse(input_files=None, delimiter=',', fields=None, **kwargs):
    input_files = input_files or []
    fields = fields or FIELDS
for f in fields:
if f not in FIELDS: raise Exception(f'Invalid field value: {f}')
cw = csv.writer(stdout,delimiter=delimiter)
cw.writerow(fields)
for input_file in input_files:
esprint(f'Connecting to the database: {input_file}')
try:
conn = sqlite3.connect(input_file)
cur = conn.cursor()
# Get table columns
cols = [r[1] for r in
cur.execute('PRAGMA table_info(moz_cookies)') \
.fetchall()]
# Determine offsets within row for current table
            offsets = {f:i for i,f in enumerate(cols)}
# Dump row contents
for record in cur.execute('SELECT * FROM moz_cookies'):
record = list(record)
drecord = {f:record[o] for f,o in offsets.items()}
if 'expiry' in cols:
drecord['strExpiry'] = convert(
str(record[offsets['expiry']])
)
if 'lastAccessed' in cols:
drecord['strLastAccessed'] = convert(
str(record[offsets['lastAccessed']])
)
if 'creationTime' in cols:
drecord['strCreationTime'] = convert(
str(record[offsets['creationTime']])
)
if 'name' in cols and 'value' in cols:
drecord['cookie'] = f'{drecord["name"]}={drecord["value"]};'
cw.writerow([drecord[f] for f in fields])
except sqlite3.Error as e:
esprint(f'Error occurred when dumping {input_file}!\n\n', WAR)
print(e.__str__(),file=stderr)
continue
return 0
|
11515490
|
from __future__ import print_function
# This lets us use the python3-style print() function even in python2. It should have no effect if you're already running python3.
import os
import dwl
import numpy as np
# Configure the printing
np.set_printoptions(suppress=True)
# Construct an instance of the WholeBodyDynamics class, which wraps the C++ class.
ws = dwl.WholeBodyState()
fbs = dwl.FloatingBaseSystem()
wdyn = dwl.WholeBodyDynamics()
# Resetting the system from the hyq urdf file
fpath = os.path.dirname(os.path.abspath(__file__))
wdyn.modelFromURDFFile(fpath + "/../hyq.urdf", fpath + "/../../config/hyq.yarf")
fbs = wdyn.getFloatingBaseSystem()
# Define the DoF after initializing the robot model
ws.setJointDoF(fbs.getJointDoF())
# The robot state
ws.setBasePosition(np.array([0., 0., 0.]))
ws.setBaseRPY(np.array([0., 0., 0.]))
ws.setBaseVelocity_W(np.array([0., 0., 0.]))
ws.setBaseRPYVelocity_W(np.array([0., 0., 0.]))
ws.setBaseAcceleration_W(np.array([0., 0., 0.]))
ws.setBaseRPYAcceleration_W(np.array([0., 0., 0.]))
ws.setJointPosition(0.75, fbs.getJointId("lf_hfe_joint"))
ws.setJointPosition(-1.5, fbs.getJointId("lf_kfe_joint"))
ws.setJointPosition(-0.75, fbs.getJointId("lh_hfe_joint"))
ws.setJointPosition(1.5, fbs.getJointId("lh_kfe_joint"))
ws.setJointPosition(0.75, fbs.getJointId("rf_hfe_joint"))
ws.setJointPosition(-1.5, fbs.getJointId("rf_kfe_joint"))
ws.setJointPosition(-0.75, fbs.getJointId("rh_hfe_joint"))
ws.setJointPosition(1.5, fbs.getJointId("rh_kfe_joint"))
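# Ground reaction forces per foot, expressed as 6d wrenches; the last component
# is presumably the vertical force in newtons (the four values below roughly
# sum to the robot's weight).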
grf = { 'lf_foot' : np.array([0., 0., 0., 0., 0., 190.778]),
        'rf_foot' : np.array([0., 0., 0., 0., 0., 190.778]),
        'lh_foot' : np.array([0., 0., 0., 0., 0., 190.778]),
        'rh_foot' : np.array([0., 0., 0., 0., 0., 190.778]) };
base_eff = ws.base_eff
joint_eff = ws.joint_eff
base_pos = ws.base_pos
joint_pos = ws.joint_pos
base_vel = ws.base_vel
joint_vel = ws.joint_vel
base_acc = ws.base_acc
joint_acc = ws.joint_acc
noforce = dict()
wdyn.computeInverseDynamics(base_eff, joint_eff,
base_pos, joint_pos,
base_vel, joint_vel,
base_acc, joint_acc,
grf);
print("----------------------------- Inverse Dynamics ------------------------------------")
print("Base wrench:", base_eff.transpose())
print("Joint forces:", joint_eff.transpose())
print()
print("------------------------ Floating-based Inverse Dynamics --------------------------")
wdyn.computeFloatingBaseInverseDynamics(base_acc, joint_eff,
base_pos, joint_pos,
base_vel, joint_vel,
joint_acc, grf);
print("Base acceleration:", base_acc.transpose())
print("Joint forces:", joint_eff.transpose())
print()
print("---------------------- Constrained Floating-based Dynamics ------------------------")
wdyn.computeConstrainedFloatingBaseInverseDynamics(joint_eff,
base_pos, joint_pos,
base_vel, joint_vel,
base_acc, joint_acc,
fbs.getEndEffectorNames(dwl.FOOT));
print("Joint forces:", joint_eff.transpose())
print()
print("--------------------------- Gravitational Wrench ----------------------------------")
com_pos = np.zeros(3)
grav_wrench_W = wdyn.computeGravitoWrench(com_pos)
print("The gravitational wrench:", grav_wrench_W.transpose())
print()
print("---------------------------- Inertial matrices ------------------------------------")
joint_inertial_mat = wdyn.computeJointSpaceInertiaMatrix(base_pos, joint_pos);
print("The joint-space inertial matrix: ", joint_inertial_mat)
com_inertial_mat = wdyn.computeCentroidalInertiaMatrix(base_pos, joint_pos)
print("The centroidal inertial matrix: ", com_inertial_mat)
print()
print("----------------------------- Contact forces --------------------------------------")
contact_forces = dict()
wdyn.computeContactForces(contact_forces, joint_eff,
base_pos, joint_pos,
base_vel, joint_vel,
base_acc, joint_acc,
fbs.getEndEffectorNames(dwl.FOOT))
print("The contact forces:", contact_forces)
print("The joint efforts:", joint_eff.transpose())
print()
print("------------------------ Estimated contact forces ---------------------------------")
wdyn.estimateContactForces(contact_forces,
base_pos, joint_pos,
base_vel, joint_vel,
base_acc, joint_acc,
joint_eff, fbs.getEndEffectorNames(dwl.FOOT));
print("The contact forces:", contact_forces)
print()
print("-------------------------- Center of pressure -------------------------------------")
cop_pos = np.zeros(3)
contact_forces = { 'lf_foot' : np.array([0., 0., 0., 0., 0., 190.778]),
'lh_foot' : np.array([0., 0., 0., 0., 0., 190.778]),
'rf_foot' : np.array([0., 0., 0., 0., 0., 190.778]),
'rh_foot' : np.array([0., 0., 0., 0., 0., 190.778]) };
contact_pos = { 'lf_foot' : np.array([0.371, 0.207, -0.589]),
'lh_foot' : np.array([-0.371, 0.207, -0.589]),
'rf_foot' : np.array([0.371, -0.207, -0.589]),
'rh_foot' : np.array([-0.371, -0.207, -0.589]) }
wdyn.computeCenterOfPressure(cop_pos,
contact_forces,
contact_pos);
print("The center of pressure:", cop_pos.transpose())
print()
print("--------------------------- Capture point -----------------------------------------")
icp_pos = np.zeros(3)
com_pos = fbs.getSystemCoM(ws.base_pos, ws.joint_pos)
com_vel = fbs.getSystemCoMRate(ws.base_pos, ws.joint_pos,
ws.base_vel, ws.joint_vel)
height = 0.589
wdyn.computeInstantaneousCapturePoint(icp_pos,
com_pos, com_vel,
height);
print("The instantaneous capture point:", icp_pos.transpose())
print()
print("----------------------- Centroidal moment pivot -----------------------------------")
cmp_pos = np.zeros(3)
wdyn.computeCentroidalMomentPivot(cmp_pos,
com_pos, height,
contact_forces);
print("The centroidal moment pivot:", cmp_pos.transpose())
print()
print("----------------------------- CoM torque ------------------------------------------")
com_torque = np.zeros(3)
wdyn.computeCoMTorque(com_torque,
cop_pos, cmp_pos,
contact_forces);
print("The CoM torque:", com_torque.transpose())
print()
print("----------------------- Estimated GRFs from CoP -----------------------------------")
wdyn.estimateGroundReactionForces(grf,
cop_pos, contact_pos,
fbs.getEndEffectorNames(dwl.FOOT));
print("The estimated GRFs:", grf)
print()
print("------------------------- Estimated active contacts -------------------------------")
force_threshold = 50.
active_contacts = dwl.string_List()
contact_forces = dict()
wdyn.estimateActiveContactsAndForces(active_contacts, contact_forces,
base_pos, joint_pos,
base_vel, joint_vel,
base_acc, joint_acc,
joint_eff, fbs.getEndEffectorNames(dwl.FOOT), # it uses all the end-effector of the system
force_threshold);
print("The active contacts:", active_contacts)
print("The active contact forces:", contact_forces)
print()
print("------------------------------ Active contacts ------------------------------------")
wdyn.getActiveContacts(active_contacts,
contact_forces, force_threshold);
print("The active contacts:", active_contacts)
|
11515546
|
from django import forms
from .models import Revision
class RevisionForm(forms.ModelForm):
revision_pk = forms.IntegerField(required=False, widget=forms.HiddenInput())
message = forms.CharField(required=False, help_text="Leave a helpful message about your change")
def __init__(self, *args, **kwargs):
self.revision = kwargs.pop("revision")
super().__init__(*args, **kwargs)
if self.revision:
self.fields["content"].initial = self.revision.content
self.fields["revision_pk"].initial = self.revision.pk
else:
self.fields["content"].initial = "add content and create a new page"
self.fields["message"].initial = "initial revision"
def clean_content(self):
if self.revision and self.cleaned_data["content"] == self.revision.content:
raise forms.ValidationError("You made no stinking changes")
return self.cleaned_data["content"]
def clean(self):
if self.revision and self.cleaned_data.get("revision_pk") != self.revision.pk:
raise forms.ValidationError("Someone edited this before you")
return self.cleaned_data
class Meta:
model = Revision
fields = [
"revision_pk",
"content",
"message"
]
|
11515550
|
import numpy as np
import pytest
import tensorflow as tf
from larq_zoo import preprocess_input
def test_numpy_input():
image = np.random.randint(0, 255, size=(300, 300, 3), dtype="uint8")
prepro = preprocess_input(image)
assert isinstance(prepro, np.ndarray)
def test_tensor_input():
image = np.random.randint(0, 255, size=(300, 300, 3), dtype="uint8")
tf_image = tf.constant(image)
prepro = preprocess_input(tf_image)
assert isinstance(prepro, tf.Tensor)
def test_wrong_input():
with pytest.raises(ValueError, match="Input must be of size .*"):
preprocess_input(np.random.randint(0, 255, size=(4, 32, 32, 3), dtype="uint8"))
|
11515555
|
import onmt
import onmt.Markdown
import argparse
import torch
def loadImageLibs():
"Conditional import of torch image libs."
global Image, transforms
from PIL import Image
from torchvision import transforms
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.Markdown.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-src_type', default="bitext",
choices=["bitext", "monotext", "img"],
help="""Type of the source input.
This affects all the subsequent operations
Options are [bitext|monotext|img].""")
parser.add_argument('-src_img_dir', default=".",
help="Location of source images")
parser.add_argument('-train',
help="""Path to the monolingual training data""")
parser.add_argument('-train_src', required=False,
help="Path to the training source data")
parser.add_argument('-train_tgt', required=False,
help="Path to the training target data")
parser.add_argument('-valid',
help="""Path to the monolingual validation data""")
parser.add_argument('-valid_src', required=False,
help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=False,
help="Path to the validation target data")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-src_vocab_size', type=int, default=50000,
help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=50000,
help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
help="Path to an existing target vocabulary")
parser.add_argument('-src_seq_length', type=int, default=50,
help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=50,
help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
parser.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
parser.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
opt = parser.parse_args()
torch.manual_seed(opt.seed)
def makeVocabulary(filename, size):
vocab = onmt.Dict([onmt.Constants.PAD_WORD, onmt.Constants.UNK_WORD,
onmt.Constants.BOS_WORD, onmt.Constants.EOS_WORD],
lower=opt.lower)
with open(filename) as f:
for sent in f.readlines():
for word in sent.split():
vocab.add(word)
originalSize = vocab.size()
vocab = vocab.prune(size)
print('Created dictionary of size %d (pruned from %d)' %
(vocab.size(), originalSize))
return vocab
def initVocabulary(name, dataFile, vocabFile, vocabSize):
vocab = None
if vocabFile is not None:
# If given, load existing word dictionary.
print('Reading ' + name + ' vocabulary from \'' + vocabFile + '\'...')
vocab = onmt.Dict()
vocab.loadFile(vocabFile)
print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
if vocab is None:
# If a dictionary is still missing, generate it.
print('Building ' + name + ' vocabulary...')
genWordVocab = makeVocabulary(dataFile, vocabSize)
vocab = genWordVocab
print()
return vocab
def saveVocabulary(name, vocab, file):
print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
vocab.writeFile(file)
def makeBilingualData(srcFile, tgtFile, srcDicts, tgtDicts):
src, tgt = [], []
sizes = []
count, ignored = 0, 0
print('Processing %s & %s ...' % (srcFile, tgtFile))
srcF = open(srcFile)
tgtF = open(tgtFile)
while True:
sline = srcF.readline()
tline = tgtF.readline()
# normal end of file
if sline == "" and tline == "":
break
# source or target does not have same number of lines
if sline == "" or tline == "":
print('WARNING: src and tgt do not have the same # of sentences')
break
sline = sline.strip()
tline = tline.strip()
# source and/or target are empty
if sline == "" or tline == "":
print('WARNING: ignoring an empty line ('+str(count+1)+')')
continue
srcWords = sline.split()
tgtWords = tline.split()
if len(srcWords) <= opt.src_seq_length \
and len(tgtWords) <= opt.tgt_seq_length:
# Check truncation condition.
if opt.src_seq_length_trunc != 0:
srcWords = srcWords[:opt.src_seq_length_trunc]
if opt.tgt_seq_length_trunc != 0:
tgtWords = tgtWords[:opt.tgt_seq_length_trunc]
if opt.src_type == "bitext":
src += [srcDicts.convertToIdx(srcWords,
onmt.Constants.UNK_WORD)]
elif opt.src_type == "img":
loadImageLibs()
src += [transforms.ToTensor()(
Image.open(opt.src_img_dir + "/" + srcWords[0]))]
tgt += [tgtDicts.convertToIdx(tgtWords,
onmt.Constants.UNK_WORD,
onmt.Constants.BOS_WORD,
onmt.Constants.EOS_WORD)]
sizes += [len(srcWords)]
else:
ignored += 1
count += 1
if count % opt.report_every == 0:
print('... %d sentences prepared' % count)
srcF.close()
tgtF.close()
if opt.shuffle == 1:
print('... shuffling sentences')
perm = torch.randperm(len(src))
src = [src[idx] for idx in perm]
tgt = [tgt[idx] for idx in perm]
sizes = [sizes[idx] for idx in perm]
print('... sorting sentences by size')
_, perm = torch.sort(torch.Tensor(sizes))
src = [src[idx] for idx in perm]
tgt = [tgt[idx] for idx in perm]
print(('Prepared %d sentences ' +
'(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
(len(src), ignored, opt.src_seq_length, opt.tgt_seq_length))
return src, tgt
def makeMonolingualData(srcFile, srcDicts):
src = []
sizes = []
count, ignored = 0, 0
print('Processing %s ...' % (srcFile))
with open(srcFile) as srcF:
for sline in srcF:
sline = sline.strip()
# source and/or target are empty
if sline == "":
print('WARNING: ignoring an empty line ('+str(count+1)+')')
continue
srcWords = sline.split()
if len(srcWords) <= opt.src_seq_length:
                # Check truncation condition.
if opt.src_seq_length_trunc != 0:
srcWords = srcWords[:opt.src_seq_length_trunc]
src += [srcDicts.convertToIdx(srcWords,
onmt.Constants.UNK_WORD,
onmt.Constants.BOS_WORD,
onmt.Constants.EOS_WORD)]
sizes += [len(srcWords)]
else:
ignored += 1
count += 1
if count % opt.report_every == 0:
print('... %d sentences prepared' % count)
if opt.shuffle == 1:
print('... shuffling sentences')
perm = torch.randperm(len(src))
src = [src[idx] for idx in perm]
sizes = [sizes[idx] for idx in perm]
print('... sorting sentences by size')
_, perm = torch.sort(torch.Tensor(sizes))
src = [src[idx] for idx in perm]
print(('Prepared %d sentences ' +
'(%d ignored due to length == 0 or src len > %d)') %
(len(src), ignored, opt.src_seq_length))
return src
def main():
if opt.src_type in ['bitext', 'img']:
assert None not in [opt.train_src, opt.train_tgt,
opt.valid_src, opt.valid_tgt], \
"With source type %s the following parameters are" \
"required: -train_src, -train_tgt, " \
"-valid_src, -valid_tgt" % (opt.src_type)
elif opt.src_type == 'monotext':
assert None not in [opt.train, opt.valid], \
"With source type monotext the following " \
"parameters are required: -train, -valid"
dicts = {}
dicts['src'] = onmt.Dict()
if opt.src_type == 'bitext':
dicts['src'] = initVocabulary('source', opt.train_src, opt.src_vocab,
opt.src_vocab_size)
dicts['tgt'] = initVocabulary('target', opt.train_tgt, opt.tgt_vocab,
opt.tgt_vocab_size)
elif opt.src_type == 'monotext':
dicts['src'] = initVocabulary('source', opt.train, opt.src_vocab,
opt.src_vocab_size)
elif opt.src_type == 'img':
dicts['tgt'] = initVocabulary('target', opt.train_tgt, opt.tgt_vocab,
opt.tgt_vocab_size)
print('Preparing training ...')
train = {}
valid = {}
if opt.src_type in ['bitext', 'img']:
train['src'], train['tgt'] = makeBilingualData(opt.train_src,
opt.train_tgt,
dicts['src'],
dicts['tgt'])
print('Preparing validation ...')
valid['src'], valid['tgt'] = makeBilingualData(opt.valid_src,
opt.valid_tgt,
dicts['src'],
dicts['tgt'])
elif opt.src_type == 'monotext':
train['src'] = makeMonolingualData(opt.train, dicts['src'])
train['tgt'] = train['src'] # Keeps compatibility with bilingual code
print('Preparing validation ...')
valid['src'] = makeMonolingualData(opt.valid, dicts['src'])
valid['tgt'] = valid['src']
if opt.src_vocab is None:
saveVocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.src_type in ['bitext', 'img'] and opt.tgt_vocab is None:
saveVocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
print('Saving data to \'' + opt.save_data + '.train.pt\'...')
save_data = {'dicts': dicts,
'type': opt.src_type,
'train': train,
'valid': valid}
torch.save(save_data, opt.save_data + '.train.pt')
if __name__ == "__main__":
main()
|
11515599
|
from unrealsdk import *
import os
from . import travel
from . import betterspawns
from Mods.ModMenu import SDKMod, EnabledSaveType, Keybind, ModTypes
class Main(SDKMod):
Name: str = "BSABT"
    Description: str = "<B><U><font size='14' color='#e8131d'>Better Spawns and Better Travel</font></U></B>\n" \
                       "This Mod reimplements some of the BL3 QoL features, such as spawning at the last respawn station " \
                       "you triggered in game, allowing you to open the FT list from anywhere and directly spawning in " \
                       "your car or near a FT Station directly from the map menu. To teleport in your car or near a FT " \
                       "simply place and remove a waypoint near the car/FT on your map. To open the FT menu press the (by " \
                       "default) F1 key."
Author: str = "Juso"
Types: ModTypes = ModTypes.Utility
SaveEnabledState: EnabledSaveType = EnabledSaveType.LoadWithSettings
Keybinds = [Keybind("Show FT", "F1")]
def __init__(self):
super().__init__()
self.FILE_PATH = os.path.dirname(os.path.realpath(__file__))
self.Travel = travel.MapFT()
self.Spawns = betterspawns.Spawns(self.FILE_PATH)
def GameInputPressed(self, input):
self.Travel.GameInputPressed(input)
def Enable(self):
self.Travel.Enable()
self.Spawns.Enable()
def Disable(self):
self.Travel.Disable()
self.Spawns.Disable()
if __name__.startswith("Mods"):
RegisterMod(Main())
|
11515639
|
import shlex
import subprocess
from typing import Optional
def run(command: str, with_console: bool = True, line_limit: Optional[int] = None) -> None:
output = subprocess.run(shlex.split(command), capture_output=True, text=True)
print()
if with_console:
print("```console")
print(f"$ {command}")
output = output.stdout.strip()
if line_limit:
output = "".join(output.splitlines(keepends=True)[:line_limit]) + "..."
print(output)
if with_console:
print("```")
print()
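# Example (illustrative): print a command and the first three lines of its
# output wrapped in a Markdown console block:
#   run("ls -la", line_limit=3)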
|
11515653
|
from django.http.response import JsonResponse
from django.views.decorators.http import require_http_methods
import sys, MySQLdb
sys.path.insert(0, '..')
from mysite import db_config
def db_query(std_name, std_type):
conn = db_config.mysql_connect()
curs = conn.cursor()
if std_type == 'detail':
        '''
        Request type: GET
        Request parameter: std_name, the data standard name
        Response fields:
            id                    primary key id
            std_id                standard number
            name                  standard name
            en_name               standard English name
            business_definition   business definition
            business_rule         business rule
            std_source            standard source
            data_type             data type
            data_format           data format
            code_rule             coding rule
            code_range            coding range
            code_meaning          coding meaning
            business_range        business scope
            dept                  department responsible for the data
            system                system that uses the data
        '''
sql = f"""select id,std_id,name,en_name,business_definition,business_rule,std_source,data_type,data_format,
code_rule,code_range,code_meaning,business_range,dept,`system`
from data_standard_detail
where name='{std_name}'"""
print(sql)
curs.execute(sql)
result = curs.fetchone()
return {
'std_id': result[1],
'name': result[2],
'en_name': result[3],
'business_definition': result[4],
'business_rule': result[5],
'std_source': result[6],
'data_type': result[7],
'data_format': result[8],
'code_rule': result[9],
'code_range': result[10],
'code_meaning': result[11],
'business_range': result[12],
'dept': result[13],
'system': result[14],
}
elif std_type == 'desc':
        '''
        Request type: GET
        Request parameter: std_name, the data standard name
        Response fields:
            id       primary key id
            name     standard name
            content  standard content
        '''
sql = f"select id,name,content from data_standard_desc where name='{std_name}'"
curs.execute(sql)
result = curs.fetchone()
return {
'name': result[1],
'content': result[2],
}
curs.close()
conn.close()
# Query a data standard
@require_http_methods(["GET"])
def query_detail(request):
std_name = request.GET.get('std_name')
std_type = request.GET.get('std_type')
if not all([std_name, std_type]):
return JsonResponse({'msg': '请求参数缺失', 'code': 3000})
data = db_query(std_name, std_type)
return JsonResponse(data)
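# Example (illustrative; the actual URL route is defined elsewhere):
#   GET .../query_detail?std_name=<standard name>&std_type=detail
#   returns the JSON object built by db_query() above.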
# Query the edit history of a data standard
@require_http_methods(["GET"])
def query_update_history(request):
std_name = request.GET.get('std_name')
if std_name is None:
return JsonResponse({'msg': '请求参数缺失', 'code': 3000})
conn = db_config.mysql_connect()
curs = conn.cursor()
sql = f"select username,update_time from data_standard_update_log where std_name='{std_name}' order by update_time desc limit 1"
if curs.execute(sql) == 1:
result = curs.fetchone()
return JsonResponse({'username': result[0], 'last_update_time': str(result[1])})
else:
return JsonResponse({'username': None, 'last_update_time': None})
# Update a data standard
@require_http_methods(["POST"])
def update(request):
username = request.POST.get('username')
std_type = request.POST.get('std_type')
std_name = request.POST.get('std_name')
en_name = request.POST.get('en_name')
business_definition = request.POST.get('business_definition')
business_rule = request.POST.get('business_rule')
std_source = request.POST.get('std_source')
data_type = request.POST.get('data_type')
data_format = request.POST.get('data_format')
code_rule = request.POST.get('code_rule')
code_range = request.POST.get('code_range')
code_meaning = request.POST.get('code_meaning')
business_range = request.POST.get('business_range')
dept = request.POST.get('dept')
system = request.POST.get('system')
content = request.POST.get('content')
conn = db_config.mysql_connect()
curs = conn.cursor()
curs.execute('set autocommit=0')
if not all([std_name, std_type]):
return JsonResponse({'msg': '请求参数缺失', 'code': 3000})
    # Compare the POSTed content with the database content; if they match, no update is needed
orgin_data = db_query(std_name, std_type)
if std_type == 'desc':
post_data = {'name': std_name, 'content': content}
if post_data == orgin_data:
return JsonResponse({'msg': '内容一致,无需修改', 'code': 1001})
else:
try:
                # Save the previous version of the data standard to the log table
                update_log = str(orgin_data.items() - post_data.items()) # the content that the update will replace
sql = f"insert into data_standard_update_log(std_name, username, previous_version) values('{std_name}', '{username}', \"{update_log}\")"
curs.execute(sql)
conn.commit()
                # Update the data standard
sql = f"update data_standard_desc set name='{std_name}', content='{content}' where name='{std_name}'"
curs.execute(sql)
conn.commit()
curs.close()
conn.close()
return JsonResponse({'msg': '修改成功', 'code': 1000})
except Exception as e:
                return JsonResponse({'msg': str(e), 'code': 2000})
elif std_type == 'detail':
post_data = {
'name': std_name,
'en_name': en_name,
'business_definition': business_definition,
'business_rule': business_rule,
'std_source': std_source,
'data_type': data_type,
'data_format': data_format,
'code_rule': code_rule,
'code_range': code_range,
'code_meaning': code_meaning,
'business_range': business_range,
'dept': dept,
'system': system,
}
if post_data == db_query(std_name, std_type):
return JsonResponse({'msg': '内容一致,无需修改', 'code': 1001})
else:
try:
                # Save the previous version of the data standard to the log table
                update_log = str(orgin_data.items() - post_data.items()) # the content that the update will replace
sql = f"insert into data_standard_update_log(std_name, username, previous_version) values('{std_name}', '{username}', \"{update_log}\")"
curs.execute(sql)
conn.commit()
sql = f"""update data_standard_detail set name = '{std_name}',
en_name = '{en_name}',
business_definition = '{business_definition}',
business_rule = '{business_rule}',
std_source = '{std_source}',
data_type = '{data_type}',
data_format = '{data_format}',
code_rule = '{code_rule}',
code_range = '{code_range}',
code_meaning = '{code_meaning}',
business_range = '{business_range}',
dept = '{dept}',
`system` = '{system}'
where name='{std_name}'"""
curs.execute(sql)
conn.commit()
curs.close()
conn.close()
return JsonResponse({'msg': '修改成功', 'code': 1000})
except Exception as e:
return JsonResponse({'msg': str(e), 'code': 2000})
# Fetch the data standard catalogue
@require_http_methods(["GET"])
def query_index(request):
conn = db_config.mysql_connect()
curs = conn.cursor()
sql = "select idx_id, idx_pid,idx_name,is_open from data_standard_index"
curs.execute(sql)
result = curs.fetchall()
data = []
for i in result:
data.append({
'id': i[0],
'pId': i[1],
'name': i[2],
't': i[2],
'open': i[3]
})
curs.close()
conn.close()
return JsonResponse(data, safe=False)
|
11515698
|
import pusher
import scraper
import parameters
import time
MESSAGES = parameters.MESSAGES()
SOURCE_STATUS = parameters.SOURCE_STATUS()
if __name__ == "__main__":
# Run the batch
status = pusher.Status()
scraper = scraper.Scraper()
start_time = time.time()
print(MESSAGES.START_BATCH)
status.changeSourceStatus(SOURCE_STATUS.REBUILD)
scraper.pushAllDocs()
status.changeSourceStatus(SOURCE_STATUS.IDLE)
elapsed_time = round(time.time() - start_time,2)
print(MESSAGES.END_BATCH, elapsed_time, MESSAGES.BATCH_TIME_UNIT)
|
11515708
|
import os
from pathlib import Path
import yaml
# Find the global path of plantseg
plantseg_global_path = Path(__file__).parent.absolute()
# Create configs directory at startup
home_path = os.path.expanduser("~")
PLANTSEG_MODELS_DIR = ".plantseg_models"
configs_path = os.path.join(home_path, PLANTSEG_MODELS_DIR, "configs")
os.makedirs(configs_path, exist_ok=True)
# create custom zoo if does not exist
custom_zoo = os.path.join(home_path, PLANTSEG_MODELS_DIR, 'custom_zoo.yaml')
if not os.path.exists(custom_zoo):
with open(custom_zoo, 'w') as f:
yaml.dump({}, f)
# Resources directory
RESOURCES_DIR = "resources"
model_zoo_path = os.path.join(plantseg_global_path, RESOURCES_DIR, "models_zoo.yaml")
standard_config_template = os.path.join(plantseg_global_path, RESOURCES_DIR, "config_gui_template.yaml")
|
11515744
|
import pytest
from mythril.disassembler.disassembly import Disassembly
from mythril.laser.ethereum.state.environment import Environment
from mythril.laser.ethereum.state.machine_state import MachineState
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.state.world_state import WorldState
from mythril.laser.ethereum.instructions import Instruction
from mythril.laser.ethereum.transaction.transaction_models import (
MessageCallTransaction,
TransactionStartSignal,
)
from mythril.laser.ethereum.state.calldata import ConcreteCalldata
last_state = None
created_contract_account = None
def test_create():
world_state = WorldState()
account = world_state.create_account(balance=10, address=101)
account.code = Disassembly("60606060")
environment = Environment(account, None, None, None, None, None)
og_state = GlobalState(
world_state, environment, None, MachineState(gas_limit=8000000)
)
code_raw = []
code = "606060606060"
for i in range(len(code) // 2):
code_raw.append(int(code[2 * i : 2 * (i + 1)], 16))
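    # "606060606060" parsed two hex characters at a time gives [0x60] * 6; the
    # same six bytes are written to memory below, where CREATE reads its init code.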
calldata = ConcreteCalldata("1", code_raw)
environment.calldata = calldata
og_state.transaction_stack.append(
(MessageCallTransaction(world_state=WorldState(), gas_limit=8000000), None)
)
value = 3
og_state.mstate.stack = [6, 0, value]
instruction = Instruction("create", dynamic_loader=None)
og_state.mstate.memory.extend(100)
og_state.mstate.memory[0:6] = [96] * 6
# Act + Assert
with pytest.raises(TransactionStartSignal) as t:
_ = instruction.evaluate(og_state)[0]
assert t.value.transaction.call_value == value
assert t.value.transaction.code.bytecode == code
assert (
t.value.transaction.callee_account.address
== world_state._generate_new_address(account.address.value)
)
|
11515766
|
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import math
from spell_utils import skillCheck
# Captivating Melody: Complete Mage, p. 40
captivatingMelodyEnum = 3000
print "Registering Captivating Melody"
def CaptivatingMelodyRadial(attachee, args, evt_obj):
isAdded = attachee.condition_add_with_args("Captivating Melody Effect",0,0, 0, 0) # adds the "Captivating Melody" condition on first radial menu build
radialAction = tpdp.RadialMenuEntryPythonAction(-1, D20A_PYTHON_ACTION, captivatingMelodyEnum, 0, "TAG_INTERFACE_HELP")
radialAction.add_child_to_standard(attachee, tpdp.RadialMenuStandardNode.Feats)
return 0
def OnCaptivatingMelodyCheck(attachee, args, evt_obj):
#Get the current number of charges
MusicCharges = attachee.d20_query("Current Bardic Music")
#Check for remaining bardic music uses
if (MusicCharges < 1):
evt_obj.return_val = AEC_OUT_OF_CHARGES
return 0
#Don't allow a second use in a single round
if args.get_arg(0):
evt_obj.return_val = AEC_INVALID_ACTION
return 0
return 1
def CaptivatingMelodyPerform(attachee, args, evt_obj):
#Set to active
args.set_arg(0, 1)
#Deduct a turn undead charge
attachee.d20_send_signal("Deduct Bardic Music Charge")
return 0
def CaptivatingMelodyBeginRound(attachee, args, evt_obj):
    args.set_arg(0, 0) # always remove at the beginning of the round
args.set_arg(1, 0) # no spells cast this round
return 0
def CaptivatingMelodyDCBonus(attachee, args, evt_obj):
# not active, do nothing
if not args.get_arg(0):
return 0
#If this is not the first spell cast, don't apply the bonus
    if args.get_arg(1) > 0:
return 0
    #Must be the appropriate type of spell
spell_enum = evt_obj.spell_packet.spell_enum
if (spell_enum == 0):
return 0
spell_entry = tpdp.SpellEntry(spell_enum)
if spell_entry.spell_school_enum != Enchantment and spell_entry.spell_school_enum != Illusion:
return 0
if evt_obj.spell_packet.get_spell_casting_class() != stat_level_bard:
return 0
#Make a perform check
performDC = evt_obj.spell_packet.spell_known_slot_level + 15
result = skillCheck(attachee, skill_perform, performDC)
if result == False:
return 0
#Finally add the bonus
evt_obj.bonus_list.add(2, 0, "Captivating Melody")
return 0
def CaptivatingMelodyTooltip(attachee, args, evt_obj):
# not active, do nothing
if not args.get_arg(0):
return 0
# Set the tooltip
evt_obj.append("Captivating Melody")
return 0
def CaptivatingMelodyEffectTooltip(attachee, args, evt_obj):
# not active, do nothing
if not args.get_arg(0):
return 0
#Once a spell has been cast disable the tooltip
    if args.get_arg(1):
return 0
# Set the tooltip
evt_obj.append(tpdp.hash("CAPTIVATING_MELODY"), -2, "")
return 0
def CaptivatingMelodyCastSpell(attachee, args, evt_obj):
# not active, do nothing
if not args.get_arg(0):
return 0
    #Increment the count of spells cast this round
    spellCastCount = args.get_arg(1)
    args.set_arg(1, spellCastCount + 1)
return 0
#Setup the feat
CaptivatingMelodyFeat = PythonModifier("Captivating Melody Feat", 4)
CaptivatingMelodyFeat.MapToFeat("Captivating Melody")
CaptivatingMelodyFeat.AddHook(ET_OnBuildRadialMenuEntry, EK_NONE, CaptivatingMelodyRadial, ())
#Setup the effect
CaptivatingMelodyEffect = PythonModifier("Captivating Melody Effect", 4) #Enabled, Spell Cast, Extra, Extra
CaptivatingMelodyEffect.AddHook(ET_OnD20PythonActionCheck, captivatingMelodyEnum, OnCaptivatingMelodyCheck, ())
CaptivatingMelodyEffect.AddHook(ET_OnD20PythonActionPerform, captivatingMelodyEnum, CaptivatingMelodyPerform, ())
CaptivatingMelodyEffect.AddHook(ET_OnBeginRound, EK_NONE, CaptivatingMelodyBeginRound, ())
CaptivatingMelodyEffect.AddHook(ET_OnGetTooltip, EK_NONE, CaptivatingMelodyTooltip, ())
CaptivatingMelodyEffect.AddHook(ET_OnTargetSpellDCBonus, EK_NONE, CaptivatingMelodyDCBonus, ())
CaptivatingMelodyEffect.AddHook(ET_OnGetEffectTooltip, EK_NONE, CaptivatingMelodyEffectTooltip, ())
CaptivatingMelodyEffect.AddHook(ET_OnD20Signal, EK_S_Spell_Cast, CaptivatingMelodyCastSpell, ())
|
11515815
|
import numpy as np
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .decoder import Decoder
from .encoder import Encoder
from selfModules.embedding import Embedding
from utils.functional import kld_coef, parameters_allocation_check, fold
from beam_search import Beam
class RVAE(nn.Module):
def __init__(self, params, params_2):
super(RVAE, self).__init__()
self.params = params
self.params_2 = params_2 # Encoder-2 parameters
self.embedding = Embedding(self.params, '')
self.embedding_2 = Embedding(self.params_2, '', True)
self.encoder = Encoder(self.params)
self.encoder_2 = Encoder(self.params_2)
#
self.context_to_mu = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
self.context_to_logvar = nn.Linear(self.params.encoder_rnn_size * 2, self.params.latent_variable_size)
# self.encoder_3 = Encoder(self.params)
self.decoder = Decoder(self.params_2) # change this to params_2
def forward(self, drop_prob,
encoder_word_input=None, encoder_character_input=None,
encoder_word_input_2=None, encoder_character_input_2=None,
decoder_word_input_2=None, decoder_character_input_2=None,
z=None, initial_state=None):
assert parameters_allocation_check(self)
use_cuda = self.embedding.word_embed.weight.is_cuda
assert z is None and fold(lambda acc, parameter: acc and parameter is not None,
[encoder_word_input, encoder_character_input, decoder_word_input_2],
True) \
or (z is not None and decoder_word_input_2 is not None)
if z is None:
[batch_size, _] = encoder_word_input.size()
encoder_input = self.embedding(encoder_word_input, encoder_character_input)
[batch_size_2, _] = encoder_word_input_2.size()
encoder_input_2 = self.embedding_2(encoder_word_input_2, encoder_character_input_2)
context, h_0, c_0 = self.encoder(encoder_input, None)
State = (h_0, c_0)
context_2, _, _ = self.encoder_2(encoder_input_2, State)
mu = self.context_to_mu(context_2)
logvar = self.context_to_logvar(context_2)
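            # Reparameterisation trick: draw eps ~ N(0, I) and form z = mu + std * eps;
            # the analytic KL divergence to the standard normal prior,
            # KL = -0.5 * sum(1 + logvar - mu^2 - exp(logvar)), is computed below.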
std = t.exp(0.5 * logvar)
z = Variable(t.randn([batch_size, self.params.latent_variable_size]))
if use_cuda:
z = z.cuda()
z = z * std + mu
kld = (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean().squeeze()
# encoder_input = self.embedding(encoder_word_input, encoder_character_input)
# _ , h_0 , c_0 = self.encoder_3(encoder_input, None)
initial_state = State
else:
kld = None
mu = None
std = None
decoder_input_2 = self.embedding_2.word_embed(decoder_word_input_2)
out, final_state = self.decoder(decoder_input_2, z, drop_prob, initial_state)
return out, final_state, kld, mu, std
def learnable_parameters(self):
return [p for p in self.parameters() if p.requires_grad]
def trainer(self, optimizer, batch_loader, batch_loader_2):
def train(i, batch_size, use_cuda, dropout, start_index):
input = batch_loader.next_batch(batch_size, 'train', start_index)
input = [Variable(t.from_numpy(var)) for var in input]
input = [var.long() for var in input]
input = [var.cuda() if use_cuda else var for var in input]
[encoder_word_input, encoder_character_input, decoder_word_input, decoder_character_input, target] = input
input_2 = batch_loader_2.next_batch(batch_size, 'train', start_index)
input_2 = [Variable(t.from_numpy(var)) for var in input_2]
input_2 = [var.long() for var in input_2]
input_2 = [var.cuda() if use_cuda else var for var in input_2]
[encoder_word_input_2, encoder_character_input_2, decoder_word_input_2, decoder_character_input_2,
target] = input_2
logits, _, kld, _, _ = self(dropout,
encoder_word_input, encoder_character_input,
encoder_word_input_2, encoder_character_input_2,
decoder_word_input_2, decoder_character_input_2,
z=None)
# logits = logits.view(-1, self.params.word_vocab_size)
logits = logits.view(-1, self.params_2.word_vocab_size)
target = target.view(-1)
cross_entropy = F.cross_entropy(logits, target)
loss = 79 * cross_entropy + kld_coef(i) * kld
optimizer.zero_grad()
loss.backward()
optimizer.step()
return cross_entropy, kld, kld_coef(i)
return train
def validater(self, batch_loader, batch_loader_2):
def validate(batch_size, use_cuda, start_index):
input = batch_loader.next_batch(batch_size, 'valid', start_index)
input = [Variable(t.from_numpy(var)) for var in input]
input = [var.long() for var in input]
input = [var.cuda() if use_cuda else var for var in input]
[encoder_word_input, encoder_character_input, decoder_word_input, decoder_character_input, target] = input
input_2 = batch_loader_2.next_batch(batch_size, 'valid', start_index)
input_2 = [Variable(t.from_numpy(var)) for var in input_2]
input_2 = [var.long() for var in input_2]
input_2 = [var.cuda() if use_cuda else var for var in input_2]
[encoder_word_input_2, encoder_character_input_2, decoder_word_input_2, decoder_character_input_2,
target] = input_2
logits, _, kld, _, _ = self(0.,
encoder_word_input, encoder_character_input,
encoder_word_input_2, encoder_character_input_2,
decoder_word_input_2, decoder_character_input_2,
z=None)
logits = logits.view(-1, self.params_2.word_vocab_size)
target = target.view(-1)
cross_entropy = F.cross_entropy(logits, target)
return cross_entropy, kld
return validate
def sample(self, batch_loader, seq_len, seed, use_cuda, State):
if use_cuda:
seed = seed.cuda()
decoder_word_input_np, decoder_character_input_np = batch_loader.go_input(1)
decoder_word_input = Variable(t.from_numpy(decoder_word_input_np).long())
decoder_character_input = Variable(t.from_numpy(decoder_character_input_np).long())
if use_cuda:
decoder_word_input, decoder_character_input = decoder_word_input.cuda(), decoder_character_input.cuda()
result = ''
initial_state = State
for i in range(seq_len):
logits, initial_state, _, _, _ = self(0., None, None,
None, None,
decoder_word_input, decoder_character_input,
seed, initial_state)
# forward(self, drop_prob,
# encoder_word_input=None, encoder_character_input=None,
# encoder_word_input_2=None, encoder_character_input_2=None,
# decoder_word_input_2=None, decoder_character_input_2=None,
# z=None, initial_state=None):
# logits = logits.view(-1, self.params.word_vocab_size)
# logits = logits.view(-1, self.params.word_vocab_size)
logits = logits.view(-1, self.params_2.word_vocab_size)
# print '---------------------------------------'
# print 'Printing logits'
# print logits
# print '------------------------------------------'
            prediction = F.softmax(logits, dim=-1)
word = batch_loader.sample_word_from_distribution(prediction.data.cpu().numpy()[-1])
if word == batch_loader.end_token:
break
result += ' ' + word
decoder_word_input_np = np.array([[batch_loader.word_to_idx[word]]])
decoder_character_input_np = np.array([[batch_loader.encode_characters(word)]])
decoder_word_input = Variable(t.from_numpy(decoder_word_input_np).long())
decoder_character_input = Variable(t.from_numpy(decoder_character_input_np).long())
if use_cuda:
decoder_word_input, decoder_character_input = decoder_word_input.cuda(), decoder_character_input.cuda()
return result
def sampler(self, batch_loader, batch_loader_2, seq_len, seed, use_cuda, i, beam_size, n_best):
input = batch_loader.next_batch(1, 'valid', i)
input = [Variable(t.from_numpy(var)) for var in input]
input = [var.long() for var in input]
input = [var.cuda() if use_cuda else var for var in input]
[encoder_word_input, encoder_character_input, decoder_word_input, decoder_character_input, target] = input
encoder_input = self.embedding(encoder_word_input, encoder_character_input)
_, h0, c0 = self.encoder(encoder_input, None)
State = (h0, c0)
results, scores = self.sample_beam(batch_loader_2, seq_len, seed, use_cuda, State, beam_size, n_best)
return results, scores
def sample_beam(self, batch_loader, seq_len, seed, use_cuda, State, beam_size, n_best):
# seed = Variable(t.from_numpy(seed).float())
if use_cuda:
seed = seed.cuda()
decoder_word_input_np, decoder_character_input_np = batch_loader.go_input(1)
decoder_word_input = Variable(t.from_numpy(decoder_word_input_np).long())
decoder_character_input = Variable(t.from_numpy(decoder_character_input_np).long())
if use_cuda:
decoder_word_input, decoder_character_input = decoder_word_input.cuda(), decoder_character_input.cuda()
dec_states = State
dec_states = [
dec_states[0].repeat(1, beam_size, 1),
dec_states[1].repeat(1, beam_size, 1)
]
drop_prob = 0.0
beam_size = beam_size
batch_size = 1
beam = [Beam(beam_size, batch_loader, cuda=True) for k in range(batch_size)]
batch_idx = list(range(batch_size))
remaining_sents = batch_size
for i in range(seq_len):
input = t.stack(
[b.get_current_state() for b in beam if not b.done]
).t().contiguous().view(1, -1)
trg_emb = self.embedding_2.word_embed(Variable(input).transpose(1, 0))
# print trg_emb.size()
# print seed.size()
trg_h, dec_states = self.decoder.only_decoder_beam(trg_emb, seed, drop_prob, dec_states)
dec_out = trg_h.squeeze(1)
# print "dec_out:", dec_out.size()
            out = F.softmax(self.decoder.fc(dec_out), dim=-1).unsqueeze(0)
word_lk = out.view(
beam_size,
remaining_sents,
-1
).transpose(0, 1).contiguous()
active = []
for b in range(batch_size):
if beam[b].done:
continue
idx = batch_idx[b]
if not beam[b].advance(word_lk.data[idx]):
active += [b]
for dec_state in dec_states: # iterate over h, c
# layers x beam*sent x dim
sent_states = dec_state.view(
-1, beam_size, remaining_sents, dec_state.size(2)
)[:, :, idx]
sent_states.data.copy_(
sent_states.data.index_select(
1,
beam[b].get_current_origin()
)
)
if not active:
break
active_idx = t.cuda.LongTensor([batch_idx[k] for k in active])
batch_idx = {beam: idx for idx, beam in enumerate(active)}
def update_active(t):
view = t.data.view(
-1, remaining_sents,
self.params.decoder_rnn_size
)
new_size = list(t.size())
new_size[-2] = new_size[-2] * len(active_idx) \
// remaining_sents
return Variable(view.index_select(
1, active_idx
).view(*new_size))
dec_states = (
update_active(dec_states[0]),
update_active(dec_states[1])
)
dec_out = update_active(dec_out)
remaining_sents = len(active)
allHyp, allScores = [], []
for b in range(batch_size):
scores, ks = beam[b].sort_best()
allScores += [scores[:n_best]]
hyps = zip(*[beam[b].get_hyp(k) for k in ks[:n_best]])
allHyp += [hyps]
return allHyp, allScores
|
11515828
|
from .tool.func import *
def list_admin_group_2(conn):
curs = conn.cursor()
list_data = '<ul class="inside_ul">'
org_acl_list = get_default_admin_group()
curs.execute(db_change("select distinct name from alist order by name asc"))
for data in curs.fetchall():
        if admin_check() == 1 and \
            data[0] not in org_acl_list:
delete_admin_group = ' <a href="/delete_admin_group/' + url_pas(data[0]) + '">(' + load_lang("delete") + ')</a>'
else:
delete_admin_group = ''
list_data += '' + \
'<li>' + \
'<a href="/admin_plus/' + url_pas(data[0]) + '">' + data[0] + '</a>' + \
delete_admin_group + \
'</li>' + \
''
list_data += '' + \
'</ul>' + \
'<hr class="main_hr">' + \
'<a href="/manager/8">(' + load_lang('add') + ')</a>' + \
''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('admin_group_list'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = list_data,
menu = [['manager', load_lang('return')]]
))
|
11515863
|
import os.path
import json
import logging
from itertools import chain
from django.conf import settings
from django.utils.module_loading import import_string
from django.db.models import Q
from django.contrib.staticfiles.storage import staticfiles_storage
from wazimap.models import Geography
log = logging.getLogger(__name__)
# GDAL is difficult to install, so we make it an optional dependency.
# Here, we check if it's installed and warn if it isn't.
try:
import osgeo.gdal # noqa
HAS_GDAL = True
except ImportError:
HAS_GDAL = False
class LocationNotFound(Exception):
pass
class GeoData(object):
""" General Wazimap geography helper object.
This object helps Wazimap load geographies, navigate geo level hierarchies,
find locations, etc. It's a good place to override this functionality
if you want to use a different geometry setup.
To override behaviour, implement your own GeoData object (probably inheriting
from this one), then set the `WAZIMAP['geodata']` to the dotted path of your
new class in your `settings.py`. Wazimap will then load that class and make
it available as `wazimap.geo.geo_data`.
"""
_versions = None
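    # A minimal sketch of such an override (module and class names below are
    # illustrative, not part of Wazimap):
    #
    #     # myapp/geo.py
    #     from wazimap.geo import GeoData
    #
    #     class MyGeoData(GeoData):
    #         def setup_geometry(self):
    #             ...  # load boundaries from a different source
    #
    #     # settings.py
    #     WAZIMAP['geodata'] = 'myapp.geo.MyGeoData'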
def __init__(self):
self.geo_model = Geography
self.setup_levels()
self.setup_geometry()
self._default_version = None
self._versions = None
self._global_latest_version = None
def _setup_versions(self):
""" Find all the geography versions.
"""
self._versions = [x['version'] for x in self.geo_model.objects.values('version').distinct().all()]
self._global_latest_version = sorted(self.versions)[-1]
# _default_version = None means fall back to whatever is latest for geography
self._default_version = settings.WAZIMAP['default_geo_version']
@property
def versions(self):
if self._versions is None:
self._setup_versions()
return self._versions
@property
def global_latest_version(self):
if self._global_latest_version is None:
self._setup_versions()
return self._global_latest_version
@property
def default_version(self):
if self._default_version is None:
self._setup_versions()
return self._default_version
def setup_levels(self):
""" Setup the summary level hierarchy from the `WAZIMAP['levels']` and
`WAZIMAP['comparative_levels']` settings.
"""
self.comparative_levels = ['this'] + settings.WAZIMAP['comparative_levels']
self.geo_levels = settings.WAZIMAP['levels']
parents = {}
for code, level in self.geo_levels.items():
level.setdefault('name', code)
level.setdefault('plural', code + 's')
level.setdefault('children', [])
level['sumlev'] = code
for kid in level['children']:
parents.setdefault(kid, []).append(code)
# fold in the ancestors
def climb(code):
return chain(parents.get(code, []), *[climb(p) for p in parents.get(code, [])])
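        # e.g. with country -> province -> municipality, climb('municipality')
        # yields 'province' followed by 'country' (all ancestors, nearest first).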
for code, items in parents.items():
self.geo_levels[code]['ancestors'] = list(set(climb(code)))
# root level
roots = [key for key, lev in self.geo_levels.items() if not lev.get('ancestors')]
if not roots or len(roots) > 1:
raise ValueError("geo_levels must have a single root item, but we found: %s" % roots)
self.root_level = roots[0]
def setup_geometry(self):
""" Load boundaries from geojson shape files.
"""
# map from levels to a dict of geoid-keyed feature
# objects, including their geometry as shapely shapes
#
# eg.
#
# {
# 'province': {
# 'GT': {
# 'properties': { ... },
# 'shape': <shapely shape>
# }
# }
# }
#
self.geometry = {}
self.geometry_files = settings.WAZIMAP.get('geometry_data', {})
for level in self.geo_levels.keys():
# sanity check for geo version
if level in self.geometry_files or self.geometry_files.keys() == [''] and isinstance(self.geometry_files[''], basestring):
# The geometry_data must include a version key. For example:
#
# geometry_data = {
# '2011': {
# 'province': 'geo/2011/country.geojson',
# 'country': 'geo/2011/country.geojson',
                #     },
                #     '2016': {
                #         'province': 'geo/2016/country.geojson',
                #         'country': 'geo/2016/country.geojson',
                #     },
#
# If you aren't using geo versioning, then use the default geo
# version '' as the first key:
#
# geometry_data = {
# '': {
# 'province': 'geo/2011/country.geojson',
# 'country': 'geo/2011/country.geojson',
# }
# }
suggestion = {'': self.geometry_files}
raise ValueError("The geometry_data setting is missing a geometry version key. You probably aren't using geometry versions just need to " +
"change WAZIMAP['geometry_data'] to be: %s" % suggestion)
for version in self.geometry_files.keys():
fname, js = self.load_geojson_for_level(level, version)
if not js:
continue
if js['type'] != 'FeatureCollection':
raise ValueError("GeoJSON files must contain a FeatureCollection. The file %s has type %s" % (fname, js['type']))
level_detail = self.geometry.setdefault(version, {}).setdefault(level, {})
for feature in js['features']:
props = feature['properties']
shape = None
if HAS_GDAL and feature['geometry']:
from shapely.geometry import asShape
try:
shape = asShape(feature['geometry'])
except ValueError as e:
log.error("Error parsing geometry for %s-%s from %s: %s. Feature: %s"
% (level, props['code'], fname, e.message, feature), exc_info=e)
raise e
level_detail[props['code']] = {
'properties': props,
'shape': shape
}
def load_geojson_for_level(self, level, version):
files = self.geometry_files[version]
fname = files.get(level, files.get(''))
if not fname:
return None, None
# we have to have geojson
name, ext = os.path.splitext(fname)
if ext != '.geojson':
fname = name + '.geojson'
fname = staticfiles_storage.path(fname)
# try load it
try:
with open(fname, 'r') as f:
return fname, json.load(f)
except IOError as e:
if e.errno == 2:
log.warn("Couldn't open geometry file %s -- no geometry will be available for level %s and version '%s'" % (fname, level, version))
else:
raise e
return None, None
def root_geography(self, version=None):
""" First geography with no parents. """
query = self.geo_model.objects.filter(parent_level=None, parent_code=None, geo_level=self.root_level)
if version is None:
version = self.default_version
if version is None:
query = query.order_by("-version")
else:
query = query.filter(version=version)
return query.first()
def get_geography(self, geo_code, geo_level, version=None):
""" Get a geography object for this geography, or raise LocationNotFound if it doesn't exist.
If a version is given, find a geography with that version. Otherwise find the most recent version.
"""
query = self.geo_model.objects.filter(geo_level=geo_level, geo_code=geo_code)
if version is None:
version = self.default_version
if version is None:
query = query.order_by("-version")
else:
query = query.filter(version=version)
geo = query.first()
if not geo:
raise LocationNotFound("Invalid level, code and version: %s-%s '%s'" % (geo_level, geo_code, version))
return geo
def get_geometry(self, geo):
""" Get the geometry description for a geography. This is a dict
with two keys, 'properties' which is a dict of properties,
and 'shape' which is a shapely shape (may be None).
"""
return self.geometry.get(geo.version, {}).get(geo.geo_level, {}).get(geo.geo_code)
def get_locations(self, search_term, levels=None, version=None):
"""
Try to find locations based on a search term, possibly limited
to +levels+.
Returns an ordered list of geo models.
"""
search_term = search_term.strip()
query = self.geo_model.objects.filter(
Q(name__icontains=search_term) | Q(geo_code=search_term.upper())
).distinct("name")
if version is None:
version = self.default_version
if version is None:
version = self.global_latest_version
if levels:
query = query.filter(geo_level__in=levels)
# TODO: order by level?
objects = sorted(query[:10], key=lambda o: [o.geo_level, o.name, o.geo_code])
return objects
def get_locations_from_coords(self, longitude, latitude, levels=None, version=None):
"""
Returns a list of geographies containing this point.
"""
if not HAS_GDAL:
gdal_missing(critical=True)
from shapely.geometry import Point
p = Point(float(longitude), float(latitude))
geos = []
if version is None:
version = self.default_version
if version is None:
version = self.global_latest_version
        for version_geometries in self.geometry.values():
            for level_features in version_geometries.values():
                for feature in level_features.values():
                    if feature['shape'] and feature['shape'].contains(p):
                        geo = self.get_geography(feature['properties']['code'],
                                                 feature['properties']['level'],
                                                 version)
                        if not levels or geo.geo_level in levels:
                            geos.append(geo)
return geos
def get_summary_geo_info(self, geo):
""" Get a list of (level, code) tuples of geographies that
this geography should be compared against.
This is the intersection of +comparative_levels+ and the
ancestors of the geography.
"""
ancestors = {g.geo_level: g for g in geo.ancestors()}
return [(lev, ancestors[lev].geo_code) for lev in self.comparative_levels if lev in ancestors]
def get_comparative_geos(self, geo):
""" Get a list of geographies to be used as comparisons for +geo+.
"""
return [self.get_geography(code, level, geo.version) for level, code in self.get_summary_geo_info(geo)]
def first_child_level(self):
# first child level in the hierarchy
return self.geo_levels[self.root_level]['children'][0]
def primary_release_year(self, geo):
""" Return the primary release year to use for the provided geography.
This uses the `WAZIMAP['primary_release_year']` setting to lookup the
year based on the geo's level, and defaults to `latest`.
"""
return settings.WAZIMAP['primary_release_year'].get(geo.geo_level, 'latest')
geo_data = import_string(settings.WAZIMAP['geodata'])()
def gdal_missing(critical=False):
log.warn("NOTE: Wazimap is unable to load GDAL, it's probably not installed. "
"Some functionality such as data downloads and geolocation won't work. This is ok in development, but "
"is a problem in production. For more information on installing GDAL, see http://wazimap.readthedocs.io/en/latest/")
if critical:
raise Exception("GDAL must be installed for this functionality to work.")
|
11515871
|
import copy
import json
import pycountry
from django.conf import settings
from django import get_version, forms
from django import utils
from django.forms import Widget
from django.forms.utils import flatatt
class SplitJSONWidgetBase(forms.Widget):
css_textfield_class = 'vTextField'
del_btn = ('<button type="button" class="button"'
'style="background: #b66064;" '
'onclick="django.jQuery(this).parent().remove(); return 0" >'
' Del'
'</button>')
add_btn_tmpl = """
<div>
<button type="button" class="button" onclick="{}" >Add {}</button>
</div>
"""
li_row_tmpl = """
if (window.js_cnt == undefined) {{
window.js_cnt = Math.floor(Math.random()*10000000) + 1;
}}
window.js_cnt += 1;
init_id = '{}';
name = '{}';
new_li_id = 'li_'+name+'_'+window.js_cnt;
name_regexp = /{}\[\d*\]/g;
row_li = \'{}\'.replace(init_id, new_li_id).replace(name_regexp, name+'['+ window.js_cnt +']');
django.jQuery('#{}').append(row_li);return 0
"""
source_label = '<label >Source data:</label> {}'
class SplitJSONWidget(SplitJSONWidgetBase):
def __init__(self, attrs=None, kwargs=None, debug=True):
self.debug = debug
self.id_cnt = 0
super().__init__(attrs)
def _embed_js(self, value):
return value.replace('"', "'").replace("'", "\\'")
def _get_id_cnt(self):
self.id_cnt += 1
return self.id_cnt
def _get_name_prefix(self, name):
return 'ul_{}'.format(name)
def _as_text_field(self, name, value):
attrs = self.build_attrs(self.attrs)
if value:
attrs['value'] = value
if name:
attrs['class'] = "{} input_{}".format(self.css_textfield_class,
name)
attrs['name'] = '{}[{}]'.format(name, self._get_id_cnt())
f = flatatt(attrs)
return '<input {} />'.format(f)
def _get_single_fields(self, name, value):
inputs = []
if isinstance(value, list):
for v in sorted(value):
inputs.append(self._as_text_field(name,
v))
elif isinstance(value, (str, int, float)):
inputs.append(self._as_text_field(name, value))
elif value is None:
inputs.append(self._as_text_field(name, ''))
return inputs
def _prepare_as_ul(self, name, l):
ul_id = self._get_name_prefix(name)
result = '<ul id="{}" >{}</ul>'
row = ''
cnt = 0
for el in l:
row_id = 'li_{}_{}'.format(name, cnt)
            row += '<li id="{}">{} {}</li>'.format(row_id, el, self.del_btn)
cnt += 1
return result.format(ul_id, row)
def _get_add_btn(self, name):
# add button render. TODO: get it from a method!
ul_id = self._get_name_prefix(name)
input_field = self._as_text_field(name, '')
del_btn = self.del_btn
init_id = 'init_id'
li = """<li id='{}'>{}{}</li>""".format(init_id, input_field, del_btn)
cleaned_li = self._embed_js(li)
add_li_js = self.li_row_tmpl.format(init_id,
name, name,
cleaned_li,
ul_id).replace('\n', '').replace(' '*2, ' ')
add_btn = self.add_btn_tmpl.format(add_li_js, name.title().replace('_', ' '))
# end add button
return add_btn
def render(self, name, value, attrs=None, renderer=None):
add_btn = self._get_add_btn(name)
inputs = self._get_single_fields(name, value or {})
result = self._prepare_as_ul(name, inputs)
if self.debug:
# render json as well
source_data = self.source_label.format(value)
result = '{}{}'.format(result, source_data)
result += add_btn
return utils.safestring.mark_safe(result)
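# Rough rendering sketch (hedged; the field name "mail" and values are hypothetical):
# for name="mail" and value=["a", "b"], render() emits an <ul id="ul_mail"> containing
# one <li id="li_mail_0">, <li id="li_mail_1">, ... per value, each holding an
# <input class="vTextField input_mail" name="mail[<n>]"> plus the Del button, followed
# by the source-data label (in debug mode) and an "Add Mail" button whose onclick
# appends a fresh <li> through li_row_tmpl.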
class SchacPersonalUniqueIdWidget(SplitJSONWidget, forms.Widget):
"""
urn:schac:personalUniqueID:it:CF:
"""
li_row_tmpl = """
if (window.js_cnt == undefined) {{
window.js_cnt = Math.floor(Math.random()*10000000) + 1;
}}
window.js_cnt += 1;
init_id = '{}';
name = '{}';
new_li_id = 'li_'+name+'_'+window.js_cnt;
name_regexp = /\[(\d*)\]/g;
row_li = \'{}\';
row_li_changed = row_li.replace(init_id, new_li_id).replace(name_regexp, '['+ window.js_cnt +']');
django.jQuery('#{}').append(row_li_changed);return 0
"""
def _get_add_btn(self, name):
# add button render. TODO: get it from a method!
ul_id = self._get_name_prefix(name)
input_field = self._as_text_field(name, '')
del_btn = self.del_btn
init_id = 'init_id'
li = """<li id='{}'>{}{}</li>""".format(init_id, input_field, del_btn)
cleaned_li = self._embed_js(li)
add_li_js = self.li_row_tmpl.format(init_id,
name,
cleaned_li,
ul_id).replace('\n', '').replace(' '*2, ' ')
add_btn = self.add_btn_tmpl.format(add_li_js, name.title().replace('_', ' '))
# end add button
return add_btn
def _as_text_field(self, name, value):
attrs = self.build_attrs(self.attrs)
l_value = [settings.SCHAC_PERSONALUNIQUEID_DEFAULT_PREFIX,
settings.SCHAC_PERSONALUNIQUEID_DEFAULT_COUNTRYCODE,
settings.SCHAC_PERSONALUNIQUEID_DOCUMENT_CODE[0]]
if value:
sv = value.split(':')
if len(sv) > 4:
l_value.append(sv[-1])
l_value[1] = sv[-3]
l_value[2] = sv[-2]
value = l_value[3]
else:
value = ''
row_id = self._get_id_cnt()
static_prefix = "<input style='width: 170px;' class='vTextField' value='{}' name='{}_1_[{}]' disabled>".format(l_value[0],
name,
row_id)
select_1_tmpl = """<select name={} {}>
{}
</select>"""
option_1_tmpl = """<option value="{}">{}</option>
"""
select_1_options_list = ['<option value="{}" selected>{}</option>'.format(l_value[1],
l_value[1]),]
        # compare ISO alpha-2 codes rather than pycountry objects, so the already-selected default country is excluded
        fout_countries = [e for e in pycountry.countries if e.alpha_2 != settings.SCHAC_PERSONALUNIQUEID_DEFAULT_COUNTRYCODE]
select_1_options_list.extend([option_1_tmpl.format(i.alpha_2, i.alpha_2) for i in fout_countries])
select_1_options_list.extend([option_1_tmpl.format(ele, ele) for ele in ('EU', 'INT')])
select_1 = select_1_tmpl.format('{}_2_[{}]'.format(name, row_id), '', ''.join(select_1_options_list))
select_2_tmpl = """<select name={} {}>
{}
</select>"""
option_2_tmpl = """<option value="{}">{}</option>
"""
select_2_options_list = ['<option value="{}" selected>{}</option>'.format(l_value[2],
l_value[2]),]
select_2_options_list.extend([option_2_tmpl.format(i, i) for i in settings.SCHAC_PERSONALUNIQUEID_DOCUMENT_CODE[1:]])
select_2 = select_2_tmpl.format('{}_3_[{}]'.format(name, row_id), '', ''.join(select_2_options_list))
input_suffix = "<input style='width: 170px;' class='vTextField' value='{}' name='{}_4_[{}]'>".format(value,
name,
row_id)
return static_prefix+select_1+select_2+input_suffix
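    # Worked example (hedged; the fiscal-code value below is made up): for
    #   value = "urn:schac:personalUniqueID:it:CF:ABCDEF12G34H567I"
    # value.split(':') yields six parts, so the row renders as
    #   prefix  -> settings.SCHAC_PERSONALUNIQUEID_DEFAULT_PREFIX   (disabled input)
    #   country -> sv[-3] == "it"                                   (first <select>)
    #   doctype -> sv[-2] == "CF"                                   (second <select>)
    #   code    -> sv[-1] == "ABCDEF12G34H567I"                     (editable input)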
def _get_single_fields(self, name, value):
inputs = []
if isinstance(value, list):
for v in sorted(value):
inputs.append(self._as_text_field(name,
v))
elif isinstance(value, (str, int, float)):
inputs.append(self._as_text_field(name, value))
elif value is None:
inputs.append(self._as_text_field(name, ''))
return inputs
def render(self, name, value, attrs=None, renderer=None):
add_btn = self._get_add_btn(name)
inputs = self._get_single_fields(name, value or {})
result = self._prepare_as_ul(name, inputs)
if self.debug:
# render json as well
source_data = self.source_label.format(value)
result = '{}{}'.format(result, source_data)
result += add_btn
return utils.safestring.mark_safe(result)
class SchacPersonalUniqueCodeWidget(SchacPersonalUniqueIdWidget):
"""
# Example: schacPersonalUniqueCode: urn:mace:terena.org:schac:personalUniqueCode:fi:tut.fi:student:165934
# schacPersonalUniqueCode: urn:mace:terena.org:schac:personalUniqueCode:es:uma:estudiante:a3b123c12
# schacPersonalUniqueCode: urn:mace:terena.org:schac:personalUniqueCode:se:LIN:87654321
"""
def _as_text_field(self, name, value):
attrs = self.build_attrs(self.attrs)
l_value = [settings.SCHAC_PERSONALUNIQUECODE_DEFAULT_PREFIX,
settings.SCHAC_PERSONALUNIQUEID_DEFAULT_COUNTRYCODE,
]
value = value.replace(settings.SCHAC_PERSONALUNIQUECODE_DEFAULT_PREFIX, '')[1:]
sv = value.split(':')
        if len(sv) > 2:
            l_value.append(sv[-1])
            l_value[1] = sv[0]
            value = ':'.join(sv[1:])
            l_value[2] = value
else:
value = ''
row_id = self._get_id_cnt()
static_prefix = "<input style='width: 285px;' class='vTextField' value='{}' name='{}_1_[{}]' disabled>".format(l_value[0],
name,
row_id)
select_1_tmpl = """<select name={} {}>
{}
</select>"""
option_1_tmpl = """<option value="{}">{}</option>
"""
select_1_options_list = ['<option value="{}" selected>{}</option>'.format(l_value[1],
l_value[1]),]
        # exclude the default country by its alpha-2 code; comparing country objects against the URN prefix never excluded anything
        fout_countries = [e for e in pycountry.countries if e.alpha_2 != settings.SCHAC_PERSONALUNIQUEID_DEFAULT_COUNTRYCODE]
select_1_options_list.extend([option_1_tmpl.format(i.alpha_2, i.alpha_2) for i in fout_countries])
select_1_options_list.extend([option_1_tmpl.format(ele, ele) for ele in ('EU', 'INT')])
select_1 = select_1_tmpl.format('{}_2_[{}]'.format(name, row_id), '', ''.join(select_1_options_list))
input_suffix = "<input style='width: 170px;' class='vTextField' value='{}' name='{}_4_[{}]'>".format(value,
name,
row_id)
return static_prefix+select_1+input_suffix
class SchacHomeOrganizationTypeWidget(SchacPersonalUniqueIdWidget):
"""
urn:schac:homeOrganizationType:<country-code>:university (SCHAC) - SWITCHaai(CH)
"""
def _as_text_field(self, name, value):
attrs = self.build_attrs(self.attrs)
l_value = [settings.SCHAC_HOMEORGANIZATIONTYPE_DEFAULT_PREFIX,
settings.SCHAC_PERSONALUNIQUEID_DEFAULT_COUNTRYCODE,
]
if value:
sv = value.replace(settings.SCHAC_HOMEORGANIZATIONTYPE_DEFAULT_PREFIX, '').split(':')[1:]
if len(sv) > 1:
l_value.append(sv[-1])
l_value[1] = sv[0]
value = sv[1]
else:
value = ''
row_id = self._get_id_cnt()
static_prefix = "<input style='width: 285px;' class='vTextField' value='{}' name='{}_1_[{}]' disabled>".format(l_value[0],
name,
row_id)
select_1_tmpl = """<select name={} {}>
{}
</select>"""
option_1_tmpl = """<option value="{}">{}</option>
"""
select_1_options_list = ['<option value="{}" selected>{}</option>'.format(l_value[1],
l_value[1]),]
        # exclude the default country by its alpha-2 code; comparing country objects against the URN prefix never excluded anything
        fout_countries = [e for e in pycountry.countries if e.alpha_2 != settings.SCHAC_PERSONALUNIQUEID_DEFAULT_COUNTRYCODE]
select_1_options_list.extend([option_1_tmpl.format(i.alpha_2, i.alpha_2) for i in fout_countries])
select_1_options_list.extend([option_1_tmpl.format(ele, ele) for ele in ('EU', 'INT')])
select_1 = select_1_tmpl.format('{}_2_[{}]'.format(name, row_id), '', ''.join(select_1_options_list))
input_suffix = "<input style='width: 170px;' class='vTextField' value='{}' name='{}_4_[{}]'>".format(value,
name,
row_id)
return static_prefix+select_1+input_suffix
class eduPersonAffiliationWidget(SchacPersonalUniqueIdWidget):
"""
faculty, student, staff, alum, member, affiliate, employee, library-walk-in
"""
def _as_text_field(self, name, value):
attrs = self.build_attrs(self.attrs)
l_value = []
if not value:
value = ''
row_id = self._get_id_cnt()
select_1_tmpl = """<select name={} {}>
{}
</select>"""
option_1_tmpl = """<option value="{}">{}</option>
"""
select_1_options_list = ['<option value="{}" selected>{}</option>'.format(value, value)]
select_1_options_list.extend([option_1_tmpl.format(i[0], i[0]) for i in settings.AFFILIATION])
select_1 = select_1_tmpl.format('{}_1_[{}]'.format(name, row_id), '', ''.join(select_1_options_list))
return select_1
class eduPersonScopedAffiliationWidget(SchacPersonalUniqueIdWidget):
"""
faculty, student, staff, alum, member, affiliate, employee, library-walk-in
"""
scoped_symbol = '@'
def _as_text_field(self, name, value):
attrs = self.build_attrs(self.attrs)
if value:
l_value = value.split(self.scoped_symbol)
rendered_value = self.scoped_symbol.join((l_value[0], l_value[1]))
select_1_options_list = ['<option value="{}" selected>{}</option>'\
.format(l_value[0]+self.scoped_symbol, l_value[0]+self.scoped_symbol)]
else:
l_value = ['', '']
rendered_value = ''
select_1_options_list = ['<option value="{}" selected>{}</option>'\
.format(l_value[0], l_value[0])]
row_id = self._get_id_cnt()
select_1_tmpl = """<select name={} {}>
{}
</select>"""
option_1_tmpl = """<option value="{}">{}</option>
"""
select_1_options_list.extend([option_1_tmpl.format(i[0]+self.scoped_symbol, i[0]+self.scoped_symbol) for i in settings.AFFILIATION])
select_1 = select_1_tmpl.format('{}_1_[{}]'.format(name, row_id), '', ''.join(select_1_options_list))
input_suffix = "<input style='width: 170px;' class='vTextField' value='{}' name='{}_2_[{}]'>".format(l_value[1],
name,
row_id)
return select_1+input_suffix
class TitleWidget(SchacPersonalUniqueIdWidget):
"""
one of settings.LDAP_PEOPLES_TITLES
"""
def _as_text_field(self, name, value):
attrs = self.build_attrs(self.attrs)
l_value = []
if not value:
value = ''
row_id = self._get_id_cnt()
select_1_tmpl = """<select name={} {}>
{}
</select>"""
option_1_tmpl = """<option value="{}">{}</option>
"""
select_1_options_list = ['<option value="{}" selected>{}</option>'.format(value, value)]
select_1_options_list.extend([option_1_tmpl.format(i[0], i[0]) for i in settings.LDAP_PEOPLES_TITLES])
select_1 = select_1_tmpl.format('{}_1_[{}]'.format(name, row_id), '', ''.join(select_1_options_list))
return select_1
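# Minimal form-attachment sketch (hedged; the form and field names below are hypothetical
# and depend on the surrounding Django project):
#
#   class PersonForm(forms.Form):
#       schacPersonalUniqueID = forms.CharField(
#           required=False,
#           widget=SchacPersonalUniqueIdWidget(debug=False),
#       )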
|
11515944
|
import os
import sys
import errno
import unittest
import time
from urllib import urlopen, urlencode
import sb_test_support
sb_test_support.fix_sys_path()
import sb_server
from spambayes.Options import options
default_shutdown_port = options["html_ui", "port"]
verbose = 0
def call_web_function(url, **kw):
got = urlopen(url, urlencode(kw)).read()
# Very simple - just look for tracebacks
if got.find("Traceback (most recent call last)")>=0:
print "FAILED calling URL", url
print got
raise AssertionError, "Opening URL %s appeared to fail" % (url,)
class Spawner:
def __init__(self, test_case, spawn_args):
self.test_case = test_case
self.spawn_args = spawn_args
# If the command is a .py file, insert an executable.
if os.path.splitext(self.spawn_args[0])[1]=='.py':
self.spawn_args.insert(0, sys.executable)
self.pid = None
def _spawn(self, args):
return os.spawnv(os.P_NOWAIT, self.spawn_args[0], self.spawn_args)
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def is_running(self):
if self.pid is None:
return False
# damn - we could implement os.waitpid correctly
# using the win32api - but as at 2.3, you can't check
# if a pid is still running with os.waitpid
if sys.platform.startswith("win32"):
import win32process # sorry, ya gotta have win32all to run tests
import win32con
try:
rc = win32process.GetExitCodeProcess(self.pid)
result = rc==win32con.STILL_ACTIVE
except win32process.error:
result = False
else:
try:
os.waitpid(self.pid, os.WNOHANG)
result = True
            except os.error, details:
                if details.errno == errno.ECHILD:
                    result = False
                else:
                    # any other error is unexpected - re-raise it
                    raise
# Wait a few seconds for the global mutex to catch up
for i in range(20):
time.sleep(0.25)
if result==is_any_sb_server_running():
break
        # Check the platform agrees (could do xor, but even I won't be able
        # to read it in a few weeks <wink>)
if result:
self.test_case.failUnless(is_any_sb_server_running(),
"My server stopped, but global server mutex held")
else:
self.test_case.failUnless(not is_any_sb_server_running(),
"My server running, but no global server mutex held")
return result
class Spawner_sb_server(Spawner):
def __init__(self, test_case, args, shutdown_port = default_shutdown_port):
self.shutdown_port = shutdown_port
f = sb_server.__file__
if f.endswith(".pyc") or f.endswith(".pyo"):
f = f[:-1]
Spawner.__init__(self, test_case, [f]+args)
def start(self):
self.test_case.failUnless(not is_any_sb_server_running(),
"Should be no server running")
if verbose > 1:
print "Spawning", self.spawn_args
self.pid = self._spawn(self.spawn_args)
# wait for it to start - 5 secs, 0.25 per check
for i in range(20):
time.sleep(0.25)
if verbose > 1:
print "Waiting for start flags: running=%s, global_mutex=%s" \
% (self.is_running(), is_any_sb_server_running())
if self.is_running() and is_any_sb_server_running():
return
# gave up waiting.
self.test_case.fail("sb_server appeared to not start")
def stop(self):
# Copied from sb_server.stop()
# Shutdown as though through the web UI. This will save the DB, allow
# any open proxy connections to complete, etc.
call_web_function('http://localhost:%d/save' % self.shutdown_port,
how='Save & shutdown')
# wait for it to stop - 5 secs, 0.25 per check
for i in range(20):
time.sleep(0.25)
if not self.is_running() and not is_any_sb_server_running():
# stopped - check the exit code
temp_pid, rc = os.waitpid(self.pid, 0)
if rc:
self.test_case.fail("sb_server returned exit code %s" % rc)
return
# gave up waiting.
self.test_case.fail("sb_server appeared to not stop")
def is_any_sb_server_running():
# reach into sb_server internals, as it is authoritative (sometimes <wink>)
try:
mutex = sb_server.open_platform_mutex()
sb_server.close_platform_mutex(mutex)
return False
except sb_server.AlreadyRunningException:
return True
class TestServer(unittest.TestCase):
def setUp(self):
self.failUnless(not is_any_sb_server_running(),
"Can't do sb_server tests while a server is running "\
"(platform mutex held)")
def tearDown(self):
# If we cause failure here, we mask the underlying error which left
# the server running - so just print the warning.
if is_any_sb_server_running():
print "WARNING:", self, "completed with the platform mutex held"
def _start_spawner(self, spawner):
self.failUnless(not spawner.is_running(),
"this spawneer can't be running")
spawner.start()
self.failUnless(spawner.is_running(),
"this spawner must be running after successful start")
self.failUnless(is_any_sb_server_running(),
"Platform mutex not held after starting")
def _stop_spawner(self, spawner):
self.failUnless(spawner.is_running(), "must be running to stop")
self.failUnless(is_any_sb_server_running(),
"Platform mutex must be held to stop")
spawner.stop()
self.failUnless(not spawner.is_running(), "didn't stop after stop")
self.failUnless(not is_any_sb_server_running(),
"Platform mutex still held after stop")
def test_sb_server_default(self):
# Should be using the default port from the options file.
from spambayes.Options import options
port = options["html_ui", "port"]
s = Spawner_sb_server(self, [])
self._start_spawner(s)
self._stop_spawner(s)
def test_sb_server_ui_port(self):
        # Use a non-default UI port specified via the -u option.
s = Spawner_sb_server(self, ["-u8899"], 8899)
self._start_spawner(s)
self._stop_spawner(s)
def test_sb_server_restore(self):
# Make sure we can do a restore defaults and shutdown without incident.
from spambayes.Options import options
port = options["html_ui", "port"]
s = Spawner_sb_server(self, [], shutdown_port=port)
self._start_spawner(s)
# do the reload
call_web_function('http://localhost:%d/restoredefaults' % port, how='')
self._stop_spawner(s)
if sys.platform.startswith("win"):
import win32service # You need win32all to run the tests!
import win32serviceutil
import winerror
service_name = "pop3proxy"
class TestService(unittest.TestCase):
def setUp(self):
try:
win32serviceutil.QueryServiceStatus(service_name)
except win32service.error, details:
if details[0]==winerror.ERROR_SERVICE_DOES_NOT_EXIST:
self.was_installed = False
raise
else:
self.was_installed = True
self.failUnless(not is_any_sb_server_running(),
"Can't do service tests while a server is running "\
"(platform mutex held)")
def tearDown(self):
if is_any_sb_server_running():
print "WARNING:", self, "completed with the platform mutex held"
def _start_service(self):
win32serviceutil.StartService(service_name)
for i in range(10):
time.sleep(0.5)
status = win32serviceutil.QueryServiceStatus(service_name)
if status[1] == win32service.SERVICE_RUNNING:
break
if verbose > 1:
print "Service status is %d - still waiting" % status[1]
else:
self.fail("Gave up waiting for service to start")
def _stop_service(self):
# StopServiceWithDeps checks the status of each service as it
# stops it, which is exactly what we want here.
win32serviceutil.StopServiceWithDeps(service_name)
def test_simple_startstop(self):
self._start_service()
self._stop_service()
def test_remote_shutdown(self):
self._start_service()
# Should be using the default port from the options file.
from spambayes.Options import options
port = options["html_ui", "port"]
        call_web_function('http://localhost:%d/save' % port,
how='Save & shutdown')
# wait for it to stop - 5 secs, 0.25 per check
for i in range(10):
time.sleep(0.5)
status = win32serviceutil.QueryServiceStatus(service_name)
if status[1] == win32service.SERVICE_STOPPED:
break
else:
self.fail("Gave up waiting for service to stop")
self.failUnless(not is_any_sb_server_running(),
"Should be no platform mutex held after stopping")
if __name__=='__main__':
sb_test_support.unittest_main()
|
11515963
|
import numpy as np
import pandas as pd
from statsmodels.tools.grouputils import Grouping
from statsmodels.tools.tools import categorical
from statsmodels.datasets import grunfeld, anes96
from pandas.util import testing as ptesting
class CheckGrouping(object):
def test_reindex(self):
# smoke test
self.grouping.reindex(self.grouping.index)
def test_count_categories(self):
self.grouping.count_categories(level=0)
np.testing.assert_equal(self.grouping.counts, self.expected_counts)
def test_sort(self):
# data frame
sorted_data, index = self.grouping.sort(self.data)
expected_sorted_data = self.data.sort_index()
ptesting.assert_frame_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.DataFrame))
np.testing.assert_(not index.equals(self.grouping.index))
# make sure it copied
if hasattr(sorted_data, 'equals'): # newer pandas
np.testing.assert_(not sorted_data.equals(self.data))
# 2d arrays
sorted_data, index = self.grouping.sort(self.data.values)
np.testing.assert_array_equal(sorted_data,
expected_sorted_data.values)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
# 1d series
series = self.data[self.data.columns[0]]
sorted_data, index = self.grouping.sort(series)
expected_sorted_data = series.sort_index()
ptesting.assert_series_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.Series))
if hasattr(sorted_data, 'equals'):
np.testing.assert_(not sorted_data.equals(series))
# 1d array
array = series.values
sorted_data, index = self.grouping.sort(array)
expected_sorted_data = series.sort_index().values
np.testing.assert_array_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
def test_transform_dataframe(self):
names = self.data.index.names
transformed_dataframe = self.grouping.transform_dataframe(
self.data,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
if len(names) > 1:
transformed_dataframe = self.grouping.transform_dataframe(
self.data, lambda x : x.mean(),
level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
def test_transform_array(self):
names = self.data.index.names
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
if len(names) > 1:
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(), level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
def test_transform_slices(self):
names = self.data.index.names
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=0)
expected = self.data.reset_index().groupby(names[0]).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
if len(names) > 1:
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=1)
expected = self.data.reset_index().groupby(names[1]
).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
def test_dummies_groups(self):
# smoke test, calls dummy_sparse under the hood
self.grouping.dummies_groups()
if len(self.grouping.group_names) > 1:
self.grouping.dummies_groups(level=1)
def test_dummy_sparse(self):
data = self.data
self.grouping.dummy_sparse()
expected = categorical(data.index.get_level_values(0).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(), expected)
if len(self.grouping.group_names) > 1:
self.grouping.dummy_sparse(level=1)
expected = categorical(data.index.get_level_values(1).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(),
expected)
class TestMultiIndexGrouping(CheckGrouping):
@classmethod
def setup_class(cls):
grun_data = grunfeld.load_pandas().data
multi_index_data = grun_data.set_index(['firm', 'year'])
multi_index_panel = multi_index_data.index
cls.grouping = Grouping(multi_index_panel)
cls.data = multi_index_data
cls.expected_counts = [20] * 11
class TestIndexGrouping(CheckGrouping):
@classmethod
def setup_class(cls):
grun_data = grunfeld.load_pandas().data
index_data = grun_data.set_index(['firm'])
index_group = index_data.index
cls.grouping = Grouping(index_group)
cls.data = index_data
cls.expected_counts = [20] * 11
def test_init_api():
# make a multi-index panel
grun_data = grunfeld.load_pandas().data
multi_index_panel = grun_data.set_index(['firm', 'year']).index
grouping = Grouping(multi_index_panel)
# check group_names
np.testing.assert_array_equal(grouping.group_names, ['firm', 'year'])
# check shape
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# check index_int
np.testing.assert_array_equal(grouping.labels,
[[ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
grouping = Grouping(multi_index_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
# make a multi-index grouping
anes_data = anes96.load_pandas().data
multi_index_groups = anes_data.set_index(['educ', 'income',
'TVnews']).index
grouping = Grouping(multi_index_groups)
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# make a list multi-index panel
list_panel = multi_index_panel.tolist()
grouping = Grouping(list_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# make a list multi-index grouping
list_groups = multi_index_groups.tolist()
grouping = Grouping(list_groups, names=['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# single-variable index grouping
index_group = multi_index_panel.get_level_values(0)
grouping = Grouping(index_group)
    # the original multi_index_panel had its name changed in place above
np.testing.assert_array_equal(grouping.group_names, ['firms'])
np.testing.assert_array_equal(grouping.index_shape, (220,))
# single variable list grouping
list_group = multi_index_panel.get_level_values(0).tolist()
grouping = Grouping(list_group)
np.testing.assert_array_equal(grouping.group_names, ["group0"])
np.testing.assert_array_equal(grouping.index_shape, 11*20)
# test generic group names
grouping = Grouping(list_groups)
np.testing.assert_array_equal(grouping.group_names,
['group0', 'group1', 'group2'])
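# Standalone sketch mirroring the checks above (hedged; not part of the test suite):
#
#   data = grunfeld.load_pandas().data.set_index(['firm', 'year'])
#   grouping = Grouping(data.index)
#   firm_means = grouping.transform_array(data.values, lambda x: x.mean(), level=0)
#   # firm_means agrees with
#   # data.reset_index().groupby('firm').apply(lambda x: x.mean())[data.columns].values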
|
11515998
|
import json
import argparse
import copy
import joblib
import torch.backends.cudnn as cudnn
import nibabel as nib
import sys
import platform
import multiprocessing
import re
from ivadomed.loader.bids_dataframe import BidsDataframe
from ivadomed import evaluation as imed_evaluation
from ivadomed import config_manager as imed_config_manager
from ivadomed import testing as imed_testing
from ivadomed import training as imed_training
from ivadomed import transforms as imed_transforms
from ivadomed import utils as imed_utils
from ivadomed import metrics as imed_metrics
from ivadomed import inference as imed_inference
from ivadomed.loader import utils as imed_loader_utils, loader as imed_loader, film as imed_film
from loguru import logger
from pathlib import Path
cudnn.benchmark = True
# List of not-default available models i.e. different from Unet
MODEL_LIST = ['Modified3DUNet', 'HeMISUnet', 'FiLMedUnet', 'resnet18', 'densenet121', 'Countception']
def get_parser():
parser = argparse.ArgumentParser(add_help=False)
command_group = parser.add_mutually_exclusive_group(required=False)
command_group.add_argument("--train", dest='train', action='store_true',
help="Perform training on data.")
command_group.add_argument("--test", dest='test', action='store_true',
help="Perform testing on trained model.")
command_group.add_argument("--segment", dest='segment', action='store_true',
help="Perform segmentation on data.")
parser.add_argument("-c", "--config", required=True, type=str,
help="Path to configuration file.")
# OPTIONAL ARGUMENTS
optional_args = parser.add_argument_group('OPTIONAL ARGUMENTS')
optional_args.add_argument("-pd", "--path-data", dest="path_data", required=False, type=str,
nargs="*", help="""Path to data in BIDs format. You may list one
or more paths; separate each path with a space, e.g.
--path-data some/path/a some/path/b""")
optional_args.add_argument("-po", "--path-output", required=False, type=str, dest="path_output",
help="Path to output directory.")
optional_args.add_argument('-g', '--gif', required=False, type=int, default=0,
help='Number of GIF files to output. Each GIF file corresponds to a 2D slice showing the '
'prediction over epochs (one frame per epoch). The prediction is run on the '
'validation dataset. GIF files are saved in the output path.')
optional_args.add_argument('-t', '--thr-increment', dest="thr_increment", required=False, type=float,
help='A threshold analysis is performed at the end of the training using the trained '
'model and the training+validation sub-datasets to find the optimal binarization '
'threshold. The specified value indicates the increment between 0 and 1 used during '
'the analysis (e.g. 0.1). Plot is saved under "[PATH_OUTPUT]/thr.png" and the '
'optimal threshold in "[PATH_OUTPUT]/config_file.json as "binarize_prediction" '
'parameter.')
optional_args.add_argument('--resume-training', dest="resume_training", required=False, action='store_true',
help='Load a saved model ("checkpoint.pth.tar" in the output directory specified either with flag "--path-output" or via the config file "output_path" argument) '
                                         'to resume training. This training state is saved every time a new best model is saved in the output directory specified with flag "--path-output".')
optional_args.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='Shows function documentation.')
return parser
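# Illustrative CLI invocations built from the parser above (hedged; assumes the usual
# `ivadomed` console entry point and hypothetical paths):
#   ivadomed --train   -c config.json --path-data ./bids_dataset --path-output ./results
#   ivadomed --test    -c config.json --path-output ./results
#   ivadomed --segment -c config.json --path-data ./bids_dataset --path-output ./results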
def create_path_model(context, model_params, ds_train, path_output, train_onehotencoder):
path_model = Path(path_output, context.get("model_name"))
if not path_model.is_dir():
logger.info(f'Creating model directory: {path_model}')
path_model.mkdir(parents=True)
if 'film_layers' in model_params and any(model_params['film_layers']):
joblib.dump(train_onehotencoder, path_model.joinpath("one_hot_encoder.joblib"))
if 'metadata_dict' in ds_train[0]['input_metadata'][0]:
metadata_dict = ds_train[0]['input_metadata'][0]['metadata_dict']
joblib.dump(metadata_dict, path_model.joinpath("metadata_dict.joblib"))
else:
logger.info(f'Model directory already exists: {path_model}')
def check_multiple_raters(is_train, loader_params):
if any([isinstance(class_suffix, list) for class_suffix in loader_params["target_suffix"]]):
logger.info(
"Annotations from multiple raters will be used during model training, one annotation from one rater "
"randomly selected at each iteration.\n")
if not is_train:
logger.error(
"Please provide only one annotation per class in 'target_suffix' when not training a model.\n")
exit()
def film_normalize_data(context, model_params, ds_train, ds_valid, path_output):
# Normalize metadata before sending to the FiLM network
results = imed_film.get_film_metadata_models(ds_train=ds_train,
metadata_type=model_params['metadata'],
debugging=context["debugging"])
ds_train, train_onehotencoder, metadata_clustering_models = results
ds_valid = imed_film.normalize_metadata(ds_valid, metadata_clustering_models, context["debugging"],
model_params['metadata'])
model_params.update({"film_onehotencoder": train_onehotencoder,
"n_metadata": len([ll for l in train_onehotencoder.categories_ for ll in l])})
joblib.dump(metadata_clustering_models, Path(path_output, "clustering_models.joblib"))
    joblib.dump(train_onehotencoder, Path(path_output, "one_hot_encoder.joblib"))
return model_params, ds_train, ds_valid, train_onehotencoder
def get_dataset(bids_df, loader_params, data_lst, transform_params, cuda_available, device, ds_type):
ds = imed_loader.load_dataset(bids_df, **{**loader_params, **{'data_list': data_lst,
'transforms_params': transform_params,
'dataset_type': ds_type}}, device=device,
cuda_available=cuda_available)
return ds
def save_config_file(context, path_output):
# Save config file within path_output and path_output/model_name
    # Done after the threshold_analysis to propagate this info into the config files
with Path(path_output, "config_file.json").open(mode='w') as fp:
json.dump(context, fp, indent=4)
with Path(path_output, context.get("model_name"), context.get("model_name") + ".json").open(mode='w') as fp:
json.dump(context, fp, indent=4)
def set_loader_params(context, is_train):
loader_params = copy.deepcopy(context["loader_parameters"])
if is_train:
loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["training_validation"]
else:
loader_params["contrast_params"]["contrast_lst"] = loader_params["contrast_params"]["testing"]
if "FiLMedUnet" in context and context["FiLMedUnet"]["applied"]:
loader_params.update({"metadata_type": context["FiLMedUnet"]["metadata"]})
# Load metadata necessary to balance the loader
if context['training_parameters']['balance_samples']['applied'] and \
context['training_parameters']['balance_samples']['type'] != 'gt':
loader_params.update({"metadata_type": context['training_parameters']['balance_samples']['type']})
return loader_params
def set_model_params(context, loader_params):
model_params = copy.deepcopy(context["default_model"])
model_params["folder_name"] = copy.deepcopy(context["model_name"])
model_context_list = [model_name for model_name in MODEL_LIST
if model_name in context and context[model_name]["applied"]]
if len(model_context_list) == 1:
model_params["name"] = model_context_list[0]
model_params.update(context[model_context_list[0]])
elif 'Modified3DUNet' in model_context_list and 'FiLMedUnet' in model_context_list and len(model_context_list) == 2:
model_params["name"] = 'Modified3DUNet'
for i in range(len(model_context_list)):
model_params.update(context[model_context_list[i]])
elif len(model_context_list) > 1:
logger.error('ERROR: Several models are selected in the configuration file: {}.'
'Please select only one (i.e. only one where: "applied": true).'.format(model_context_list))
exit()
model_params['is_2d'] = False if "Modified3DUNet" in model_params['name'] else model_params['is_2d']
# Get in_channel from contrast_lst
if loader_params["multichannel"]:
model_params["in_channel"] = len(loader_params["contrast_params"]["contrast_lst"])
else:
model_params["in_channel"] = 1
# Get out_channel from target_suffix
model_params["out_channel"] = len(loader_params["target_suffix"])
# If multi-class output, then add background class
if model_params["out_channel"] > 1:
model_params.update({"out_channel": model_params["out_channel"] + 1})
# Display for spec' check
imed_utils.display_selected_model_spec(params=model_params)
# Update loader params
if 'object_detection_params' in context:
object_detection_params = context['object_detection_params']
object_detection_params.update({"gpu_ids": context['gpu_ids'][0],
"path_output": context['path_output']})
loader_params.update({"object_detection_params": object_detection_params})
loader_params.update({"model_params": model_params})
return model_params, loader_params
def set_output_path(context):
path_output = copy.deepcopy(context["path_output"])
if not Path(path_output).is_dir():
logger.info('Creating output path: {}'.format(path_output))
Path(path_output).mkdir(parents=True)
else:
logger.info('Output path already exists: {}'.format(path_output))
return path_output
def update_film_model_params(context, ds_test, model_params, path_output):
clustering_path = Path(path_output, "clustering_models.joblib")
metadata_clustering_models = joblib.load(clustering_path)
# Model directory
ohe_path = Path(path_output, context.get("model_name"), "one_hot_encoder.joblib")
one_hot_encoder = joblib.load(ohe_path)
ds_test = imed_film.normalize_metadata(ds_test, metadata_clustering_models, context.get("debugging"),
model_params.get('metadata'))
model_params.update({"film_onehotencoder": one_hot_encoder,
"n_metadata": len([ll for l in one_hot_encoder.categories_ for ll in l])})
return ds_test, model_params
def run_segment_command(context, model_params):
# BIDSDataframe of all image files
# Indexing of derivatives is False for command segment
bids_df = BidsDataframe(context['loader_parameters'], context['path_output'], derivatives=False)
# Append subjects filenames into a list
bids_subjects = sorted(bids_df.df.get('filename').to_list())
# Add postprocessing to packaged model
path_model = Path(context.get('path_output'), context.get('model_name'))
path_model_config = Path(path_model, context.get('model_name') + ".json")
model_config = imed_config_manager.load_json(str(path_model_config))
model_config['postprocessing'] = context.get('postprocessing')
with path_model_config.open(mode='w') as fp:
json.dump(model_config, fp, indent=4)
options = {}
# Initialize a list of already seen subject ids for multichannel
seen_subj_ids = []
for subject in bids_subjects:
if context['loader_parameters']['multichannel']:
# Get subject_id for multichannel
df_sub = bids_df.df.loc[bids_df.df['filename'] == subject]
subj_id = re.sub(r'_' + df_sub['suffix'].values[0] + '.*', '', subject)
if subj_id not in seen_subj_ids:
# if subj_id has not been seen yet
fname_img = []
provided_contrasts = []
contrasts = context['loader_parameters']['contrast_params']['testing']
# Keep contrast order
for c in contrasts:
df_tmp = bids_df.df[
bids_df.df['filename'].str.contains(subj_id) & bids_df.df['suffix'].str.contains(c)]
                    if not df_tmp.empty:
provided_contrasts.append(c)
fname_img.append(df_tmp['path'].values[0])
seen_subj_ids.append(subj_id)
if len(fname_img) != len(contrasts):
logger.warning("Missing contrast for subject {}. {} were provided but {} are required. Skipping "
"subject.".format(subj_id, provided_contrasts, contrasts))
continue
else:
# Returns an empty list for subj_id already seen
fname_img = []
else:
fname_img = bids_df.df[bids_df.df['filename'] == subject]['path'].to_list()
# Add film metadata to options for segment_volume
if 'film_layers' in model_params and any(model_params['film_layers']) and model_params['metadata']:
metadata = bids_df.df[bids_df.df['filename'] == subject][model_params['metadata']].values[0]
options['metadata'] = metadata
# Add microscopy pixel size metadata to options for segment_volume
if 'PixelSize' in bids_df.df.columns:
options['pixel_size'] = bids_df.df.loc[bids_df.df['filename'] == subject]['PixelSize'].values[0]
if fname_img:
pred_list, target_list = imed_inference.segment_volume(str(path_model),
fname_images=fname_img,
gpu_id=context['gpu_ids'][0],
options=options)
pred_path = Path(context.get('path_output'), "pred_masks")
if not pred_path.exists():
pred_path.mkdir(parents=True)
for pred, target in zip(pred_list, target_list):
filename = subject.split('.')[0] + target + "_pred" + ".nii.gz"
nib.save(pred, Path(pred_path, filename))
# For Microscopy PNG/TIF files (TODO: implement OMETIFF behavior)
extension = imed_loader_utils.get_file_extension(subject)
if "nii" not in extension:
imed_inference.pred_to_png(pred_list,
target_list,
str(Path(pred_path, subject)).replace(extension, ''))
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
"""Run main command.
This function is central in the ivadomed project as training / testing / evaluation commands
are run via this function. All the process parameters are defined in the config.
Args:
context (dict): Dictionary containing all parameters that are needed for a given process. See
:doc:`configuration_file` for more details.
n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
predictions of a given slice from the validation sub-dataset. They are saved within the output path.
thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
the training + validation sub-dataset to find the optimal binarization threshold. The specified value
indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the output directory specified with the
            flag "--path-output" or via the config file "output_path" argument) to resume training. This training
            state is saved every time a new best model is saved in the output directory.
Returns:
float or pandas.DataFrame or None:
* If "train" command: Returns floats: best loss score for both training and validation.
* If "test" command: Returns a pandas Dataframe: of metrics computed for each subject of
the testing sub-dataset and return the prediction metrics before evaluation.
* If "segment" command: No return value.
"""
command = copy.deepcopy(context.get("command"))
path_output = set_output_path(context)
path_log = Path(context.get('path_output'), context.get('log_file'))
logger.remove()
logger.add(str(path_log))
logger.add(sys.stdout)
# Create a log with the version of the Ivadomed software and the version of the Annexed dataset (if present)
create_dataset_and_ivadomed_version_log(context)
cuda_available, device = imed_utils.define_device(context['gpu_ids'][0])
# BACKWARDS COMPATIBILITY: If bids_path is string, assign to list - Do this here so it propagates to all functions
context['loader_parameters']['path_data'] = imed_utils.format_path_data(context['loader_parameters']['path_data'])
# Loader params
loader_params = set_loader_params(context, command == "train")
# Get transforms for each subdataset
transform_train_params, transform_valid_params, transform_test_params = \
imed_transforms.get_subdatasets_transforms(context["transformation"])
# MODEL PARAMETERS
model_params, loader_params = set_model_params(context, loader_params)
if command == 'segment':
run_segment_command(context, model_params)
return
# BIDSDataframe of all image files
# Indexing of derivatives is True for command train and test
bids_df = BidsDataframe(loader_params, path_output, derivatives=True)
# Get subject filenames lists. "segment" command uses all participants of data path, hence no need to split
train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subject_files_list(context["split_dataset"],
bids_df.df,
path_output,
context["loader_parameters"]
['subject_selection'])
# Generating sha256 for the training files
imed_utils.generate_sha_256(context, bids_df.df, train_lst)
# TESTING PARAMS
# Aleatoric uncertainty
if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
transformation_dict = transform_train_params
else:
transformation_dict = transform_test_params
undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transformation_dict, requires_undo=True))
testing_params = copy.deepcopy(context["training_parameters"])
testing_params.update({'uncertainty': context["uncertainty"]})
testing_params.update({'target_suffix': loader_params["target_suffix"], 'undo_transforms': undo_transforms,
'slice_axis': loader_params['slice_axis']})
if command == "train":
imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
elif command == "test":
imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])
# Check if multiple raters
check_multiple_raters(command == "train", loader_params)
if command == 'train':
# Get Validation dataset
ds_valid = get_dataset(bids_df, loader_params, valid_lst, transform_valid_params, cuda_available, device,
'validation')
# Get Training dataset
ds_train = get_dataset(bids_df, loader_params, train_lst, transform_train_params, cuda_available, device,
'training')
metric_fns = imed_metrics.get_metric_fns(ds_train.task)
# If FiLM, normalize data
if 'film_layers' in model_params and any(model_params['film_layers']):
model_params, ds_train, ds_valid, train_onehotencoder = \
film_normalize_data(context, model_params, ds_train, ds_valid, path_output)
else:
train_onehotencoder = None
# Model directory
create_path_model(context, model_params, ds_train, path_output, train_onehotencoder)
save_config_file(context, path_output)
# RUN TRAINING
best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
model_params=model_params,
dataset_train=ds_train,
dataset_val=ds_valid,
training_params=context["training_parameters"],
path_output=path_output,
device=device,
cuda_available=cuda_available,
metric_fns=metric_fns,
n_gif=n_gif,
resume_training=resume_training,
debugging=context["debugging"])
if thr_increment:
# LOAD DATASET
        if command != 'train':  # if command == 'train', then ds_valid is already loaded
# Get Validation dataset
ds_valid = get_dataset(bids_df, loader_params, valid_lst, transform_valid_params, cuda_available, device,
'validation')
# Get Training dataset with no Data Augmentation
ds_train = get_dataset(bids_df, loader_params, train_lst, transform_valid_params, cuda_available, device,
'training')
# Choice of optimisation metric
metric = "recall_specificity" if model_params["name"] in imed_utils.CLASSIFIER_LIST else "dice"
# Model path
model_path = Path(path_output, "best_model.pt")
# Run analysis
thr = imed_testing.threshold_analysis(model_path=str(model_path),
ds_lst=[ds_train, ds_valid],
model_params=model_params,
testing_params=testing_params,
metric=metric,
increment=thr_increment,
fname_out=str(Path(path_output, "roc.png")),
cuda_available=cuda_available)
# Update threshold in config file
context["postprocessing"]["binarize_prediction"] = {"thr": thr}
save_config_file(context, path_output)
if command == 'train':
return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss
if command == 'test':
# LOAD DATASET
# Warn user that the input-level dropout is set during inference
if loader_params['is_input_dropout']:
logger.warning("Input-level dropout is set during testing. To turn this option off, set 'is_input_dropout'"
"to 'false' in the configuration file.")
ds_test = imed_loader.load_dataset(bids_df, **{**loader_params, **{'data_list': test_lst,
'transforms_params': transformation_dict,
'dataset_type': 'testing',
'requires_undo': True}}, device=device,
cuda_available=cuda_available)
metric_fns = imed_metrics.get_metric_fns(ds_test.task)
if 'film_layers' in model_params and any(model_params['film_layers']):
ds_test, model_params = update_film_model_params(context, ds_test, model_params, path_output)
# RUN INFERENCE
pred_metrics = imed_testing.test(model_params=model_params,
dataset_test=ds_test,
testing_params=testing_params,
path_output=path_output,
device=device,
cuda_available=cuda_available,
metric_fns=metric_fns,
postprocessing=context['postprocessing'])
# RUN EVALUATION
df_results = imed_evaluation.evaluate(bids_df, path_output=path_output,
target_suffix=loader_params["target_suffix"],
eval_params=context["evaluation_parameters"])
return df_results, pred_metrics
def create_dataset_and_ivadomed_version_log(context):
path_data = context.get('loader_parameters').get('path_data')
ivadomed_version = imed_utils._version_string()
datasets_version = []
if isinstance(path_data, str):
datasets_version = [imed_utils.__get_commit(path_to_git_folder=path_data)]
elif isinstance(path_data, list):
        for dataset_path in path_data:
            datasets_version.append(imed_utils.__get_commit(path_to_git_folder=dataset_path))
path_log = Path(context.get('path_output'), 'version_info.log')
try:
f = path_log.open(mode="w")
except OSError as err:
logger.error(f"OS error: {err}")
raise Exception("Have you selected a log folder, and do you have write permissions for that folder?")
# IVADOMED
f.write('IVADOMED TOOLBOX\n----------------\n(' + ivadomed_version + ')')
# DATASETS
path_data = imed_utils.format_path_data(path_data)
f.write('\n\n\nDATASET VERSION\n---------------\n')
f.write('The following BIDS dataset(s) were used for training.\n')
for i_dataset in range(len(path_data)):
if datasets_version[i_dataset] not in ['', '?!?']:
f.write(str(i_dataset + 1) + '. ' + path_data[i_dataset] + ' - Dataset Annex version: ' + datasets_version[
i_dataset] + '\n')
else:
f.write(str(i_dataset + 1) + '. ' + path_data[i_dataset] + ' - Dataset is not Annexed.\n')
# SYSTEM INFO
f.write('\n\nSYSTEM INFO\n-------------\n')
platform_running = sys.platform
if platform_running.find('darwin') != -1:
os_running = 'osx'
elif platform_running.find('linux') != -1:
os_running = 'linux'
    elif platform_running.find('win32') != -1 or platform_running.find('win64') != -1:
os_running = 'windows'
else:
os_running = 'NA'
f.write('OS: ' + os_running + ' (' + platform.platform() + ')\n')
# Display number of CPU cores
f.write('CPU cores: Available: {}\n\n\n\n\n'.format(multiprocessing.cpu_count()))
# USER INPUTS
f.write('CONFIG INPUTS\n-------------\n')
if sys.version_info[0] > 2:
for k, v in context.items():
f.write(str(k) + ': ' + str(v) + '\n') # Making sure all numbers are converted to strings
else:
for k, v in context.viewitems(): # Python2
f.write(str(k) + ': ' + str(v) + '\n')
f.close()
def run_main():
imed_utils.init_ivadomed()
parser = get_parser()
args = parser.parse_args()
# Get context from configuration file
path_config_file = args.config
context = imed_config_manager.ConfigurationManager(path_config_file).get_config()
context["command"] = imed_utils.get_command(args, context)
context["path_output"] = imed_utils.get_path_output(args, context)
context["loader_parameters"]["path_data"] = imed_utils.get_path_data(args, context)
# Run command
run_command(context=context,
n_gif=args.gif if args.gif is not None else 0,
thr_increment=args.thr_increment if args.thr_increment else None,
resume_training=bool(args.resume_training))
if __name__ == "__main__":
run_main()
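# Programmatic sketch of what run_main() does (hedged; the config path is hypothetical):
#
#   context = imed_config_manager.ConfigurationManager("config.json").get_config()
#   context["command"] = "train"
#   context["path_output"] = "./results"
#   best_train_dice, best_train_loss, best_val_dice, best_val_loss = run_command(context)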
|
11516021
|
import time
from nose2 import events
ERROR = "error"
FAIL = "failed"
SKIP = "skipped"
PASS = "passed"
SUBTEST = "subtest"
__unittest = True
class PluggableTestResult(object):
"""Test result that defers to plugins.
All test outcome recording and reporting is deferred to plugins,
which are expected to implement :func:`startTest`, :func:`stopTest`,
:func:`testOutcome`, and :func:`wasSuccessful`.
:param session: Test run session.
.. attribute :: shouldStop
When ``True``, test run should stop before running another test.
"""
def __init__(self, session):
self.session = session
self.shouldStop = False
# XXX TestCase.subTest expects a result.failfast attribute
self.failfast = False
def startTest(self, test):
"""Start a test case.
Fires :func:`startTest` hook.
"""
event = events.StartTestEvent(test, self, time.time())
self.session.hooks.startTest(event)
def stopTest(self, test):
"""Stop a test case.
Fires :func:`stopTest` hook.
"""
event = events.StopTestEvent(test, self, time.time())
self.session.hooks.stopTest(event)
def addError(self, test, err):
"""Test case resulted in error.
Fires :func:`setTestOutcome` and :func:`testOutcome` hooks.
"""
event = events.TestOutcomeEvent(test, self, ERROR, err)
self.session.hooks.setTestOutcome(event)
self.session.hooks.testOutcome(event)
def addFailure(self, test, err):
"""Test case resulted in failure.
Fires :func:`setTestOutcome` and :func:`testOutcome` hooks.
"""
event = events.TestOutcomeEvent(test, self, FAIL, err)
self.session.hooks.setTestOutcome(event)
self.session.hooks.testOutcome(event)
def addSubTest(self, test, subtest, err):
"""Called at the end of a subtest.
Fires :func:`setTestOutcome` and :func:`testOutcome` hooks.
"""
event = events.TestOutcomeEvent(subtest, self, SUBTEST, err)
self.session.hooks.setTestOutcome(event)
self.session.hooks.testOutcome(event)
def addSuccess(self, test):
"""Test case resulted in success.
Fires :func:`setTestOutcome` and :func:`testOutcome` hooks.
"""
event = events.TestOutcomeEvent(test, self, PASS, expected=True)
self.session.hooks.setTestOutcome(event)
self.session.hooks.testOutcome(event)
def addSkip(self, test, reason):
"""Test case was skipped.
Fires :func:`setTestOutcome` and :func:`testOutcome` hooks.
"""
event = events.TestOutcomeEvent(test, self, SKIP, reason=reason)
self.session.hooks.setTestOutcome(event)
self.session.hooks.testOutcome(event)
def addExpectedFailure(self, test, err):
"""Test case resulted in expected failure.
Fires :func:`setTestOutcome` and :func:`testOutcome` hooks.
"""
event = events.TestOutcomeEvent(test, self, FAIL, err, expected=True)
self.session.hooks.setTestOutcome(event)
self.session.hooks.testOutcome(event)
def addUnexpectedSuccess(self, test):
"""Test case resulted in unexpected success.
Fires :func:`setTestOutcome` and :func:`testOutcome` hooks.
"""
event = events.TestOutcomeEvent(test, self, PASS)
self.session.hooks.setTestOutcome(event)
self.session.hooks.testOutcome(event)
def wasSuccessful(self):
"""Was test run successful?
Fires :func:`wasSuccessful` hook, and returns ``event.success``.
"""
# assume failure; plugins must explicitly declare success
try:
return self._success
except AttributeError:
event = events.ResultSuccessEvent(self, None)
self.session.hooks.wasSuccessful(event)
self._success = event.success
return self._success
def stop(self):
"""Stop test run.
Fires :func:`resultStop` hook, and sets ``self.shouldStop`` to
``event.shouldStop``.
"""
event = events.ResultStopEvent(self, True)
self.session.hooks.resultStop(event)
self.shouldStop = event.shouldStop
def __repr__(self):
return "<%s>" % self.__class__.__name__
|
11516051
|
import importlib
import warnings
from typing import List
# global ceph_flag, mc_flag, linklink_flag
# ceph_flag, mc_flag, linklink_flag = True, True, True
# def try_import_ceph():
# """
# Overview:
# Try import ceph module, if failed, return None
# Returns:
# module: imported module, or None when ceph not found
# """
# global ceph_flag
# try:
# import ceph
# except ModuleNotFoundError as e:
# if ceph_flag:
# warnings.warn(
# "You have not installed ceph package! If you are not run locally and testing, "
# "ask coworker for help."
# )
# ceph = None
# ceph_flag = False
# return ceph
# def try_import_mc():
# """
# Overview:
# Try import mc module, if failed, return None
# Returns:
# module: imported module, or None when mc not found
# """
# global mc_flag
# try:
# import mc
# except ModuleNotFoundError as e:
# if mc_flag:
# warnings.warn(
# "You have not installed mc package! If you are not run locally and testing, "
# "ask coworker for help."
# )
# mc = None
# mc_flag = False
# return mc
# def try_import_link():
# global linklink_flag
# """
# Overview:
# Try import linklink module, if failed, import di.tests.fake_linklink instead
# Returns:
# module: imported module (may be fake_linklink)
# """
# try:
# import linklink as link
# except ModuleNotFoundError as e:
# if linklink_flag:
# warnings.warn(
# "You have not installed linklink package! If you are not run locally and testing, "
# "ask coworker for help. We will run a fake linklink."
# "Refer to di.utils.fake_linklink.py for details."
# )
# from .fake_linklink import link
# linklink_flag = False
# return link
def import_module(modules: List[str]) -> None:
"""
Overview:
        Import several modules given as a list of module names
Args:
- modules (:obj:`list` of `str`): List of module names
"""
for name in modules:
importlib.import_module(name)
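# Usage sketch (hedged; module names are illustrative):
#   import_module(['json', 'csv'])   # each name is imported via importlib.import_module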
|
11516069
|
import os
import sys
from datetime import datetime
import csv
import json
# Layer code, like parsing_lib, is added to the path by AWS.
# To test locally (e.g. via pytest), we have to modify sys.path.
# pylint: disable=import-error
try:
import parsing_lib
except ImportError:
sys.path.append(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir, 'common'))
import parsing_lib
_AGE = "NU_IDADE_N"
_AGE_TYPE = "TP_IDADE"
_GENDER = "CS_SEXO"
_ETHNICITY = "CS_RACA"
_STATE = "SG_UF_NOT"
_MUNICIPALITY = "CO_MUN_NOT"
_DATE_CONFIRMED = "DT_NOTIFIC"
_COVID_CONFIRMED = "CLASSI_FIN"
_SEROLOGICAL_TEST_IGG = "RES_IGG"
_SEROLOGICAL_TEST_IGM = "RES_IGM"
_SEROLOGICAL_TEST_IGA = "RES_IGA"
_PCR_TEST = "PCR_SARS2"
_DATE_SYMPTOMS = "DT_SIN_PRI"
_PREGNANCY = "CS_GESTANT"
_FEVER = "FEBRE"
_COUGH = "TOSSE"
_SORE_THROAT = "GARGANTA"
_DYSPNEA = "DISPNEIA"
_BREATHING_DIFFICULTY = "DESC_RESP"
_LOW_OXYGEN = "SATURACAO"
_DIARRHOEA = "DIARREIA"
_VOMITING = "VOMITO"
_STOMACH_ACHE = "DOR_ABD"
_FATIGUE = "FADIGA"
_SMELL = "PERD_OLFT"
_TASTE = "PERD_PALA"
_HEART = "CARDIOPATI"
_HEMATOLOGIC = "HEMATOLOGI"
_DOWN_SYND = "SIND_DOWN"
_LIVER = "HEPATICA"
_ASTHMA = "ASMA"
_DIABETES = "DIABETES"
_NEUROLOGIC = "NEUROLOGIC"
_LUNG = "PNEUMOPATI"
_KIDNEY = "RENAL"
_OBESITY = "OBESIDADE"
_OTHER_COMORB = "MORB_DESC"
_HOSPITALIZED = "HOSPITAL"
_DATE_HOSP = "DT_INTERNA"
_ICU = "UTI"
_ICU_ENTRY = "DT_ENTUTI"
_ICU_DISCHARGE = "DT_SAIDUTI"
_OUTCOME = "EVOLUCAO"
_DATE_OUTCOME = "DT_EVOLUCA"
_TRAVEL_YN = "HISTO_VGM"
_TRAVEL_COUNTRY = "PAIS_VGM"
_TRAVEL_OUT = "DT_VGM"
_TRAVEL_RETURN = "DT_RT_VGM"
_COMORBIDITIES_MAP = {
"DIABETES": "diabetes mellitus",
"CS_GESTANT": "pregnancy",
"RENAL": "chronic kidney disease",
"CARDIOPATI": "heart disease",
"OBESIDADE": "obesity",
"SIND_DOWN": "Down syndrome",
"HEPATICA": "liver disease",
"ASMA": "asthma",
"NEUROLOGIC": "nervous system disease",
"PNEUMOPATI": "respiratory system disease",
}
_SYMPTOMS_MAP = {
"PERD_PALA": "taste alteration",
"PERD_OLFT": "smell alteration",
"GARGANTA": "throat pain",
"DISPNEIA": "dyspnea",
"FEBRE": "fever",
"TOSSE": "cough",
# According to symptom ontology, breathing difficulty is exact synonym of dyspnea
"DESC_RESP": "dyspnea",
"SATURACAO": "hypoxemia",
"DIARREIA": "diarrhoea",
"VOMITO": "vomiting",
"DOR_ABD": "abdominal discomfort",
"FADIGA": "fatigue"
}
# 'UF_name' maps the UF (Unidade Federativa, admin level 1) codes to their respective names
# 'code_name_latlong' maps the municipality codes obtained from https://www.ibge.gov.br/en/geosciences/territorial-organization/territorial-meshes/2786-np-municipal-mesh/18890-municipal-mesh.html?=&t=acesso-ao-produto to their respective names and lat/longs. The final digit is omitted as it is not included in the data.
# 'country_iso2' maps Spanish country names to their ISO-2 codes, and also includes common alternative spellings of country names as observed in data (e.g. lack of accents, common typos)
# 'country_translate_lat_long' maps country ISO-2 codes to longitude/latitude of country centroids, obtained from https://raw.githubusercontent.com/google/dspl/master/samples/google/canonical/countries.csv, as well as the corresponding country name in English
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "dictionaries.json"), encoding='utf-8') as json_file:
dictionaries = json.load(json_file)
_UF_NAME_MAP = dictionaries["UF_name"]
_CODE_NAME_LATLONG = dictionaries["code_name_latlong"]
_COUNTRY_ISO2_MAP = dictionaries["country_iso2"]
# Date function for ADI and mongoimport format
def convert_date(raw_date: str, reference_date=None, dataserver=True, adi=True):
    """
    Convert a raw date field into a value interpretable by the dataserver.
    The timestamp is dropped since it is always midnight.
    Set dataserver to False to return a version appropriate for notes.
    Set adi to True to return a version compatible with automated data ingestion.
    A reference_date may be supplied so that any case dated after it returns None;
    a reference date of midnight on Feb 21st would only return cases up to the end of Feb 20th.
    Hospitalization dates are sometimes given as dates in the future, which aren't allowed.
    """
    today = datetime.now().date()
    if adi is False:
        if raw_date and datetime.strptime(raw_date, "%d/%m/%Y") < datetime.strptime(str(today), "%Y-%m-%d"):
            date = datetime.strptime(raw_date, "%d/%m/%Y")
            if not dataserver:
                return date.strftime("%m/%d/%Y")
            return {"$date": f"{date.isoformat()}Z"}
    else:
        if raw_date and datetime.strptime(raw_date, "%d/%m/%Y") < datetime.strptime(str(today), "%Y-%m-%d"):
            date = datetime.strptime(raw_date, "%d/%m/%Y")
            return date.strftime("%m/%d/%YZ")
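# Illustrative behaviour of convert_date (assuming the raw date lies in the past relative to today):
#   convert_date("21/02/2021")                              -> "02/21/2021Z"  (ADI format)
#   convert_date("21/02/2021", adi=False)                   -> {"$date": "2021-02-21T00:00:00Z"}  (mongoimport)
#   convert_date("21/02/2021", dataserver=False, adi=False) -> "02/21/2021"  (notes)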
def convert_location(state, municipality):
location = {}
geometry = {}
location["country"] = "Brazil"
location["administrativeAreaLevel1"] = _UF_NAME_MAP[state]
location["administrativeAreaLevel2"] = _CODE_NAME_LATLONG[municipality]["name"]
location["geoResolution"] = "Admin2"
location["name"] = ", ".join([_CODE_NAME_LATLONG[municipality]["name"], _UF_NAME_MAP[state], "Brazil"])
geometry["latitude"] = _CODE_NAME_LATLONG[municipality]["latitude"]
geometry["longitude"] = _CODE_NAME_LATLONG[municipality]["longitude"]
location["geometry"] = geometry
return location
def convert_gender(raw_gender: str):
if raw_gender == "M":
return "Male"
elif raw_gender == "F":
return "Female"
elif raw_gender == "I":
return "Other"
def convert_test(serological_igg, serological_igm, serological_iga, pcr):
if pcr == "1":
return "PCR test"
if any(i == "1" for i in [serological_igg, serological_igm, serological_iga]):
return "Serological test"
def convert_events(date_confirmed, date_symptoms, serological_igg, serological_igm, serological_iga, pcr, hospitalized, date_hospitalized, icu, date_icu_entry, date_icu_discharge, outcome, date_outcome):
events = [
{
"name": "confirmed",
"dateRange": {
"start": convert_date(date_confirmed),
"end": convert_date(date_confirmed)
},
"value": convert_test(serological_igg, serological_igm, serological_iga, pcr)
}
]
if date_symptoms:
events.append(
{
"name": "onsetSymptoms",
"dateRange": {
"start": convert_date(date_symptoms),
"end": convert_date(date_symptoms)
},
}
)
if hospitalized == "1":
events.append(
{
"name": "hospitalAdmission",
"value": "Yes",
"dateRange": {
"start": convert_date(date_hospitalized),
"end": convert_date(date_hospitalized)
}
}
)
elif hospitalized == "2":
events.append(
{
"name": "hospitalAdmission",
"value": "No"
}
)
if icu == "1":
events.append(
{
"name": "icuAdmission",
"value": "Yes",
"dateRange": {
"start": convert_date(date_icu_entry),
"end": convert_date(date_icu_discharge)
}
}
)
elif icu == "2":
events.append(
{
"name": "icuAdmission",
"value": "No"
}
)
if outcome == "1":
events.append(
{
"name": "outcome",
"value": "Recovered",
"dateRange": {
"start": convert_date(date_outcome),
"end": convert_date(date_outcome)
}
}
)
#outcome == 3 signifies death from other causes
elif outcome == "2" or outcome == "3":
events.append(
{
"name": "outcome",
"value": "Death",
"dateRange": {
"start": convert_date(date_outcome),
"end": convert_date(date_outcome)
}
}
)
return events
def convert_symptoms(taste, smell, throat, dyspnea, fever, cough, diff_breathing, hypoxemia, diarrhoea, vomiting, abdominal, fatigue):
symptoms = {}
if any([i=="1" for i in [taste, smell, throat, dyspnea, fever, cough, diff_breathing, hypoxemia, diarrhoea, vomiting, abdominal, fatigue]]):
symptoms["status"] = "Symptomatic"
values = []
if taste == "1":
values.append(_SYMPTOMS_MAP["PERD_PALA"])
if smell == "1":
values.append(_SYMPTOMS_MAP["PERD_OLFT"])
if throat == "1":
values.append(_SYMPTOMS_MAP["GARGANTA"])
if dyspnea == "1":
values.append(_SYMPTOMS_MAP["DISPNEIA"])
if fever == "1":
values.append(_SYMPTOMS_MAP["FEBRE"])
if cough == "1":
values.append(_SYMPTOMS_MAP["TOSSE"])
if diff_breathing == "1":
values.append(_SYMPTOMS_MAP["DESC_RESP"])
if hypoxemia == "1":
values.append(_SYMPTOMS_MAP["SATURACAO"])
if diarrhoea == "1":
values.append(_SYMPTOMS_MAP["DIARREIA"])
if vomiting == "1":
values.append(_SYMPTOMS_MAP["VOMITO"])
if abdominal == "1":
values.append(_SYMPTOMS_MAP["DOR_ABD"])
if fatigue == "1":
values.append(_SYMPTOMS_MAP["FADIGA"])
if values:
#Remove possible duplicate dyspnea entry
symptoms["values"] = list(dict.fromkeys(values))
return symptoms
def convert_preexisting_conditions(diabetes, pregnancy, kidney, heart, obesity, down, liver, asthma, nervous, respiratory, other):
preexistingConditions = {}
values = []
if diabetes == "1":
values.append(_COMORBIDITIES_MAP["DIABETES"])
if any([pregnancy == i for i in ["1", "2", "3", "4"]]):
values.append(_COMORBIDITIES_MAP["CS_GESTANT"])
if kidney == "1":
values.append(_COMORBIDITIES_MAP["RENAL"])
if heart == "1":
values.append(_COMORBIDITIES_MAP["CARDIOPATI"])
if obesity == "1":
values.append(_COMORBIDITIES_MAP["OBESIDADE"])
if down == "1":
values.append(_COMORBIDITIES_MAP["SIND_DOWN"])
if liver == "1":
values.append(_COMORBIDITIES_MAP["HEPATICA"])
if asthma == "1":
values.append(_COMORBIDITIES_MAP["ASMA"])
if nervous == "1":
values.append(_COMORBIDITIES_MAP["NEUROLOGIC"])
if respiratory == "1":
values.append(_COMORBIDITIES_MAP["PNEUMOPATI"])
if other:
values.append(str('other comorbidity listed as: ' + other))
if values:
preexistingConditions["hasPreexistingConditions"] = True
preexistingConditions["values"] = values
return preexistingConditions
def convert_demographics(gender: str, age: str, age_type, ethnicity):
demo = {}
demo["gender"] = convert_gender(gender)
# 3 indicates an age in years
if age_type == "3":
demo["ageRange"] = {"start": float(age), "end": float(age)}
# 2 indicates an age in months
elif age_type == "2":
demo["ageRange"] = {"start": float(age)/12, "end": float(age)/12}
# 1 indicates an age in days; 365.25 is average number of days a year
elif age_type == "1":
demo["ageRange"] = {"start": float(age)/365.25, "end": float(age)/365.25}
demo["ethnicity"] = convert_ethnicity(ethnicity)
return demo
def convert_ethnicity(ethnicity: str):
if ethnicity == "2":
return "Black"
elif ethnicity == "4":
return "Mixed"
elif ethnicity == "3":
return "Asian"
elif ethnicity == "1":
return "White"
elif ethnicity == "5":
return "Indigenous"
def convert_travel(travel_yn, travel_country, travel_out, travel_in):
'''
International travel within 14 days before symptoms appeared is recorded.
'''
if travel_yn == "1":
travel = {}
travel_countries = []
country_ISO2 = _COUNTRY_ISO2_MAP[travel_country.lower()]
travel_countries.append({"location": parsing_lib.geocode_country(country_ISO2)})
travel["traveledPrior30Days"] = True
travel["travel"] = travel_countries
travel["dateRange"] = {"start": convert_date(travel_out), "end": convert_date(travel_in)}
if travel:
return travel
def convert_notes(outcome):
raw_notes = []
if outcome == "3":
raw_notes.append("Patient died from other causes")
if raw_notes:
return (", ").join(raw_notes)
def parse_cases(raw_data_file: str, source_id: str, source_url: str):
"""
Parses G.h-format case data from raw API data.
"""
with open(raw_data_file, "r") as f:
reader = csv.DictReader(f, delimiter=";")
for row in reader:
confirmation_date = convert_date(row[_DATE_CONFIRMED])
if confirmation_date is not None and row[_COVID_CONFIRMED] == "5":
try:
case = {
"caseReference": {"sourceId": source_id, "sourceUrl": source_url},
"location": convert_location(row[_STATE], row[_MUNICIPALITY]),
"events": convert_events(
row[_DATE_CONFIRMED],
row[_DATE_SYMPTOMS],
row[_SEROLOGICAL_TEST_IGG],
row[_SEROLOGICAL_TEST_IGM],
row[_SEROLOGICAL_TEST_IGA],
row[_PCR_TEST],
row[_HOSPITALIZED],
row[_DATE_HOSP],
row[_ICU],
row[_ICU_ENTRY],
row[_ICU_DISCHARGE],
row[_OUTCOME],
row[_DATE_OUTCOME]
),
"symptoms": convert_symptoms(
row[_TASTE],
row[_SMELL],
row[_SORE_THROAT],
row[_DYSPNEA],
row[_FEVER],
row[_COUGH],
row[_BREATHING_DIFFICULTY],
row[_LOW_OXYGEN],
row[_DIARRHOEA],
row[_VOMITING],
row[_STOMACH_ACHE],
row[_FATIGUE],
),
"demographics": convert_demographics(
row[_GENDER], row[_AGE], row[_AGE_TYPE], row[_ETHNICITY]
),
"preexistingConditions": convert_preexisting_conditions(
row[_DIABETES],
row[_PREGNANCY],
row[_KIDNEY],
row[_HEART],
row[_OBESITY],
row[_DOWN_SYND],
row[_LIVER],
row[_ASTHMA],
row[_NEUROLOGIC],
row[_LUNG],
row[_OTHER_COMORB]
),
"travelHistory": convert_travel(
row[_TRAVEL_YN],
row[_TRAVEL_COUNTRY],
row[_TRAVEL_OUT],
row[_TRAVEL_RETURN]
)
}
notes = convert_notes(
row[_OUTCOME]
)
if notes:
case["restrictedNotes"] = notes
yield case
except ValueError as ve:
raise ValueError(f"error converting case: {ve}")
def event_handler(event):
return parsing_lib.run(event, parse_cases)
|
11516149
|
import copy
import json
from file import temp_file
from pg import connection, transaction
from process import run_process
_SCHEMA_SQL = """
CREATE TABLE grandparent (
id int PRIMARY KEY,
name text NOT NULL
);
CREATE TABLE parent (
id int PRIMARY KEY,
grandparent_id int NOT NULL REFERENCES grandparent (id),
name text NOT NULL
);
CREATE TABLE child (
id int PRIMARY KEY,
parent_id int NOT NULL REFERENCES parent (id)
);
CREATE TABLE child_full (
id int PRIMARY KEY,
grandparent_name text NOT NULL,
parent_name text NOT NULL
);
"""
_SCHEMA_JSON = {
"context": ["context.example"],
"id": "test",
"tables": {
"child": {
"name": "child",
"targetKey": ["child.id"],
},
"grandparent": {
"join": "parent",
"joinOn": "grandparent.id = parent.grandparent_id",
"name": "grandparent",
},
"parent": {
"key": [{"name": "id"}],
"join": "child",
"joinKey": ["id"],
"joinMode": "async",
"joinOn": "parent.id = child.parent_id",
"name": "parent",
},
},
"targetTable": {
"key": ["id"],
"columns": ["id", "grandparent_name", "parent_name"],
"name": "child_full",
"schema": "public",
},
"targetQuery": """
SELECT c.id, g.name, p.name
FROM ${key} AS d
JOIN child c ON d.id = c.id
JOIN parent p ON c.parent_id = p.id
JOIN grandparent AS g ON p.grandparent_id = g.id
""",
}
def test_join_async(pg_database):
with temp_file("denorm-") as schema_file:
with connection("") as conn, transaction(conn) as cur:
cur.execute(_SCHEMA_SQL)
with open(schema_file, "w") as f:
json.dump(_SCHEMA_JSON, f)
output = run_process(
[
"denorm",
"create-join",
"--schema",
schema_file,
]
)
with connection("") as conn, transaction(conn) as cur:
# print(output.decode("utf-8"))
cur.execute(output.decode("utf-8"))
with connection("") as conn, transaction(conn) as cur:
cur.execute(
"""
INSERT INTO grandparent (id, name)
VALUES (9, '_');
INSERT INTO parent (id, grandparent_id, name)
VALUES (1, 9, 'A'), (2, 9, 'B');
INSERT INTO child (id, parent_id)
VALUES (1, 1), (2, 1), (3, 2);
"""
)
with connection("") as conn:
conn.autocommit = True
with conn.cursor() as cur:
while True:
cur.execute("SELECT test__pcs__parent(10)")
(result,) = cur.fetchone()
if not result:
break
with connection("") as conn, transaction(conn) as cur:
cur.execute("SELECT * FROM child_full ORDER BY id")
result = cur.fetchall()
assert result == [(1, "_", "A"), (2, "_", "A"), (3, "_", "B")]
with connection("") as conn, transaction(conn) as cur:
cur.execute("UPDATE parent SET name = 'C' WHERE id = 2")
with connection("") as conn, transaction(conn) as cur:
cur.execute("SELECT * FROM child_full ORDER BY id")
result = cur.fetchall()
assert result == [(1, "_", "A"), (2, "_", "A"), (3, "_", "B")]
# import time
# time.sleep(1000000)
with connection("") as conn:
conn.autocommit = True
with conn.cursor() as cur:
while True:
cur.execute("SELECT test__pcs__parent(10)")
(result,) = cur.fetchone()
if not result:
break
with connection("") as conn, transaction(conn) as cur:
cur.execute("TABLE test__que__parent")
result = cur.fetchall()
assert result == []
with connection("") as conn, transaction(conn) as cur:
cur.execute("SELECT * FROM child_full ORDER BY id")
result = cur.fetchall()
assert result == [(1, "_", "A"), (2, "_", "A"), (3, "_", "C")]
|
11516188
|
from .vcf_combiner import run_combiner
from .sparse_split_multi import sparse_split_multi
from ...vds import lgt_to_gt
from .densify import densify
__all__ = [
'run_combiner',
'sparse_split_multi',
'lgt_to_gt',
'densify',
]
|
11516254
|
from sqlalchemy.dialects import registry
registry.register("drill", "sqlalchemy_drill.sadrill", "DrillDialect_sadrill")
registry.register("drill.sadrill", "sqlalchemy_drill.sadrill", "DrillDialect_sadrill")
from sqlalchemy.testing.plugin.pytestplugin import *
|
11516257
|
import torchvision.transforms as T
from . import joint_transforms
from .rand_augment import RandAugment
class Transforms:
@classmethod
def get_transform(cls, transform_type):
try:
return getattr(cls, transform_type)
except AttributeError:
print(f"Invalid transform: {transform_type}, using default transform.")
return cls.default
class ClassificationTransforms(Transforms):
default = T.Compose(
[
T.Resize(224),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
rand_augment = T.Compose(
[
RandAugment(1, 6.0),
default,
]
)
class SegmentationTransforms(Transforms):
default = T.Compose(
[
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
joint_default = joint_transforms.Compose(
[
joint_transforms.RandomSizeAndCrop(
512,
crop_nopad=False,
scale_min=0.75,
scale_max=1.25,
),
joint_transforms.Resize(512),
joint_transforms.RandomHorizontallyFlip(),
]
)
color_jitter = T.Compose(
[
T.ColorJitter(0.4, 0.4, 0.4, 0.4),
default,
]
)
class TwoCropsTransform:
"""Take two random crops of one image as the query and key."""
def __init__(self, base_transform):
self.base_transform = base_transform
def __call__(self, x):
q = self.base_transform(x)
k = self.base_transform(x)
return [q, k]
|
11516371
|
import yaml
import os
dirs = os.listdir(".")
#print(dirs)
#basename = "testing"
for basename in dirs:
try:
versions = os.listdir("./%s" % basename)
except NotADirectoryError:
continue
for version in versions:
filepath = "%s/%s/api.yaml" % (basename, version)
try:
with open(filepath, "r") as tmp:
                ret = yaml.safe_load(tmp.read())
newname = ret["name"].lower().replace(" ", "-", -1).replace(".", "-", -1)
if newname != basename:
print("Bad name: %s vs %s" % (basename, newname))
if ret["app_version"] != version:
print("Bad version (%s): %s vs %s" % (basename, version, ret["app_version"]))
#else:
# print("%s:%s is valid" % (basename, version))
except (NotADirectoryError, FileNotFoundError) as e:
#print("Error inner file: %s" % e)
pass
#for item in
|
11516374
|
def frombin(x):
vv = 0
for b in x:
vv = vv * 2 + int(b)
return vv
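# e.g. frombin("1010") == 10 and frombin([1, 0, 1]) == 5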
def lmk_output(v):
vv = frombin(v)
# print "# %8.8x"%vv
print("0 101")
print(("0 %3.3x" % (vv >> 24)))
print(("0 %3.3x" % ((vv >> 16) & 0xff)))
print(("0 %3.3x" % ((vv >> 8) & 0xff)))
print(("0 %3.3x" % ((vv) & 0xff)))
print("0 100")
# LMK01801 register names taken directly from datasheet
# R0
CLKin1_MUX = "01" # divide
CLKin1_DIV = "000" # 8
CLKin0_MUX = "01" # divide
CLKin0_DIV = "111" # 7
CLKin1_BUF_TYPE = "0" # bipolar
CLKin0_BUF_TYPE = "0" # bipolar
CLKout12_13_PD = "0"
CLKout8_11_PD = "0"
CLKout4_7_PD = "0"
CLKout0_3_PD = "0"
POWERDOWN = "0"
RESET = "1"
R0 = ("01001000" + CLKin1_MUX + CLKin1_DIV + CLKin0_MUX + CLKin0_DIV + "11" + CLKin1_BUF_TYPE +
CLKin0_BUF_TYPE + CLKout12_13_PD + CLKout8_11_PD + CLKout4_7_PD + CLKout0_3_PD + POWERDOWN +
RESET + "0000")
lmk_output(R0)
RESET = "0"
R0 = ("01001000" + CLKin1_MUX + CLKin1_DIV + CLKin0_MUX + CLKin0_DIV + "11" + CLKin1_BUF_TYPE +
CLKin0_BUF_TYPE + CLKout12_13_PD + CLKout8_11_PD + CLKout4_7_PD + CLKout0_3_PD + POWERDOWN +
RESET + "0000")
lmk_output(R0)
# R1
CLKout7_TYPE = "0000" # Powerdown
CLKout6_TYPE = "0000" # Powerdown
CLKout5_TYPE = "0001" # LVPECL
CLKout4_TYPE = "0000" # Powerdown
CLKout3_TYPE = "000" # Powerdown
CLKout2_TYPE = "110" # CMOS J13 test point
CLKout1_TYPE = "001" # LVDS DAC2
CLKout0_TYPE = "001" # LVDS DAC1
R1 = (CLKout7_TYPE + CLKout6_TYPE + CLKout5_TYPE + CLKout4_TYPE + CLKout3_TYPE + CLKout2_TYPE +
CLKout1_TYPE + CLKout0_TYPE + "0001")
lmk_output(R1)
# R2
CLKout13_TYPE = "0000" # Powerdown
CLKout12_TYPE = "0000" # Powerdown
CLKout11_TYPE = "0110" # CMOS J24 test point
CLKout10_TYPE = "0001" # LVDS J20
CLKout9_TYPE = "0000" # Powerdown
CLKout8_TYPE = "0000" # Powerdown
R2 = ("0000" + CLKout13_TYPE + CLKout12_TYPE + CLKout11_TYPE + CLKout10_TYPE + CLKout9_TYPE +
CLKout8_TYPE + "0010")
lmk_output(R2)
# R3
SYNC1_AUTO = "0"
SYNC0_AUTO = "0"
SYNC1_FAST = "1"
SYNC0_FAST = "1"
NO_SYNC_CLKout12_13 = "0"
NO_SYNC_CLKout8_11 = "0"
NO_SYNC_CLKout4_7 = "0"
NO_SYNC_CLKout0_3 = "0"
SYNC1_POL_INV = "1"
SYNC0_POL_INV = "1"
SYNC1_QUAL = "00"
CLKout12_13_HS = "0"
CLKout12_13_ADLY = "000000"
R3 = ("00010" + SYNC1_AUTO + SYNC0_AUTO + SYNC1_FAST + SYNC0_FAST + "011" + NO_SYNC_CLKout12_13 +
NO_SYNC_CLKout8_11 + NO_SYNC_CLKout4_7 + NO_SYNC_CLKout0_3 + SYNC1_POL_INV + SYNC0_POL_INV +
"0" + SYNC1_QUAL + CLKout12_13_HS + CLKout12_13_ADLY + "0011")
lmk_output(R3)
# R4
CLKout12_13_DDLY = "0000000000"
R4 = "000000000000000000" + CLKout12_13_DDLY + "0100"
lmk_output(R4)
# R5
CLKout12_13_DIV = "00000000001"
CLKout13_ADLY_SEL = "0"
CLKout12_ADLY_SEL = "0"
CLKout8_11_DIV = "001" # 1
CLKout4_7_DIV = "001" # 1
CLKout0_3_DIV = "010" # 2
R5 = ("0000" + CLKout12_13_DIV + "00" + CLKout13_ADLY_SEL + CLKout12_ADLY_SEL + CLKout8_11_DIV +
CLKout4_7_DIV + CLKout0_3_DIV + "0101")
lmk_output(R5)
# R15
uWireLock = "0"
R15 = "000000000000000000000101111" + uWireLock + "1111"
lmk_output(R15)
# AD9653
def adc_output(c, a, v):
vv = frombin(v)
print(("0 %3.3x" % (c + 0x100)))
print(("0 %3.3x" % ((
(a + 0) >> 8) & 0xff))) # w=0 for transfer length 1 octet
print(("0 %3.3x" % (a & 0xff)))
print(("0 %3.3x" % vv))
print("0 100")
def adc_read(c, a):
print(("0 %3.3x" % (c + 0x100)))
print(("0 %3.3x" % ((
(a + 32768) >> 8) & 0xff))) # w=0 for transfer length 1 octet
print(("0 %3.3x" % (a & 0xff)))
print(("0 %3.3x" % (c + 0x190))) # turn read bit on and P2_ADC_SDIO_DIR off
print("0 000") # pad data to shift out
print("0 100")
for chip in (2, 3):
adc_output(chip, 0, "00011000") # MSB first, SDO inactive
adc_read(chip, 1)
adc_read(chip, 2)
def ad7794_status():
print("0 107")
print("0 048")
print("0 187")
print("0 055") # padding, supposed to cme back 00
print("0 055") # padding, supposed to cme back 0a
print("0 100")
ad7794_status()
|
11516375
|
def string_with_arrows(text, pos_start, pos_end):
result = ''
# Calculate indices
idx_start = max(text.rfind('\n', 0, pos_start.idx), 0)
idx_end = text.find('\n', idx_start + 1)
if idx_end < 0: idx_end = len(text)
# Generate each line
line_count = pos_end.ln - pos_start.ln + 1
for i in range(line_count):
# Calculate line columns
line = text[idx_start:idx_end]
col_start = pos_start.col if i == 0 else 0
col_end = pos_end.col if i == line_count - 1 else len(line) - 1
# Append to result
result += line + '\n'
result += ' ' * col_start + '^' * (col_end - col_start)
# Re-calculate indices
idx_start = idx_end
idx_end = text.find('\n', idx_start + 1)
if idx_end < 0: idx_end = len(text)
return result.replace('\t', '')
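# Illustrative usage (pos_start/pos_end are assumed to expose .idx, .ln and .col):
#   string_with_arrows("let x = ;", pos_start, pos_end)
# returns the offending line(s), each followed by a line of '^' carets spanning the error columns.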
|
11516411
|
import kitty.conf.utils as ku
import kitty.key_encoding as ke
from kitty import keys
import re
def main():
""" needed but not used """
pass
def actions(extended):
yield keys.defines.GLFW_PRESS
if extended:
yield keys.defines.GLFW_RELEASE
def convert_mods(mods):
"""
converts key_encoding.py style mods to glfw style mods as required by key_to_bytes
"""
glfw_mods = 0
if mods & ke.SHIFT:
glfw_mods |= keys.defines.GLFW_MOD_SHIFT
if mods & ke.ALT:
glfw_mods |= keys.defines.GLFW_MOD_ALT
if mods & ke.CTRL:
glfw_mods |= keys.defines.GLFW_MOD_CONTROL
if mods & ke.SUPER:
glfw_mods |= keys.defines.GLFW_MOD_SUPER
return glfw_mods
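# e.g. (illustrative) convert_mods(ke.CTRL | ke.SHIFT) -> keys.defines.GLFW_MOD_CONTROL | keys.defines.GLFW_MOD_SHIFT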
def pass_key(key_combination: str, w):
"""
pass key_combination to the kitty window w.
Args:
key_combination (str): keypress to pass. e.g. ctrl-j
w (kitty window): window to pass the keys
"""
mods, key, is_text = ku.parse_kittens_shortcut(key_combination)
extended = w.screen.extended_keyboard
for action in actions(extended):
sequence = (
('\x1b_{}\x1b\\' if extended else '{}')
.format(
keys.key_to_bytes(
getattr(keys.defines, 'GLFW_KEY_{}'.format(key.upper())),
w.screen.cursor_key_mode, extended, convert_mods(mods), action)
.decode('ascii')))
print(repr(sequence))
w.write_to_child(sequence)
def handle_result(args, result, target_window_id, boss):
""" Main entry point for the kitten. Decide wether to change window or pass
the keypress
Args:
args (list): Extra arguments passed when calling this kitten
[0] (str): kitten name
[1] (str): direction to move
[2] (str): key to pass
The rest of the arguments comes from kitty
"""
# get active window and tab from target_window_id
w = boss.window_id_map.get(target_window_id)
if w is None:
return
    # Check whether one of the keywords appears in the foreground process command line
proc = w.child.foreground_processes[0]['cmdline']
keywords = ['vim', 'nvim', 'ssh', 'tmux']
for keyword in keywords:
if keyword in proc:
pass_key(args[2], w)
return
# keywords not found, move to neighboring window instead
boss.active_tab.neighboring_window(args[1])
handle_result.no_ui = True
|
11516417
|
import os
sep = "/"
def normcase(s):
return s
def normpath(s):
return s
def abspath(s):
if s[0] != "/":
return os.getcwd() + "/" + s
return s
def join(*args):
# TODO: this is non-compliant
if type(args[0]) is bytes:
return b"/".join(args)
else:
return "/".join(args)
def split(path):
if path == "":
return ("", "")
r = path.rsplit("/", 1)
if len(r) == 1:
return ("", path)
head = r[0] # .rstrip("/")
if not head:
head = "/"
return (head, r[1])
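# e.g. split("a/b/c") == ("a/b", "c"), split("file") == ("", "file"), split("/etc") == ("/", "etc")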
def dirname(path):
return split(path)[0]
def basename(path):
return split(path)[1]
def exists(path):
return os.access(path, os.F_OK)
# TODO
lexists = exists
def isdir(path):
import stat
try:
mode = os.stat(path)[0]
return stat.S_ISDIR(mode)
except OSError:
return False
def expanduser(s):
if s == "~" or s.startswith("~/"):
h = os.getenv("HOME")
return h + s[1:]
if s[0] == "~":
# Sorry folks, follow conventions
return "/home/" + s[1:]
return s
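# e.g. with HOME=/home/user: expanduser("~/notes") == "/home/user/notes"; expanduser("~guest") == "/home/guest"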
|
11516419
|
import komand
from .schema import GetActiveConfigInput, GetActiveConfigOutput
# Custom imports below
class GetActiveConfig(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="get_active_config",
description="Fetch the currently active application configuration",
input=GetActiveConfigInput(),
output=GetActiveConfigOutput(),
)
def run(self, params={}):
app_id = params.get("app_id")
config = self.connection.api.get_active_config(app_id)
return {"config": config}
def test(self):
return {}
|
11516433
|
from collections import OrderedDict
import cv2
import logging
import numpy as np
import gym
from typing import Any, List
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.spaces.repeated import Repeated
from ray.rllib.utils.typing import TensorType
ATARI_OBS_SHAPE = (210, 160, 3)
ATARI_RAM_OBS_SHAPE = (128, )
# Only validate env observations vs the observation space every n times in a
# Preprocessor.
OBS_VALIDATION_INTERVAL = 100
logger = logging.getLogger(__name__)
@PublicAPI
class Preprocessor:
"""Defines an abstract observation preprocessor function.
Attributes:
shape (List[int]): Shape of the preprocessed output.
"""
@PublicAPI
def __init__(self, obs_space: gym.Space, options: dict = None):
legacy_patch_shapes(obs_space)
self._obs_space = obs_space
if not options:
from ray.rllib.models.catalog import MODEL_DEFAULTS
self._options = MODEL_DEFAULTS.copy()
else:
self._options = options
self.shape = self._init_shape(obs_space, self._options)
self._size = int(np.product(self.shape))
self._i = 0
@PublicAPI
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
"""Returns the shape after preprocessing."""
raise NotImplementedError
@PublicAPI
def transform(self, observation: TensorType) -> np.ndarray:
"""Returns the preprocessed observation."""
raise NotImplementedError
def write(self, observation: TensorType, array: np.ndarray,
offset: int) -> None:
"""Alternative to transform for more efficient flattening."""
array[offset:offset + self._size] = self.transform(observation)
def check_shape(self, observation: Any) -> None:
"""Checks the shape of the given observation."""
if self._i % OBS_VALIDATION_INTERVAL == 0:
if type(observation) is list and isinstance(
self._obs_space, gym.spaces.Box):
observation = np.array(observation)
try:
if not self._obs_space.contains(observation):
                    raise ValueError(
                        "Observation ({}) outside given space ({})!".format(
                            observation, self._obs_space))
except AttributeError:
raise ValueError(
"Observation for a Box/MultiBinary/MultiDiscrete space "
"should be an np.array, not a Python list.", observation)
self._i += 1
@property
@PublicAPI
def size(self) -> int:
return self._size
@property
@PublicAPI
def observation_space(self) -> gym.Space:
obs_space = gym.spaces.Box(-1., 1., self.shape, dtype=np.float32)
# Stash the unwrapped space so that we can unwrap dict and tuple spaces
# automatically in modelv2.py
classes = (DictFlatteningPreprocessor, OneHotPreprocessor,
RepeatedValuesPreprocessor, TupleFlatteningPreprocessor)
if isinstance(self, classes):
obs_space.original_space = self._obs_space
return obs_space
class GenericPixelPreprocessor(Preprocessor):
"""Generic image preprocessor.
Note: for Atari games, use config {"preprocessor_pref": "deepmind"}
instead for deepmind-style Atari preprocessing.
"""
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
self._grayscale = options.get("grayscale")
self._zero_mean = options.get("zero_mean")
self._dim = options.get("dim")
if self._grayscale:
shape = (self._dim, self._dim, 1)
else:
shape = (self._dim, self._dim, 3)
return shape
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
"""Downsamples images from (210, 160, 3) by the configured factor."""
self.check_shape(observation)
scaled = observation[25:-25, :, :]
if self._dim < 84:
scaled = cv2.resize(scaled, (84, 84))
# OpenAI: Resize by half, then down to 42x42 (essentially mipmapping).
# If we resize directly we lose pixels that, when mapped to 42x42,
# aren't close enough to the pixel boundary.
scaled = cv2.resize(scaled, (self._dim, self._dim))
if self._grayscale:
scaled = scaled.mean(2)
scaled = scaled.astype(np.float32)
# Rescale needed for maintaining 1 channel
scaled = np.reshape(scaled, [self._dim, self._dim, 1])
if self._zero_mean:
scaled = (scaled - 128) / 128
else:
scaled *= 1.0 / 255.0
return scaled
class AtariRamPreprocessor(Preprocessor):
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
return (128, )
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
self.check_shape(observation)
return (observation.astype("float32") - 128) / 128
class OneHotPreprocessor(Preprocessor):
"""One-hot preprocessor for Discrete and MultiDiscrete spaces.
Examples:
>>> self.transform(Discrete(3).sample())
... np.array([0.0, 1.0, 0.0])
>>> self.transform(MultiDiscrete([2, 3]).sample())
... np.array([0.0, 1.0, 0.0, 0.0, 1.0])
"""
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
if isinstance(obs_space, gym.spaces.Discrete):
return (self._obs_space.n, )
else:
return (np.sum(self._obs_space.nvec), )
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
self.check_shape(observation)
arr = np.zeros(self._init_shape(self._obs_space, {}), dtype=np.float32)
if isinstance(self._obs_space, gym.spaces.Discrete):
arr[observation] = 1
else:
for i, o in enumerate(observation):
arr[np.sum(self._obs_space.nvec[:i]) + o] = 1
return arr
@override(Preprocessor)
def write(self, observation: TensorType, array: np.ndarray,
offset: int) -> None:
array[offset:offset + self.size] = self.transform(observation)
class NoPreprocessor(Preprocessor):
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
return self._obs_space.shape
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
self.check_shape(observation)
return observation
@override(Preprocessor)
def write(self, observation: TensorType, array: np.ndarray,
offset: int) -> None:
array[offset:offset + self._size] = np.array(
observation, copy=False).ravel()
@property
@override(Preprocessor)
def observation_space(self) -> gym.Space:
return self._obs_space
class TupleFlatteningPreprocessor(Preprocessor):
"""Preprocesses each tuple element, then flattens it all into a vector.
RLlib models will unpack the flattened output before _build_layers_v2().
"""
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
assert isinstance(self._obs_space, gym.spaces.Tuple)
size = 0
self.preprocessors = []
for i in range(len(self._obs_space.spaces)):
space = self._obs_space.spaces[i]
logger.debug("Creating sub-preprocessor for {}".format(space))
preprocessor = get_preprocessor(space)(space, self._options)
self.preprocessors.append(preprocessor)
size += preprocessor.size
return (size, )
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
self.check_shape(observation)
array = np.zeros(self.shape, dtype=np.float32)
self.write(observation, array, 0)
return array
@override(Preprocessor)
def write(self, observation: TensorType, array: np.ndarray,
offset: int) -> None:
assert len(observation) == len(self.preprocessors), observation
for o, p in zip(observation, self.preprocessors):
p.write(o, array, offset)
offset += p.size
class DictFlatteningPreprocessor(Preprocessor):
"""Preprocesses each dict value, then flattens it all into a vector.
RLlib models will unpack the flattened output before _build_layers_v2().
"""
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
assert isinstance(self._obs_space, gym.spaces.Dict)
size = 0
self.preprocessors = []
for space in self._obs_space.spaces.values():
logger.debug("Creating sub-preprocessor for {}".format(space))
preprocessor = get_preprocessor(space)(space, self._options)
self.preprocessors.append(preprocessor)
size += preprocessor.size
return (size, )
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
self.check_shape(observation)
array = np.zeros(self.shape, dtype=np.float32)
self.write(observation, array, 0)
return array
@override(Preprocessor)
def write(self, observation: TensorType, array: np.ndarray,
offset: int) -> None:
if not isinstance(observation, OrderedDict):
observation = OrderedDict(sorted(observation.items()))
assert len(observation) == len(self.preprocessors), \
(len(observation), len(self.preprocessors))
for o, p in zip(observation.values(), self.preprocessors):
p.write(o, array, offset)
offset += p.size
class RepeatedValuesPreprocessor(Preprocessor):
"""Pads and batches the variable-length list value."""
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
assert isinstance(self._obs_space, Repeated)
child_space = obs_space.child_space
self.child_preprocessor = get_preprocessor(child_space)(child_space,
self._options)
# The first slot encodes the list length.
size = 1 + self.child_preprocessor.size * obs_space.max_len
return (size, )
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
array = np.zeros(self.shape)
if isinstance(observation, list):
for elem in observation:
self.child_preprocessor.check_shape(elem)
else:
pass # ValueError will be raised in write() below.
self.write(observation, array, 0)
return array
@override(Preprocessor)
def write(self, observation: TensorType, array: np.ndarray,
offset: int) -> None:
if not isinstance(observation, list):
raise ValueError("Input for {} must be list type, got {}".format(
self, observation))
elif len(observation) > self._obs_space.max_len:
raise ValueError("Input {} exceeds max len of space {}".format(
observation, self._obs_space.max_len))
# The first slot encodes the list length.
array[offset] = len(observation)
for i, elem in enumerate(observation):
offset_i = offset + 1 + i * self.child_preprocessor.size
self.child_preprocessor.write(elem, array, offset_i)
@PublicAPI
def get_preprocessor(space: gym.Space) -> type:
"""Returns an appropriate preprocessor class for the given space."""
legacy_patch_shapes(space)
obs_shape = space.shape
if isinstance(space, (gym.spaces.Discrete, gym.spaces.MultiDiscrete)):
preprocessor = OneHotPreprocessor
elif obs_shape == ATARI_OBS_SHAPE:
preprocessor = GenericPixelPreprocessor
elif obs_shape == ATARI_RAM_OBS_SHAPE:
preprocessor = AtariRamPreprocessor
elif isinstance(space, gym.spaces.Tuple):
preprocessor = TupleFlatteningPreprocessor
elif isinstance(space, gym.spaces.Dict):
preprocessor = DictFlatteningPreprocessor
elif isinstance(space, Repeated):
preprocessor = RepeatedValuesPreprocessor
else:
preprocessor = NoPreprocessor
return preprocessor
def legacy_patch_shapes(space: gym.Space) -> List[int]:
"""Assigns shapes to spaces that don't have shapes.
This is only needed for older gym versions that don't set shapes properly
for Tuple and Discrete spaces.
"""
if not hasattr(space, "shape"):
if isinstance(space, gym.spaces.Discrete):
space.shape = ()
elif isinstance(space, gym.spaces.Tuple):
shapes = []
for s in space.spaces:
shape = legacy_patch_shapes(s)
shapes.append(shape)
space.shape = tuple(shapes)
return space.shape
|
11516524
|
import argparse
import logging
import string
from keras.optimizers import RMSprop
from keras.utils import plot_model
from pyfiction.agents.ssaqn_agent import SSAQNAgent
from pyfiction.simulators.games.catsimulator2016_simulator import CatSimulator2016Simulator
from pyfiction.simulators.games.machineofdeath_simulator import MachineOfDeathSimulator
from pyfiction.simulators.games.savingjohn_simulator import SavingJohnSimulator
from pyfiction.simulators.games.starcourt_simulator import StarCourtSimulator
from pyfiction.simulators.games.theredhair_simulator import TheRedHairSimulator
from pyfiction.simulators.games.transit_simulator import TransitSimulator
from pyfiction.simulators.text_games.simulators.MySimulator import StoryNode
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
"""
An SSAQN agent that supports leave-one-out generalisation testing
"""
simulators = [CatSimulator2016Simulator(),
MachineOfDeathSimulator(),
SavingJohnSimulator(),
StarCourtSimulator(),
TheRedHairSimulator(),
TransitSimulator()]
test_steps = [
1,
5,
1,
5,
1,
1
]
parser = argparse.ArgumentParser()
parser.add_argument('--simulator',
help='index of a simulator to use for leave-one-out testing [1-6], 0 for training and testing all',
type=int,
default=0)
parser.add_argument('--log_folder',
help='a folder to store logs in, default is "logs"',
type=str,
default="logs")
args = parser.parse_args()
simulator_index = args.simulator
log_folder = args.log_folder
if simulator_index == 0:
train_simulators = simulators
test_simulators = simulators
print('Training and testing on all games:', [simulator.game.name for simulator in simulators])
else:
train_simulators = simulators[:simulator_index - 1] + simulators[simulator_index:]
test_simulators = simulators[simulator_index - 1]
test_steps = test_steps[simulator_index - 1]
print('Training on games:', [simulator.game.name for simulator in train_simulators])
print('Testing on game:', test_simulators.game.name)
# Create the agent and specify maximum lengths of descriptions (in words)
agent = SSAQNAgent(train_simulators=train_simulators, test_simulators=test_simulators, log_folder=log_folder)
# Load or learn the vocabulary (random sampling on this many games could be extremely slow)
agent.initialize_tokens('vocabulary.txt')
optimizer = RMSprop(lr=0.00001)
embedding_dimensions = 16
lstm_dimensions = 32
dense_dimensions = 8
agent.create_model(embedding_dimensions=embedding_dimensions,
lstm_dimensions=lstm_dimensions,
dense_dimensions=dense_dimensions,
optimizer=optimizer)
# Visualize the model
try:
plot_model(agent.model, to_file='model.png', show_shapes=True)
except ImportError as e:
logger.warning("Couldn't print the model image: {}".format(e))
# Iteratively train the agent on five out of the six games or on all six games
# This example seems to converge to the optimal reward in all games but Star Court when training on all games
epochs = 1
for i in range(epochs):
logger.info('Epoch %s', i)
agent.train_online(episodes=8192, batch_size=256, gamma=0.95, epsilon=1, epsilon_decay=0.999,
prioritized_fraction=0.25, test_interval=16, test_steps=test_steps,
log_prefix=str(simulator_index))
# Transfer learning test - train the agent on the previously unseen (only used for testing) game
if simulator_index != 0:
agent.clear_experience()
agent.train_simulators = test_simulators if isinstance(test_simulators, list) else [test_simulators]
agent.train_online(episodes=8192, batch_size=256, gamma=0.95, epsilon=1, epsilon_decay=0.999,
prioritized_fraction=0.25, test_interval=16, test_steps=test_steps,
log_prefix=('transfer' + str(simulator_index)))
|
11516582
|
import argparse
import json
import logging
import os
import time
from typing import Dict, List, Tuple, Union
import grpc
import mlflow
import numpy as np
from PIL import Image
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import accuracy_score
from src.proto import onnx_ml_pb2, predict_pb2, prediction_service_pb2_grpc
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class PytorchImagePreprocessTransformer(
BaseEstimator,
TransformerMixin,
):
def __init__(
self,
image_size: Tuple[int, int] = (32, 32),
prediction_shape: Tuple[int, int, int, int] = (1, 3, 32, 32),
mean_vec: List[float] = [0.485, 0.456, 0.406],
stddev_vec: List[float] = [0.229, 0.224, 0.225],
):
self.image_size = image_size
self.prediction_shape = prediction_shape
self.mean_vec = mean_vec
self.stddev_vec = stddev_vec
def fit(self, X, y=None):
return self
def transform(self, X: Union[Image.Image, np.ndarray]) -> np.ndarray:
if isinstance(X, np.ndarray):
dim_0 = (3,) + self.image_size
dim_1 = self.image_size + (3,)
if X.shape != dim_0 and X.shape != dim_1:
raise ValueError(f"resize to image_size {self.image_size} beforehand for numpy array")
else:
X = np.array(X.resize(self.image_size))
image_data = X.transpose(2, 0, 1).astype(np.float32)
mean_vec = np.array(self.mean_vec)
stddev_vec = np.array(self.stddev_vec)
norm_image_data = np.zeros(image_data.shape).astype(np.float32)
for i in range(image_data.shape[0]):
norm_image_data[i, :, :] = (image_data[i, :, :] / 255 - mean_vec[i]) / stddev_vec[i]
norm_image_data = norm_image_data.reshape(self.prediction_shape).astype(np.float32)
return norm_image_data
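# Illustrative usage (file name is hypothetical):
#   transformer = PytorchImagePreprocessTransformer()
#   arr = transformer.transform(Image.open("sample.png").convert("RGB"))
#   # arr is a float32 np.ndarray of shape (1, 3, 32, 32), normalized channel-wise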
class SoftmaxTransformer(
BaseEstimator,
TransformerMixin,
):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(
self,
X: Union[np.ndarray, List[float], List[List[float]]],
) -> np.ndarray:
if isinstance(X, List):
X = np.array(X)
x = X.reshape(-1)
e_x = np.exp(x - np.max(x))
result = np.array([e_x / e_x.sum(axis=0)])
return result
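# e.g. SoftmaxTransformer().transform([0.0, 0.0]) -> array([[0.5, 0.5]])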
class Classifier(object):
def __init__(
self,
preprocess_transformer: BaseEstimator = PytorchImagePreprocessTransformer,
softmax_transformer: BaseEstimator = SoftmaxTransformer,
serving_address: str = "localhost:50051",
onnx_input_name: str = "input",
onnx_output_name: str = "output",
):
self.preprocess_transformer: BaseEstimator = preprocess_transformer()
self.preprocess_transformer.fit(None)
self.softmax_transformer: BaseEstimator = softmax_transformer()
self.softmax_transformer.fit(None)
self.serving_address = serving_address
self.channel = grpc.insecure_channel(self.serving_address)
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(self.channel)
self.onnx_input_name: str = onnx_input_name
self.onnx_output_name: str = onnx_output_name
def predict(self, data: Image) -> List[float]:
preprocessed = self.preprocess_transformer.transform(data)
input_tensor = onnx_ml_pb2.TensorProto()
input_tensor.dims.extend(preprocessed.shape)
input_tensor.data_type = 1
input_tensor.raw_data = preprocessed.tobytes()
request_message = predict_pb2.PredictRequest()
request_message.inputs[self.onnx_input_name].data_type = input_tensor.data_type
request_message.inputs[self.onnx_input_name].dims.extend(preprocessed.shape)
request_message.inputs[self.onnx_input_name].raw_data = input_tensor.raw_data
response = self.stub.Predict(request_message)
output = np.frombuffer(response.outputs[self.onnx_output_name].raw_data, dtype=np.float32)
softmax = self.softmax_transformer.transform(output).tolist()
logger.info(f"predict proba {softmax}")
return softmax
def predict_label(self, data: Image) -> int:
softmax = self.predict(data=data)
argmax = int(np.argmax(np.array(softmax)[0]))
logger.info(f"predict label {argmax}")
return argmax
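# Illustrative usage (assumes an ONNX Runtime server reachable at localhost:50051; file name is hypothetical):
#   classifier = Classifier()
#   label = classifier.predict_label(Image.open("cat.png").convert("RGB"))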
def evaluate(
test_data_directory: str,
preprocess_transformer: BaseEstimator = PytorchImagePreprocessTransformer,
softmax_transformer: BaseEstimator = SoftmaxTransformer,
) -> Dict:
classifier = Classifier(
preprocess_transformer=preprocess_transformer,
softmax_transformer=softmax_transformer,
serving_address="localhost:50051",
onnx_input_name="input",
onnx_output_name="output",
)
directory_list = os.listdir(test_data_directory)
predictions = {}
predicted = []
labels = []
durations = []
for c in directory_list:
c_path = os.path.join(test_data_directory, c)
c_list = os.listdir(c_path)
for f in c_list:
image_path = os.path.join(c_path, f)
image = Image.open(image_path)
start = time.time()
x = classifier.predict_label(image)
end = time.time()
duration = end - start
predicted.append(x)
labels.append(int(c))
durations.append(duration)
predictions[image_path] = {"label": c, "prediction": x}
logger.info(f"{image_path} label: {c} predicted: {x} duration: {duration} seconds")
total_time = sum(durations)
total_tested = len(predicted)
average_duration_second = total_time / total_tested
accuracy = accuracy_score(labels, predicted)
evaluation = {
"total_tested": total_tested,
"accuracy": accuracy,
"total_time": total_time,
"average_duration_second": average_duration_second,
}
return {"evaluation": evaluation, "predictions": predictions}
def main():
parser = argparse.ArgumentParser(
description="Evaluate Cifar10 model",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--upstream",
type=str,
default="/opt/data/train",
help="upstream directory",
)
parser.add_argument(
"--downstream",
type=str,
default="/opt/data/evaluate/",
help="downstream directory",
)
parser.add_argument(
"--test_data_directory",
type=str,
default="/opt/data/preprocess/test",
help="test data directory",
)
args = parser.parse_args()
mlflow_experiment_id = int(os.getenv("MLFLOW_EXPERIMENT_ID", 0))
upstream_directory = args.upstream
downstream_directory = args.downstream
os.makedirs(upstream_directory, exist_ok=True)
os.makedirs(downstream_directory, exist_ok=True)
result = evaluate(
test_data_directory=args.test_data_directory,
)
log_file = os.path.join(downstream_directory, f"{mlflow_experiment_id}.json")
with open(log_file, "w") as f:
        json.dump(result, f)
mlflow.log_metric(
"total_tested",
result["evaluation"]["total_tested"],
)
mlflow.log_metric(
"total_time",
result["evaluation"]["total_time"],
)
mlflow.log_metric(
"accuracy",
result["evaluation"]["accuracy"],
)
mlflow.log_metric(
"average_duration_second",
result["evaluation"]["average_duration_second"],
)
mlflow.log_artifact(log_file)
if __name__ == "__main__":
main()
|
11516590
|
import json
import subprocess
from typing import Tuple
from benchmark.common.constants import URL, LineStatus, ScannerType
from benchmark.scanner.scanner import Scanner
class CredSweeper(Scanner):
def __init__(self, working_dir: str, cred_data_dir: str) -> None:
super().__init__(ScannerType.CREDSWEEPER, URL.CREDSWEEPER, working_dir, cred_data_dir)
self.output_dir: str = f"{self.scanner_dir}/output.json"
@property
def output_dir(self) -> str:
return self._output_dir
@output_dir.setter
def output_dir(self, output_dir: str) -> None:
self._output_dir = output_dir
def init_scanner(self) -> None:
subprocess.call(["virtualenv", "venv"], cwd=self.scanner_dir)
subprocess.call(["./venv/bin/python", "-m", "pip", "install", "-qr", "requirements.txt"], cwd=self.scanner_dir)
def run_scanner(self) -> None:
self.init_scanner()
subprocess.call([
"./venv/bin/python", "-m", "credsweeper", "--path", f"{self.cred_data_dir}/data", "--ml_validation",
"--save-json", self.output_dir
],
cwd=self.scanner_dir)
def parse_result(self) -> Tuple[int, int, int, int]:
with open(self.output_dir, "r") as f:
data = json.load(f)
result_cnt = lost_cnt = true_cnt = false_cnt = 0
for result in data:
for line_data in result["line_data_list"]:
if line_data["path"].split("/")[-1] == "LICENSE":
continue
result_cnt += 1
check_line_result, line_data["project_id"], line_data["per_repo_file_id"] = self.check_line_from_meta(
line_data["path"], line_data["line_num"])
if check_line_result == LineStatus.TRUE:
line_data["TP"] = "O"
true_cnt += 1
elif check_line_result == LineStatus.FALSE:
line_data["TP"] = "X"
false_cnt += 1
elif check_line_result == LineStatus.NOT_IN_DB:
line_data["TP"] = "N"
lost_cnt += 1
elif check_line_result == LineStatus.CHECKED:
line_data["TP"] = "C"
result_cnt -= 1
line_data["line"] = line_data["line"].strip()
line_data["rule"] = result["rule"]
line_data["severity"] = result["severity"]
line_data["api_validation"] = result["api_validation"]
line_data["ml_validation"] = result["ml_validation"]
return result_cnt, lost_cnt, true_cnt, false_cnt
|
11516600
|
import os
from galaxy.tool_shed.galaxy_install.tools import tool_panel_manager
from galaxy.util import parse_xml
from tool_shed.tools import tool_version_manager
from ..tools.test_toolbox import (
BaseToolBoxTestCase,
SimplifiedToolBox
)
DEFAULT_GUID = "123456"
class ToolPanelManagerTestCase(BaseToolBoxTestCase):
def get_new_toolbox(self):
return SimplifiedToolBox(self)
def test_handle_tool_panel_section(self):
self._init_tool()
self._add_config("""<toolbox><section id="tid" name="test"><tool file="tool.xml" /></section></toolbox>""")
toolbox = self.toolbox
tpm = self.tpm
# Test fetch existing section by id.
section_id, section = tpm.handle_tool_panel_section(toolbox, tool_panel_section_id="tid")
assert section_id == "tid"
assert len(section.elems) == 1 # tool.xml
assert section.id == "tid"
assert len(toolbox._tool_panel) == 1
section_id, section = tpm.handle_tool_panel_section(toolbox, new_tool_panel_section_label="tid2")
assert section_id == "tid2"
assert len(section.elems) == 0 # new section
assert section.id == "tid2"
assert len(toolbox._tool_panel) == 2
# Test re-fetch new section by same id.
section_id, section = tpm.handle_tool_panel_section(toolbox, new_tool_panel_section_label="tid2")
assert section_id == "tid2"
assert len(section.elems) == 0 # new section
assert section.id == "tid2"
assert len(toolbox._tool_panel) == 2
def test_add_tool_to_panel(self):
self._init_ts_tool(guid=DEFAULT_GUID)
self._init_dynamic_tool_conf()
tool_path = self._tool_path()
new_tools = [{"guid": DEFAULT_GUID, "tool_config": tool_path}]
repository_tools_tups = [
(
tool_path,
DEFAULT_GUID,
self.tool,
)
]
_, section = self.toolbox.get_section("tid1", create_if_needed=True)
tpm = self.tpm
tool_panel_dict = tpm.generate_tool_panel_dict_for_new_install(
tool_dicts=new_tools,
tool_section=section,
)
tpm.add_to_tool_panel(
repository_name="test_repo",
repository_clone_url="http://github.com/galaxyproject/example.git",
changeset_revision="0123456789abcde",
repository_tools_tups=repository_tools_tups,
owner="devteam",
shed_tool_conf="tool_conf.xml",
tool_panel_dict=tool_panel_dict,
)
self._verify_tool_confs()
def test_add_twice(self):
self._init_dynamic_tool_conf()
previous_guid = None
for v in "1", "2", "3":
self.__toolbox = self.get_new_toolbox()
changeset = "0123456789abcde%s" % v
guid = DEFAULT_GUID + ("v/%s" % v)
tool = self._init_ts_tool(guid=guid, filename="tool_v%s.xml" % v, version=v)
tool_path = self._tool_path(name="tool_v%s.xml" % v)
new_tools = [{"guid": guid, "tool_config": tool_path}]
self._repo_install(changeset)
repository_tools_tups = [
(
tool_path,
guid,
tool,
)
]
_, section = self.toolbox.get_section("tid1", create_if_needed=True)
tpm = self.tpm
tool_panel_dict = tpm.generate_tool_panel_dict_for_new_install(
tool_dicts=new_tools,
tool_section=section,
)
tpm.add_to_tool_panel(
repository_name="example",
repository_clone_url="github.com",
changeset_revision=changeset,
repository_tools_tups=repository_tools_tups,
owner="galaxyproject",
shed_tool_conf="tool_conf.xml",
tool_panel_dict=tool_panel_dict,
)
self._verify_tool_confs()
section = self.toolbox._tool_panel["tid1"]
# New GUID replaced old one in tool panel but both
# appear in integrated tool panel.
if previous_guid:
assert ("tool_%s" % previous_guid) not in section.panel_items()
assert ("tool_%s" % guid) in self.toolbox._integrated_tool_panel["tid1"].panel_items()
previous_guid = guid
def test_uninstall_in_section(self):
self._setup_two_versions_remove_one(section=True, uninstall=True)
self._verify_version_2_removed_from_panel()
# Not in tool conf because it was uninstalled.
assert "github.com/galaxyproject/example/test_tool/0.2" not in open(os.path.join(self.test_directory, "tool_conf.xml")).read()
new_toolbox = self.get_new_toolbox()
assert "tool_github.com/galaxyproject/example/test_tool/0.2" not in new_toolbox._integrated_tool_panel["tid"].elems
self._verify_tool_confs()
def test_uninstall_outside_section(self):
self._setup_two_versions_remove_one(section=False, uninstall=True)
self._verify_version_2_removed_from_panel(section=False)
        # Not in tool conf because it was uninstalled.
assert "github.com/galaxyproject/example/test_tool/0.2" not in open(os.path.join(self.test_directory, "tool_conf.xml")).read()
self._verify_tool_confs()
self._remove_repository_contents("github.com/galaxyproject/example/test_tool/0.1", uninstall=True)
# Now no versions of this tool are returned by new toolbox.
new_toolbox = self.get_new_toolbox()
all_versions = new_toolbox.get_tool("test_tool", get_all_versions=True)
assert not all_versions
def _setup_two_versions_remove_one(self, section, uninstall):
self._init_tool()
self._setup_two_versions_in_config(section=section)
self._setup_two_versions()
self._remove_repository_contents("github.com/galaxyproject/example/test_tool/0.2", uninstall=uninstall)
def _verify_version_2_removed_from_panel(self, section=True):
# Check that test_tool now only has one version...
# We load a new toolbox
new_toolbox = self.get_new_toolbox()
all_versions = new_toolbox.get_tool("test_tool", get_all_versions=True)
assert len(all_versions) == 1
# Check that tool panel has reverted to old value...
if section:
section = new_toolbox._tool_panel["tid"]
assert len(section.elems) == 1
assert next(iter(section.elems.values())).id == "github.com/galaxyproject/example/test_tool/0.1"
assert "github.com/galaxyproject/example/test_tool/0.2" not in new_toolbox._integrated_tool_panel["tid"].elems
else:
assert next(iter(new_toolbox._tool_panel.values())).id == "github.com/galaxyproject/example/test_tool/0.1"
assert "github.com/galaxyproject/example/test_tool/0.2" not in new_toolbox._integrated_tool_panel
def _remove_repository_contents(self, guid, uninstall, shed_tool_conf="tool_conf.xml"):
tool = self.toolbox.get_tool(guid)
repository = tool.tool_shed_repository
self.tpm.remove_repository_contents(
repository=repository,
shed_tool_conf=shed_tool_conf,
uninstall=uninstall,
)
def _verify_tool_confs(self):
self._assert_valid_xml(self.integerated_tool_panel_path)
self._assert_valid_xml(os.path.join(self.test_directory, "tool_conf.xml"))
def _assert_valid_xml(self, filename):
try:
parse_xml(filename)
except Exception:
message_template = "file %s does not contain valid XML, content %s"
message = message_template % (filename, open(filename).read())
raise AssertionError(message)
def _init_ts_tool(self, guid=DEFAULT_GUID, **kwds):
tool = self._init_tool(**kwds)
tool.guid = guid
tool.version = kwds.get('version', '1.0')
return tool
@property
def tpm(self):
return tool_panel_manager.ToolPanelManager(self.app)
@property
def tvm(self):
return tool_version_manager.ToolVersionManager(self.app)
|
11516602
|
import os
from pathlib import Path
import shutil
from shutil import copyfile
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/home/guest/mabdelfa/zerowaste/DRS/utils/transforms')
from PIL import Image
import imageio
import numpy
from numpy import asarray
import numpy as np
#/research/axns2/mabdelfa/before_after_voc_format/SegmentationClass
#VOC2012
#before_after_voc_format
#dataset_conversion
#/research/axns2/mabdelfa/sem_conv
directory_of_interest = "/research/axns2/mabdelfa/TACO/data/batch_"
count = 0
total = 15
total_files = 0
all_old_names = [[]]
for i in range(1, total+1):
my_dir = directory_of_interest + str(i)
directory = os.fsencode(my_dir)
path, dirs, files = next(os.walk(directory))
file_count = len(files)
total_files += file_count
names = []
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg") or filename.endswith(".JPG"):
count +=1
names.append(filename)
#my_file = Path(my_dir + "/" + filename)
#save_path = "/research/axns2/mabdelfa/TACO/data/coco_format/data/" + filename
#shutil.copy2(my_file, save_path)
all_old_names.append(names)
print('total_files: ' , total_files)
f = open("map_names.txt", "w")
counter = 0
batch_n = -1
for batch in all_old_names:
batch_n += 1
batch.sort()
for old_name in batch:
new_name = str(counter) + '.jpg'
if not os.path.isfile('batch_' + str(batch_n) + '/' + old_name):
            print('Big problem, this file does not exist.... ' + 'batch_' + str(batch_n) + '/' + old_name)
f.write('batch_' + str(batch_n) + '/' + old_name + ' ' + new_name + '\n')
counter += 1
f.close()
|
11516608
|
from bearlibterminal import terminal
from clubsandwich.blt.loop import BearLibTerminalEventLoop
i = 0
j = 0
class MyDemo(BearLibTerminalEventLoop):
def __init__(self):
super().__init__()
self.should_exit = False
def terminal_init(self):
terminal.print(0, 1, "Cmd+Q/Alt+F4/whatever to quit")
def terminal_read(self, val):
self.should_exit = val == terminal.TK_CLOSE
def terminal_update(self):
global i
global j
terminal.put(j, 0, str(i))
i = (i + 1) % 10
j = (j + 1) % 11
return not self.should_exit
MyDemo().run()
|
11516615
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
# Apply unfold operation to input in order to prepare it to be processed against a sliding kernel whose shape
# is passed as argument.
def unfold_map2d(input, kernel_height, kernel_width):
# Before performing an operation between an input and a sliding kernel we need to unfold the input, i.e. the
# windows on which the kernel is going to be applied are extracted and set apart. For this purpose, the kernel
# shape is passed as argument to the operation. The single extracted windows are reshaped by the unfold operation
# to rank 1 vectors. The output of F.unfold(input, (kernel_height, kernel_width)).transpose(1, 2) is a
# tensor structured as follows: the first dimension is the batch dimension; the second dimension is the slide
# dimension, i.e. each element is a window extracted at a different offset (and reshaped to a rank 1 vector);
# the third dimension is a scalar within said vector.
inp_unf = F.unfold(input, (kernel_height, kernel_width)).transpose(1, 2)
# Now we need to reshape our tensors to the actual shape that we want in output, which is the following: the
# first dimension is the batch dimension, the second dimension is the output channels dimension, the third and
# fourth are height and width dimensions (obtained by splitting the former third dimension, the slide dimension,
# representing a linear offset within the input map, into two new dimensions representing height and width), the
# fifth is the window components dimension, corresponding to the elements of a window extracted from the input with
# the unfold operation (reshaped to rank 1 vectors). The resulting tensor is then returned.
inp_unf = inp_unf.view(
input.size(0), # Batch dimension
1, # Output channels dimension
input.size(2) - kernel_height + 1, # Height dimension
input.size(3) - kernel_width + 1, # Width dimension
-1 # Filter/window dimension
)
return inp_unf
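# Illustrative shape walk-through (assuming a single 1-channel 4x4 input and a 3x3 kernel):
# F.unfold extracts (4-3+1)*(4-3+1) = 4 windows of 9 elements each, giving a (1, 9, 4) tensor;
# the transpose makes it (1, 4, 9), and the view above reshapes it to (1, 1, 2, 2, 9).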
# Custom vectorial function representing sum of an input with a sliding kernel, just like convolution is multiplication
# by a sliding kernel (as an analogy, think of convolution as a kernel_mult2d)
def kernel_sum2d(input, kernel):
# In order to perform the sum with the sliding kernel we first need to unfold the input. The resulting tensor will
# have the following structure: the first dimension is the batch dimension, the second dimension is the output
# channels dimension, the third and fourth are height and width dimensions, the fifth is the filter/window
# components dimension, corresponding to the elements of a window extracted from the input with the unfold
# operation and equivalently to the elements of a filter (reshaped to rank 1 vectors)
inp_unf = unfold_map2d(input, kernel.size(2), kernel.size(3))
# At this point the two tensors can be summed. The kernel is reshaped by unsqueezing singleton dimensions along
# the batch dimension and the height and width dimensions. By exploiting broadcasting, it happens that the inp_unf
# tensor is broadcast over the output channels dimension (since its shape along this dimension is 1) and therefore
# it is automatically processed against the different filters of the kernel. In the same way, the kernel is
# broadcast along the first dimension (and thus automatically processed against the different inputs along
# the batch dimension) and along the third and fourth dimensions (and thus automatically processed against
# different windows extracted from the image at different height and width offsets).
out = inp_unf + kernel.view(1, kernel.size(0), 1, 1, -1)
return out
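# Illustrative broadcasting example: with an input of shape (8, 3, 10, 12) and a kernel of shape
# (6, 3, 4, 5), inp_unf has shape (8, 1, 7, 8, 60) and kernel.view(1, 6, 1, 1, 60) broadcasts
# against it, so the returned tensor has shape (8, 6, 7, 8, 60), matching test_kernelsum below.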
# Test the implementation of the kernel_sum2d function
def test_kernelsum():
x = torch.randn(
8, # Batch dimension
3, # Input channels dimension
10, # Height dimension
12 # Width dimension
)
w = torch.randn(
6, # Output channels dimension
3, # Input channels dimension
4, # Height dimension
5 # Width dimension
)
output = torch.empty(
x.shape[0], # Batch dimension
w.shape[0], # Output channels dimension
x.shape[2] - w.shape[2] + 1, # Height dimension
x.shape[3] - w.shape[3] + 1, # Width dimension
w.shape[1] * w.shape[2] * w.shape[3] # Filter dimension
)
# Cross-validate vectorial implementation with for-loop implementation
for batch in range(0, x.shape[0]): # Loop over batch dimension
for outchn in range(0, w.shape[0]): # Loop over output channel dimension
for i in range(0, x.shape[2] - w.shape[2] + 1): # Loop over height dimension
for j in range(0, x.shape[3] - w.shape[3] + 1): # Loop over width dimension
output[batch, outchn, i, j, :] = (x[batch, :, i:i + w.shape[2], j:j + w.shape[3]] + w[outchn, :, :, :]).view(-1)
out = kernel_sum2d(x, w)
print((output.equal(out))) # Should print out True
# Compute product between input and sliding kernel
def kernel_mult2d(x, w, b=None):
return F.conv2d(x, w, b)
# Projection of input on weight vectors
def vector_proj2d(x, w, bias=None):
# Compute scalar product with sliding kernel
prod = kernel_mult2d(x, w)
# Divide by the norm of the weight vector to obtain the projection
norm_w = torch.norm(w.view(w.size(0), -1), p=2, dim=1).view(1, -1, 1, 1)
norm_w += (norm_w == 0).float() # Prevent divisions by zero
if bias is None: return prod / norm_w
return prod / norm_w + bias.view(1, -1, 1, 1)
# Projection of input on weight vector clipped between 0 and +inf
def clp_vector_proj2d(x, w, bias=None):
return vector_proj2d(x, w, bias).clamp(0)
# Sigmoid similarity
def sig_sim2d(x, w, bias=None):
proj = vector_proj2d(x, w, bias)
#return torch.sigmoid((proj - proj.mean())/proj.std())
return torch.sigmoid(proj)
# Cosine similarity between an input map and a sliding kernel
def cos_sim2d(x, w, bias=None):
proj = vector_proj2d(x, w)
# Divide by the norm of the input to obtain the cosine similarity
x_unf = unfold_map2d(x, w.size(2), w.size(3))
norm_x = torch.norm(x_unf, p=2, dim=4)
norm_x += (norm_x == 0).float() # Prevent divisions by zero
if bias is None: return proj / norm_x
return (proj / norm_x + bias.view(1, -1, 1, 1)).clamp(-1, 1)
# Cosine similarity clipped between 0 and 1
def clp_cos_sim2d(x, w, bias=None):
return cos_sim2d(x, w, bias).clamp(0)
# Cosine similarity remapped to 0, 1
def raised_cos2d(x, w, bias=None):
return (cos_sim2d(x, w, bias) + 1) / 2
# Returns function that computes raised cosine power p
def raised_cos2d_pow(p=2):
def raised_cos2d_pow_p(x, w, bias=None):
if bias is None: return raised_cos2d(x, w).pow(p)
return (raised_cos2d(x, w).pow(p) + bias.view(1, -1, 1, 1)).clamp(0, 1)
return raised_cos2d_pow_p
# Softmax on weight vector projection activation function
def proj_smax2d(x, w, bias=None):
e_pow_y = torch.exp(vector_proj2d(x, w, bias))
    return e_pow_y / e_pow_y.sum(1, keepdim=True)
# Response of a gaussian activation function
def gauss(x, w, sigma=None):
d = torch.norm(kernel_sum2d(x, -w), p=2, dim=4)
if sigma is None: return torch.exp(-d.pow(2) / (2*utils.shape2size(tuple(w[0].size())))) # heuristic: use number of dimensions as variance
#if sigma is None: return torch.exp(-d.pow(2) / (2 * torch.norm(w.view(w.size(0), 1, -1) - w.view(1, w.size(0), -1), p=2, dim=2).max().pow(2)/w.size(0))) # heuristic: normalization condition
#if sigma is None: return torch.exp(-d.pow(2) / (2 * d.mean().pow(2)))
return torch.exp(-d.pow(2) / (2 * (sigma.view(1, -1, 1, 1).pow(2))))
# Returns lambda function for exponentially decreasing learning rate scheduling
def sched_exp(tau=1000, eta_min=0.01):
gamma = torch.exp(torch.tensor(-1./tau)).item()
return lambda eta: (eta * gamma).clamp(eta_min)
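# Worked example: with tau=1000, gamma = exp(-1/1000) ~ 0.999, so each call multiplies eta by
# ~0.999; after 1000 updates eta has decayed by a factor of ~e (e.g. 0.1 -> ~0.037), and it is
# never reduced below eta_min because of the clamp.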
# This module represents a layer of convolutional neurons that are trained with a Hebbian-WTA rule
class HebbianMap2d(nn.Module):
# Types of learning rules
RULE_BASE = 'base' # delta_w = eta * lfb * (x - w)
RULE_HEBB = 'hebb' # delta_w = eta * y * lfb * (x - w)
# Types of LFB kernels
LFB_GAUSS = 'gauss'
LFB_DoG = 'DoG'
LFB_EXP = 'exp'
LFB_DoE = 'DoE'
def __init__(self,
in_channels,
out_size,
kernel_size,
competitive=True,
random_abstention=False,
lfb_value=0,
similarity=raised_cos2d_pow(2),
out=vector_proj2d,
weight_upd_rule=RULE_BASE,
eta=0.1,
lr_schedule=None,
tau=1000):
super(HebbianMap2d, self).__init__()
# Init weights
out_size_list = [out_size] if not hasattr(out_size, '__len__') else out_size
self.out_size = torch.tensor(out_size_list[0:min(len(out_size_list), 3)])
out_channels = self.out_size.prod().item()
if hasattr(kernel_size, '__len__') and len(kernel_size) == 1: kernel_size = kernel_size[0]
if not hasattr(kernel_size, '__len__'): kernel_size = [kernel_size, kernel_size]
stdv = 1 / (in_channels * kernel_size[0] * kernel_size[1]) ** 0.5
self.register_buffer('weight', torch.empty(out_channels, in_channels, kernel_size[0], kernel_size[1]))
nn.init.uniform_(self.weight, -stdv, stdv) # Same initialization used by default pytorch conv modules (the one from the paper "Efficient Backprop, LeCun")
# Enable/disable features as random abstention, competitive learning, lateral feedback
self.competitive = competitive
self.random_abstention = competitive and random_abstention
self.lfb_on = competitive and isinstance(lfb_value, str)
self.lfb_value = lfb_value
# Set output function, similarity function and learning rule
self.similarity = similarity
self.out = out
self.teacher_signal = None # Teacher signal for supervised training
self.weight_upd_rule = weight_upd_rule
# Initial learning rate and LR scheduling policy. LR is wrapped in a registered buffer so that we can save/load it
self.register_buffer('eta', torch.tensor(eta))
self.lr_schedule = lr_schedule # LR scheduling policy
# Set parameters related to the lateral feedback feature
if self.lfb_on:
# Prepare the variables to generate the kernel that will be used to apply lateral feedback
map_radius = (self.out_size - 1) // 2
sigma_lfb = map_radius.max().item()
x = torch.abs(torch.arange(0, self.out_size[0].item()) - map_radius[0])
for i in range(1, self.out_size.size(0)):
x_new = torch.abs(torch.arange(0, self.out_size[i].item()) - map_radius[i])
for j in range(i): x_new = x_new.unsqueeze(j)
x = torch.max(x.unsqueeze(-1), x_new) # max gives L_infinity distance, sum would give L_1 distance, root_p(sum x^p) for L_p
# Store the kernel that will be used to apply lateral feedback in a registered buffer
if lfb_value == self.LFB_EXP or lfb_value == self.LFB_DoE: self.register_buffer('lfb_kernel', torch.exp(-x.float() / sigma_lfb))
else: self.register_buffer('lfb_kernel', torch.exp(-x.pow(2).float() / (2 * (sigma_lfb ** 2))))
# Padding that will pad the inputs before applying the lfb kernel
pad_pre = map_radius.unsqueeze(1)
pad_post = (self.out_size - 1 - map_radius).unsqueeze(1)
self.pad = tuple(torch.cat((pad_pre, pad_post), dim=1).flip(0).view(-1))
# LFB kernel shrinking parameter
self.alpha = torch.exp( torch.log(torch.tensor(sigma_lfb).float()) / tau ).item()
if lfb_value == self.LFB_GAUSS or lfb_value == self.LFB_DoG: self.alpha = self.alpha ** 2
else: self.register_buffer('lfb_kernel', None)
# Init variables for statistics collection
if self.random_abstention: self.register_buffer('victories_count', torch.zeros(out_channels))
else: self.register_buffer('victories_count', None)
def set_teacher_signal(self, y):
self.teacher_signal = y
def forward(self, x):
y = self.out(x, self.weight)
if self.training: self.update(x)
return y
def update(self, x):
# Prepare the inputs
y = self.similarity(x, self.weight)
t = self.teacher_signal
if t is not None: t = t.unsqueeze(2).unsqueeze(3) * torch.ones_like(y, device=y.device)
y = y.permute(0, 2, 3, 1).contiguous().view(-1, self.weight.size(0))
if t is not None: t = t.permute(0, 2, 3, 1).contiguous().view(-1, self.weight.size(0))
x_unf = unfold_map2d(x, self.weight.size(2), self.weight.size(3))
x_unf = x_unf.permute(0, 2, 3, 1, 4).contiguous().view(y.size(0), 1, -1)
# Random abstention
if self.random_abstention:
abst_prob = self.victories_count / (self.victories_count.max() + y.size(0) / y.size(1)).clamp(1)
scores = y * (torch.rand_like(abst_prob, device=y.device) >= abst_prob).float().unsqueeze(0)
else: scores = y
# Competition. The returned winner_mask is a bitmap telling where a neuron won and where one lost.
if self.competitive:
if t is not None: scores *= t
winner_mask = (scores == scores.max(1, keepdim=True)[0]).float()
if self.random_abstention: # Update statistics if using random abstention
winner_mask_sum = winner_mask.sum(0) # Number of inputs over which a neuron won
self.victories_count += winner_mask_sum
self.victories_count -= self.victories_count.min().item()
else: winner_mask = torch.ones_like(y, device=y.device)
# Lateral feedback
if self.lfb_on:
lfb_kernel = self.lfb_kernel
if self.lfb_value == self.LFB_DoG or self.lfb_value == self.LFB_DoE: lfb_kernel = 2 * lfb_kernel - lfb_kernel.pow(0.5) # Difference of Gaussians/Exponentials (mexican hat shaped function)
lfb_in = F.pad(winner_mask.view(-1, *self.out_size), self.pad)
if self.out_size.size(0) == 1: lfb_out = torch.conv1d(lfb_in.unsqueeze(1), lfb_kernel.unsqueeze(0).unsqueeze(1))
elif self.out_size.size(0) == 2: lfb_out = torch.conv2d(lfb_in.unsqueeze(1), lfb_kernel.unsqueeze(0).unsqueeze(1))
else: lfb_out = torch.conv3d(lfb_in.unsqueeze(1), lfb_kernel.unsqueeze(0).unsqueeze(1))
lfb_out = lfb_out.clamp(-1, 1).view_as(y)
else:
lfb_out = winner_mask
if self.competitive: lfb_out[lfb_out == 0] = self.lfb_value
elif t is not None: lfb_out = t
# Compute step modulation coefficient
r = lfb_out # RULE_BASE
if self.weight_upd_rule == self.RULE_HEBB: r *= y
# Compute delta
r_abs = r.abs()
r_sign = r.sign()
delta_w = r_abs.unsqueeze(2) * (r_sign.unsqueeze(2) * x_unf - self.weight.view(1, self.weight.size(0), -1))
# Since we use batches of inputs, we need to aggregate the different update steps of each kernel in a unique
# update. We do this by taking the weighted average of the steps, the weights being the r coefficients that
# determine the length of each step
r_sum = r_abs.sum(0)
r_sum += (r_sum == 0).float() # Prevent divisions by zero
delta_w_avg = (delta_w * r_abs.unsqueeze(2)).sum(0) / r_sum.unsqueeze(1)
# Apply delta
self.weight += self.eta * delta_w_avg.view_as(self.weight)
# LFB kernel shrinking and LR schedule
if self.lfb_on: self.lfb_kernel = self.lfb_kernel.pow(self.alpha)
if self.lr_schedule is not None: self.eta = self.lr_schedule(self.eta)
# Generate a batch of random inputs for testing
def gen_batch(centers, batch_size, win_height, win_width):
# Generate an input "image" by first generating patches as random perturbations on the cluster centers and then
# concatenating them in the horizontal and vertical dimensions. Repeat to generate a batch.
batch = torch.empty(0)
for j in range(batch_size): # Loop to generate batch
image = torch.empty(0)
for k in range(win_height): # Loop to concat image rows vertically
row = torch.empty(0)
for l in range(win_width): # Loop to concat patches horizontally
# Generate an input patch by perturbing a cluster center
index = int(torch.floor(torch.rand(1) * centers.size(0)).item())
patch = centers[index] + 0.1 * torch.randn_like(centers[index])
# Concatenate patch horizontally to the image row
row = torch.cat((row, patch), 2)
# Concatenate row to the image vertically
image = torch.cat((image, row), 1)
# Concatenate the image to the batch
batch = torch.cat((batch, image.unsqueeze(0)), 0)
return batch
# Test for the batch generation function
def test_genbatch():
# Generate centers around which clusters are built
centers = torch.randn(6, 3, 4, 5)
# Generate a batch of inputs around the centers
batch = gen_batch(centers, 10, 2, 2)
# Check that the batch size is correct (just to be sure)
print(batch.size()) # Should print 10x3x8x10
# Test the implementation of the HebbianMap2d
def test_hebbianmap():
# Function for printing summary information
def print_results(model, centers):
print('\n' + '#'*79 + '\n')
responses = model(centers).squeeze()
top_act, closest_neurons = responses.max(1)
for i in range(responses.size(0)): print("Closest neuron to center " + str(i) + ": " + str(closest_neurons[i].item()) + ", output: " + str(top_act[i].item()))
print()
top_act, closest_centers = responses.max(0)
for i in range(responses.size(1)): print("Closest center to neuron " + str(i) + ": " + str(closest_centers[i].item()) + ", output: " + str(top_act[i].item()))
print('\n' + '#' * 79 + '\n')
torch.random.manual_seed(3)
kernel_shape = (6, 3, 4, 5)
num_centers = 6
num_iter = 2000
batch_size = 10
win_height = 2
win_width = 2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = HebbianMap2d(
in_channels=kernel_shape[1],
out_size=kernel_shape[0],
kernel_size=[kernel_shape[2], kernel_shape[3]],
competitive=True,
random_abstention=False,
lfb_value=0,
similarity=raised_cos2d_pow(2),
out=cos_sim2d,
weight_upd_rule=HebbianMap2d.RULE_BASE,
eta=0.1,
lr_schedule=sched_exp(1000, 0.01),
tau=1000
)
model.eval()
model.to(device)
# Generate centers around which clusters are built
centers = torch.randn(num_centers, *kernel_shape[1:4])
# Check the distance between the centers and the randomly initialized weight vectors
print_results(model, centers.to(device))  # Move centers to the model's device before the forward pass
# Train the model: generate a batch of inputs and feed it to the model, repeat for the desired number of iterations
model.train()
for i in range(num_iter):
batch = gen_batch(centers, batch_size, win_height, win_width)
batch = batch.to(device)
model(batch)
model.eval()
# Verify that the weight vectors of the model have converged to the cluster centers
print_results(model, centers.to(device))
if __name__=='__main__':
test_kernelsum()
test_genbatch()
test_hebbianmap()
|
11516722
|
from __future__ import absolute_import, division, print_function
from PySide2.QtCore import Qt, QAbstractItemModel, QModelIndex
import libtbx.phil
# =============================================================================
def check_phil(phil, scope=True, definition=True, raise_error=True):
"""
Convenience function for checking whether the input is a libtbx.phil.scope only,
a libtbx.phil.definition only, or either.
Parameters
----------
phil: object
The object to be tested
scope: bool
Flag to check if phil is a libtbx.phil.scope
definition: bool
Flag to check if phil is a libtbx.phil.definition
raise_error: bool
If true, a RuntimeError is raised if the check(s) fail
Returns
-------
value: bool
"""
value = False
if scope: # check for only libtbx.phil.scope
value = isinstance(phil, libtbx.phil.scope)
if definition: # check for only libtbx.phil.definition
value = isinstance(phil, libtbx.phil.definition)
if scope and definition: # check for either
value = isinstance(phil, libtbx.phil.scope) or isinstance(phil, libtbx.phil.definition)
if (scope and definition) and not value and raise_error:
raise RuntimeError('A libtbx.phil.scope or libtbx.phil.definition is expected.')
elif scope and not value and raise_error:
raise RuntimeError('A libtbx.phil.scope is expected.')
elif definition and not value and raise_error:
raise RuntimeError('A libtbx.phil.definition is expected.')
return value
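# Illustrative behaviour (assuming `params` is a parsed libtbx.phil.scope):
#   check_phil(params)                                  -> True  (scope or definition accepted)
#   check_phil(params, scope=False, raise_error=False)  -> False (not a definition)
#   check_phil("foo")                                   -> raises RuntimeError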
# =============================================================================
class PhilItem(object):
"""
"""
# ---------------------------------------------------------------------------
def __init__(self, parent=None):
self._parent = parent
self._children = list()
self._type = Qt.UserRole + 1
# PHIL information
self.definition = None
self.full_path = None
self.value = None
# widget information
self.label_text = None
self.tooltip = None
# ---------------------------------------------------------------------------
def set_phil(self, phil, scope=True, definition=True):
check_phil(phil, scope=scope, definition=definition)
self.definition = phil
self.full_path = phil.full_path()
if phil.is_definition:
self.value = phil.extract()
else:
self.value = ''
# set label text for widget
self.label_text = ' '.join(self.definition.name.split('_')).capitalize()
if self.definition.short_caption is not None:
self.label_text = self.definition.short_caption
# set tooltip text to caption or help (preferred)
self.tooltip = self.full_path
if self.definition.caption is not None:
self.tooltip = self.full_path + '\n\n' + self.definition.caption
if self.definition.help is not None:
self.tooltip = self.full_path + '\n\n' + self.definition.help
# ---------------------------------------------------------------------------
def parent(self):
return self._parent
# ---------------------------------------------------------------------------
def appendChild(self, item):
self._children.append(item)
# ---------------------------------------------------------------------------
def childCount(self):
return len(self._children)
# ---------------------------------------------------------------------------
def child(self, row):
if row < self.childCount():
return self._children[row]
else:
raise RuntimeError('There is no child at row {row}.'.format(row=row))
# ---------------------------------------------------------------------------
def row(self):
if self._parent is None:
return 0
else:
return self._parent._children.index(self)
# ---------------------------------------------------------------------------
def type(self):
return self._type
# ---------------------------------------------------------------------------
def data(self, role):
if role == Qt.DisplayRole or role == Qt.EditRole:
return str(self.value)
# ---------------------------------------------------------------------------
def setData(self, value, role):
if role == Qt.EditRole:
self.value = value
# =============================================================================
class PhilModel(QAbstractItemModel):
"""
The model component of a PHIL scope. This is used to map widgets to
the underlying PHIL scope and to update the data.
"""
# ---------------------------------------------------------------------------
def __init__(self, parent=None):
"""
"""
QAbstractItemModel.__init__(self, parent)
self._header_labels = ['parameter', 'value']
self._root = PhilItem()
# PHIL
self._scope = None
self._extract = None
# ---------------------------------------------------------------------------
def initialize_model(self, phil_scope):
check_phil(phil_scope, scope=True, definition=False)
self._scope = phil_scope
self._extract = phil_scope.extract()
# fill out model data with values from PHIL scope
self.beginResetModel()
self._populate_tree(self._scope, self._root)
self.endResetModel()
# ---------------------------------------------------------------------------
def get_phil_extract_value(self, full_path):
"""
Return the value given the full path
Parameters
----------
full_path: str
The full PHIL path
Returns
-------
value: object
The value stored in full_path. Can be a list if .multiple is True.
"""
value = self._extract
for subpath in full_path.split('.'):
if isinstance(value, libtbx.phil.scope_extract_list):
break
value = getattr(value, subpath)
return value
# ---------------------------------------------------------------------------
def set_phil_extract_value(self, full_path, value):
"""
Set the value given the full path
Parameters
----------
full_path: str
The full PHIL path
value: object
object to be stored at full_path
Returns
-------
Nothing
"""
paths = full_path.split('.')
extract = self._extract
for subpath in paths[:-1]:
extract = getattr(extract, subpath)
setattr(extract, paths[-1], value)
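# Illustrative traversal (hypothetical path): for full_path "refinement.input.pdb_file",
# get_phil_extract_value walks extract.refinement.input.pdb_file via getattr, and
# set_phil_extract_value walks to extract.refinement.input and then setattr's "pdb_file".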
# ---------------------------------------------------------------------------
def get_master_phil(self):
"""
Function for getting the full master PHIL scope
Parameters
---------
None
Returns
-------
master_phil: libtbx.phil.scope
The original master PHIL scope
"""
return self._scope
# ---------------------------------------------------------------------------
def get_working_phil(self, diff_only=True):
"""
Function for getting the working PHIL scope
Parameters
----------
diff_only: bool
If True, only the differences are returned
Returns
-------
working_phil: libtbx.phil.scope
The working PHIL scope based on non-default settings
"""
working_phil = self._scope.format(python_object=self._extract)
if diff_only:
working_phil = self._scope.fetch_diff(working_phil)
return working_phil
# ---------------------------------------------------------------------------
def get_phil_extract(self):
"""
Function for getting the PHIL extract
Parameters
----------
None
Returns
-------
extract: libtbx.phil.extract
"""
return self._extract
# ---------------------------------------------------------------------------
def _populate_tree(self, leaf, branch):
"""
Recursively creates a tree of PhilItem objects for each PHIL
definition
Parameters
----------
leaf: libtbx.phil.scope or libtbx.phil.definition
If leaf is a PHIL definition, a PhilItem is created to represent the
definition; otherwise, a new root item is created for the scope and its
children are processed recursively.
branch: PhilItem
A model item created from a leaf.
Returns
-------
Nothing
"""
if leaf.is_definition:
item = PhilItemFactory.get_phil_item(leaf, branch)
branch.appendChild(item)
else:
new_root = PhilItemFactory.get_phil_item(leaf, branch)
branch.appendChild(new_root)
for sub_branch in leaf.objects:
self._populate_tree(sub_branch, new_root)
# ---------------------------------------------------------------------------
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return self._header_labels[section]
else:
return None
# ---------------------------------------------------------------------------
def parent(self, index):
if not index.isValid():
return QModelIndex()
child = index.internalPointer()
parent = child.parent()
if parent == self._root:
return QModelIndex()
return self.createIndex(parent.row(), 0, parent)
# ---------------------------------------------------------------------------
def index(self, row, column, parent=QModelIndex()):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
parent_item = self._root
else:
parent_item = parent.internalPointer()
child = parent_item.child(row)
if child is not None:
return self.createIndex(row, column, child)
else:
return QModelIndex()
# ---------------------------------------------------------------------------
def rowCount(self, parent=QModelIndex()):
if parent.column() > 0:
return 0
if not parent.isValid():
parent_item = self._root
else:
parent_item = parent.internalPointer()
return parent_item.childCount()
# ---------------------------------------------------------------------------
def columnCount(self, parent=QModelIndex()):
return len(self._header_labels)
# ---------------------------------------------------------------------------
def flags(self, index):
flags = QAbstractItemModel.flags(self, index)
if index.column() == 1:
flags = Qt.ItemIsEditable | flags
return flags
# ---------------------------------------------------------------------------
def data(self, index, role):
if not index.isValid():
return None
item = index.internalPointer()
value = item.data(role)
if role == Qt.DisplayRole:
if index.column() == 0:
return item.label_text
elif index.column() == 1:
return value
elif role == Qt.EditRole:
if index.column() == 1:
return value
else:
return None
# ---------------------------------------------------------------------------
def setData(self, index, value, role):
if role == Qt.EditRole:
if index.column() == 1:
item = index.internalPointer()
item.setData(value, role)
self.dataChanged.emit(index, index, [Qt.EditRole])
return True
return False
# =============================================================================
class PhilItemFactory(object):
mapping = {
'choice': PhilItem,
}
@classmethod
def get_phil_item(self, phil, parent):
if check_phil(phil, scope=False, definition=True, raise_error=False):
item_type = self.mapping.get(phil.type.phil_type, PhilItem)
item = item_type(parent=parent)
item.set_phil(phil)
else:
item = PhilItem(parent=parent)
item.set_phil(phil)
return item
# =============================================================================
|
11516728
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
SEED = 71 # 1337, 131, 71
np.random.seed(SEED) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from keras.optimizers import Adam, SGD, Optimizer
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.ensemble import BaggingClassifier
from sklearn.cross_validation import StratifiedKFold, KFold
path = '../Data/'
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data(path+'train.csv', train=True)
#X=np.log(X+1)
#X=np.sqrt(X+(3/8))
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data(path+'test.csv', train=False)
#X_test=np.log(X_test+1)
#X_test=np.sqrt(X_test+(3/8))
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
sample = pd.read_csv(path+'sampleSubmission.csv')
N = X.shape[0]
trainId = np.array(range(N))
submissionTr = pd.DataFrame(index=trainId,columns=sample.columns[1:])
nfold=5
RND = np.random.randint(0,10000,nfold)
pred = np.zeros((X_test.shape[0],9))
score = np.zeros(nfold)
i=0
skf = StratifiedKFold(labels, nfold, random_state=SEED)
for tr, te in skf:
X_train, X_valid, y_train, y_valid = X[tr], X[te], y[tr], y[te]
predTr = np.zeros((X_valid.shape[0],9))
n_bag=10
for j in range(n_bag):
print('nfold: ',i,'/',nfold, ' n_bag: ',j,' /',n_bag)
print("Building model...")
model = Sequential()
model.add(Dense(512, input_shape=(dims,)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
ADAM=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
sgd=SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
earlystopping=EarlyStopping(monitor='val_loss', patience=10, verbose=1)
checkpointer = ModelCheckpoint(filepath=path+"tmp/weights.hdf5", verbose=0, save_best_only=True)
model.fit(X_train, y_train, nb_epoch=1000, batch_size=128, verbose=2,
validation_data=(X_valid,y_valid), callbacks=[earlystopping,checkpointer])
model.load_weights(path+"tmp/weights.hdf5")
print("Generating submission...")
pred += model.predict_proba(X_test)
predTr += model.predict_proba(X_valid)
predTr /= n_bag
submissionTr.iloc[te] = predTr
score[i]= log_loss(y_valid,predTr,eps=1e-15, normalize=True)
print(score[i])
i+=1
pred /= (nfold*n_bag)
print("ave: "+ str(np.average(score)) + "stddev: " + str(np.std(score)))
make_submission(pred, ids, encoder, fname=path+'kerasNN4.csv')
print(log_loss(labels,submissionTr.values,eps=1e-15, normalize=True))
submissionTr.to_csv(path+"kerasNN4_retrain.csv",index_label='id')
# SEED 1337
# nfold 2: 0.524464 + 0.002286
# nfold 3: 0.506091 + 0.0053104
# nfold 3, bagging 5: 0.485677 + 0.00456187
# nfold 5, bagging 5: 0.4751277 + 0.01058555
# nfold 5, bagging 10: 0.472651 + 0.00961547
|
11516740
|
from .main import build_network, build_autoencoder
from .mnist_LeNet import MNIST_LeNet, MNIST_LeNet_Decoder, MNIST_LeNet_Autoencoder
from .fmnist_LeNet import FashionMNIST_LeNet, FashionMNIST_LeNet_Decoder, FashionMNIST_LeNet_Autoencoder
from .cifar10_LeNet import CIFAR10_LeNet, CIFAR10_LeNet_Decoder, CIFAR10_LeNet_Autoencoder
from .mlp import MLP, MLP_Decoder, MLP_Autoencoder
from .layers.stochastic import GaussianSample
from .layers.standard import Standardize
from .inference.distributions import log_standard_gaussian, log_gaussian, log_standard_categorical
from .vae import VariationalAutoencoder, Encoder, Decoder
from .dgm import DeepGenerativeModel, StackedDeepGenerativeModel
|
11516784
|
import multiprocessing
import os
import typing
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from pyextremes import EVA
from pyextremes.extremes import ExtremesTransformer, get_extremes
from pyextremes.plotting import pyextremes_rc
def get_default_thresholds(
ts,
extremes_type: str,
num: int = 100,
) -> np.ndarray:
"""
Get an array of threshold values for given time series.
Used to generate an array of thresholds used to find
optimal threshold values in other methods.
Thresholds are generated as an array of equally spaced values
between 90th percentile and 10th largest value in the series for 'extremes_type'
being 'high' and between 10th smallest value and 10th percentile in the series
for 'extremes_type' being 'low'.
Parameters
----------
ts : array-like
Time series of the signal.
extremes_type : str
high - get extreme high values
low - get extreme low values
num : int, optional
Number of threshold values to generate.
By default is 100.
Returns
-------
thresholds : numpy.ndarray
Array with threshold values.
"""
if extremes_type == "high":
start = np.quantile(ts.values, 0.9)
stop = ts.sort_values(ascending=False).iloc[9]
elif extremes_type == "low":
start = np.quantile(ts.values, 0.1)
stop = ts.sort_values(ascending=True).iloc[9]
else:
raise ValueError(
f"invalid value in '{extremes_type}' for the 'extremes_type' argument"
)
return np.linspace(start=start, stop=stop, num=num)
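# Illustrative usage: for a series whose 90th percentile is 2.0 and whose 10th largest value is
# 5.0, get_default_thresholds(ts, "high", num=100) returns 100 equally spaced thresholds
# between 2.0 and 5.0 (inclusive).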
def plot_mean_residual_life(
ts: pd.Series,
thresholds=None,
extremes_type: str = "high",
alpha: float = 0.95,
ax: typing.Optional[plt.Axes] = None,
figsize: tuple = (8, 5),
) -> plt.Axes:
"""
Plot mean residual life for given threshold values.
The mean residual life plot should be approximately linear above a threshold
for which the Generalized Pareto Distribution model is valid.
The strategy is to select the smallest (largest for extremes_type='low')
threshold value immediately above (below for extremes_type='low')
which the plot is approximately linear.
Parameters
----------
ts : pandas.Series
Time series of the signal.
thresholds : array-like, optional
An array of thresholds for which the mean residual life plot is plotted.
If None (default), plots mean residual life for 100 equally-spaced thresholds
between 90th (10th if extremes_type='low') percentile
and 10th largest (smallest if extremes_type='low') value in the series.
extremes_type : str, optional
high (default) - extreme high values
low - extreme low values
alpha : float, optional
Confidence interval width in the range (0, 1), by default it is 0.95.
If None, then confidence interval is not shown.
ax : matplotlib.axes._axes.Axes, optional
If provided, then the plot is drawn on this axes.
If None (default), new figure and axes are created
figsize : tuple, optional
Figure size in inches in format (width, height).
By default it is (8, 5).
Returns
-------
matplotlib.axes._axes.Axes
Axes object.
"""
# Get default thresholds
if thresholds is None:
thresholds = get_default_thresholds(
ts=ts,
extremes_type=extremes_type,
num=100,
)
# Calculate mean residual life for each threshold
mean_residual_lives, mrl_confidence = [], []
for threshold in thresholds:
if extremes_type == "high":
exceedances = ts.loc[ts > threshold] - threshold
elif extremes_type == "low":
exceedances = ts.loc[ts < threshold] - threshold
else:
raise ValueError(
f"invalid value in '{extremes_type}' for the 'extremes_type' argument"
)
mean_residual_lives.append(exceedances.mean())
if alpha is not None:
mrl_confidence.append(
scipy.stats.norm.interval(
alpha=alpha,
loc=exceedances.mean(),
scale=exceedances.std(ddof=1) / np.sqrt(len(exceedances)),
)
)
with plt.rc_context(rc=pyextremes_rc):
if ax is None:
_, ax = plt.subplots(figsize=figsize, dpi=96)
ax.grid(False)
# Plotting central estimates of mean residual life
ax.plot(
thresholds,
mean_residual_lives,
color="#F85C50",
lw=2,
ls="-",
zorder=15,
)
# Plot confidence intervals
if alpha is not None:
for ci in np.transpose(mrl_confidence):
ax.plot(thresholds, ci, color="#5199FF", lw=1, ls="--", zorder=10)
ax.fill_between(
thresholds,
*np.transpose(mrl_confidence),
facecolor="#5199FF",
edgecolor="None",
alpha=0.25,
zorder=5,
)
# Label axes
ax.set_xlabel("Threshold")
ax.set_ylabel("Mean excess")
return ax
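# Example usage (illustrative): given a pandas Series `water_levels` indexed by time,
#   ax = plot_mean_residual_life(water_levels, extremes_type="high", alpha=0.95)
# draws the mean residual life curve with 95% confidence bands on a new axes.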
def _calculate_modified_parameters(
args: typing.Tuple[
pd.Series, # ts (time series)
str, # extremes_type
float, # threshold
typing.Union[str, pd.Timedelta], # r
typing.Optional[float], # alpha
int, # n_samples
int, # seed
],
) -> typing.Dict[str, typing.Optional[float]]:
(
ts,
extremes_type,
threshold,
r,
alpha,
n_samples,
seed,
) = args
result: typing.Dict[str, typing.Optional[float]] = {"threshold": threshold}
# Get extremes
extremes = get_extremes(
ts=ts,
method="POT",
extremes_type=extremes_type,
threshold=threshold,
r=r,
)
extremes_transformer = ExtremesTransformer(
extremes=extremes,
extremes_type=extremes_type,
)
# Get central estimates for shape and scale parameters
c, _, scale = scipy.stats.genpareto.fit(
data=extremes_transformer.transformed_extremes,
floc=threshold,
)
result["shape"] = c
result["scale"] = scale - c * threshold
# Get confidence bounds
if alpha is None:
result["shape_ci_lower"] = None
result["shape_ci_upper"] = None
result["scale_ci_lower"] = None
result["scale_ci_upper"] = None
if alpha is not None:
# Get fit parameters
rng_generator = np.random.default_rng(seed=seed)
fit_parameters = [
scipy.stats.genpareto.fit(
data=rng_generator.choice(
a=extremes.values,
size=len(extremes),
replace=True,
),
floc=threshold,
)
for _ in range(n_samples)
]
# Calculate confidence bounds for shape and scale parameters
result["shape_ci_lower"], result["shape_ci_upper"] = np.quantile(
a=np.transpose(fit_parameters)[0],
q=[(1 - alpha) / 2, (1 + alpha) / 2],
)
result["scale_ci_lower"], result["scale_ci_upper"] = np.quantile(
a=np.transpose(fit_parameters)[2]
- np.transpose(fit_parameters)[0] * threshold,
q=[(1 - alpha) / 2, (1 + alpha) / 2],
)
return result
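# Note on the "modified scale" reported above: for a GPD fitted with location fixed at the
# threshold u, the code reports sigma* = sigma_hat - xi_hat * u (scale - c * threshold), which
# should be approximately constant over thresholds for which the GPD model is valid.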
def plot_parameter_stability(
ts: pd.Series,
thresholds=None,
r: typing.Union[str, pd.Timedelta] = "24H",
extremes_type: str = "high",
alpha: typing.Optional[float] = None,
n_samples: int = 100,
axes: typing.Optional[typing.Tuple[plt.Axes, plt.Axes]] = None,
figsize: tuple = (8, 5),
progress: bool = False,
) -> typing.Tuple[plt.Axes, plt.Axes]:
"""
Plot parameter stability plot for given threshold values.
The parameter stability plot shows shape and modified scale parameters
of the Generalized Pareto Distribution (GPD).
Both shape and modified scale parameters should be approximately constant above
a threshold for which the GPD model is valid.
The strategy is to select the smallest (largest for extremes_type='low')
threshold value immediately above (below for extremes_type='low')
which the GPD parameters are approximately constant.
Parameters
----------
ts : pandas.Series
Time series of the signal.
thresholds : array-like, optional
An array of thresholds for which the mean residual life plot is plotted.
If None (default), plots mean residual life for 100 equally-spaced thresholds
between 90th (10th if extremes_type='low') percentile
and 10th largest (smallest if extremes_type='low') value in the series.
r : pandas.Timedelta or value convertible to timedelta, optional
Duration of window used to decluster the exceedances.
By default r='24H' (24 hours).
See pandas.to_timedelta for more information.
extremes_type : str, optional
high (default) - extreme high values
low - extreme low values
alpha : float, optional
Confidence interval width in the range (0, 1).
If None (default), then confidence interval is not shown.
n_samples : int, optional
Number of bootstrap samples used to estimate
confidence interval bounds (default=100).
Ignored if `alpha` is None.
axes : (ax_shape, ax_scale), optional
Tuple with matplotlib Axes for shape and scale values.
If None (default), new figure and axes are created.
figsize : tuple, optional
Figure size in inches in format (width, height).
By default it is (8, 5).
progress : bool, optional
If True, shows tqdm progress bar.
By default False.
Returns
-------
ax_shape : matplotlib.axes._axes.Axes
Axes with shape parameter values.
ax_scale : matplotlib.axes._axes.Axes
Axes with scale parameter values.
"""
try:
import tqdm # pylint: disable=import-outside-toplevel
except ImportError as error:
if progress:
raise ImportError(
"'tqdm' package is required to display a progress bar"
) from error
# Get default thresholds
if thresholds is None:
thresholds = get_default_thresholds(
ts=ts,
extremes_type=extremes_type,
num=100,
)
# List of unique seeds - ensures same seed is not reused across sub-processes
seeds: typing.List[int] = []
def _input_generator() -> typing.Generator[
typing.Tuple[
pd.Series, # ts (time series)
str, # extremes_type
float, # threshold
typing.Union[str, pd.Timedelta], # r
typing.Optional[float], # alpha
int, # n_samples
int, # seed
],
None,
None,
]:
for threshold in thresholds:
seed = np.random.randint(low=0, high=1e6, size=None)
while seed in seeds:
seed = np.random.randint(low=0, high=1e6, size=None)
seeds.append(seed)
yield (ts, extremes_type, threshold, r, alpha, n_samples, seed)
iterable = (
tqdm.tqdm(
_input_generator(),
desc="calculating stability parameters",
total=len(thresholds),
smoothing=0,
)
if progress
else _input_generator()
)
cpu_count = os.cpu_count() or 1
if cpu_count > 1:
with multiprocessing.Pool(processes=os.cpu_count()) as pool:
_results = list(pool.imap(_calculate_modified_parameters, iterable))
else:
_results = []
for args in iterable:
_results.append(_calculate_modified_parameters(args))
results = (
pd.DataFrame(data=_results).set_index("threshold").sort_index(ascending=True)
)
with plt.rc_context(rc=pyextremes_rc):
if axes is None:
# Create figure
fig = plt.figure(figsize=figsize, dpi=96)
# Create gridspec
gs = matplotlib.gridspec.GridSpec(
nrows=2,
ncols=1,
wspace=0.1,
hspace=0.1,
width_ratios=[1],
height_ratios=[1, 1],
)
# Create and configure axes
ax_shape = fig.add_subplot(gs[0, 0])
ax_scale = fig.add_subplot(gs[1, 0])
else:
fig = None
ax_shape, ax_scale = axes
# Plot central estimates of shape and modified scale parameters
ax_shape.plot(
results.index,
results.loc[:, "shape"],
ls="-",
color="#F85C50",
lw=2,
zorder=15,
)
ax_scale.plot(
results.index,
results.loc[:, "scale"],
ls="-",
color="#F85C50",
lw=2,
zorder=15,
)
# Plot confidence bounds
if alpha is not None:
for ax, parameter in [(ax_shape, "shape"), (ax_scale, "scale")]:
for ci in ["lower", "upper"]:
ax.plot(
results.index,
results.loc[:, f"{parameter}_ci_{ci}"],
color="#5199FF",
lw=1,
ls="--",
zorder=10,
)
ax.fill_between(
results.index,
results.loc[:, f"{parameter}_ci_lower"],
results.loc[:, f"{parameter}_ci_upper"],
facecolor="#5199FF",
edgecolor="None",
alpha=0.25,
zorder=5,
)
if fig is not None:
# Configure axes
ax_shape.tick_params(axis="x", which="both", labelbottom=False, length=0)
ax_scale.set_xlim(ax_shape.get_xlim())
# Label axes
ax_shape.set_ylabel(r"Shape, $\xi$")
ax_scale.set_ylabel(r"Modified scale, $\sigma^*$")
if fig is not None:
ax_scale.set_xlabel("Threshold")
return ax_shape, ax_scale
def _calculate_return_value(
args: typing.Tuple[
pd.Series, # ts (time series)
float, # return_period
typing.Union[str, pd.Timedelta], # return_period_size
float, # threshold
typing.Union[str, pd.Timedelta], # r
str, # extremes_type
typing.Union[str, scipy.stats.rv_continuous], # distribution
str, # distribution_name
typing.Optional[float], # alpha
int, # n_samples
],
) -> typing.Dict[str, typing.Union[str, typing.Optional[float]]]:
(
ts,
return_period,
return_period_size,
threshold,
r,
extremes_type,
distribution,
distribution_name,
alpha,
n_samples,
) = args
model = EVA(data=ts)
model.get_extremes(
method="POT",
extremes_type=extremes_type,
threshold=threshold,
r=r,
)
model.fit_model(
model="MLE",
distribution=distribution,
)
# TODO - this is a hack to avoid spawning nested subprocesses
_n_samples = n_samples % 10
while _n_samples < n_samples:
_n_samples += 10
model.get_return_value(
return_period=return_period,
return_period_size=return_period_size,
alpha=alpha,
n_samples=_n_samples,
)
rv, cil, ciu = model.get_return_value(
return_period=return_period,
return_period_size=return_period_size,
alpha=alpha,
n_samples=n_samples,
)
return {
"distribution_name": distribution_name,
"threshold": threshold,
"rv": rv,
"cil": cil,
"ciu": ciu,
}
def plot_return_value_stability(
ts: pd.Series,
return_period: float,
return_period_size: typing.Union[str, pd.Timedelta] = "365.2425D",
thresholds=None,
r: typing.Union[str, pd.Timedelta] = "24H",
extremes_type: str = "high",
distributions: typing.Optional[
typing.List[typing.Union[str, scipy.stats.rv_continuous]]
] = None,
alpha: typing.Optional[float] = None,
n_samples: int = 100,
ax: typing.Optional[plt.Axes] = None,
figsize: tuple = (8, 5),
progress: bool = False,
) -> plt.Axes:
"""
Plot return value stability plot for given threshold values.
The return value stability plot shows return values for given return period
for given thresholds.
The purpose of this plot is to investigate stability and sensitivity of the
Generalized Pareto Distribution model to the threshold value.
Threshold value selection should still be guided by the mean residual life plot
and the parameter stability plot. This plot should be used as an additional check.
Parameters
----------
ts : pandas.Series
Time series of the signal.
return_period : float
Return period.
Given as a multiple of `return_period_size`.
return_period_size : str or pandas.Timedelta, optional
Size of return period (default='365.2425D').
If set to '30D', then a return period of 12
would be roughly equivalent to a 1 year return period (360 days).
thresholds : array-like, optional
An array of thresholds for which the return value plot is plotted.
If None (default), plots return values for 100 equally-spaced thresholds
between 90th (10th if extremes_type='low') percentile
and 10th largest (smallest if extremes_type='low') value in the series.
r : pandas.Timedelta or value convertible to timedelta, optional
Duration of window used to decluster the exceedances.
By default r='24H' (24 hours).
See pandas.to_timedelta for more information.
extremes_type : str, optional
high (default) - extreme high values
low - extreme low values
distributions : list, optional
List of distributions for which the return value curves are plotted.
By default these are "genpareto" and "expon".
A distribution must be either a name of distribution from scipy.stats
or a subclass of scipy.stats.rv_continuous.
See https://docs.scipy.org/doc/scipy/reference/stats.html
alpha : float, optional
Confidence interval width in the range (0, 1).
If None (default), then confidence interval is not shown.
n_samples : int, optional
Number of bootstrap samples used to estimate
confidence interval bounds (default=100).
Ignored if `alpha` is None.
ax : matplotlib.axes._axes.Axes, optional
If provided, then the plot is drawn on this axes.
If None (default), new figure and axes are created
figsize : tuple, optional
Figure size in inches in format (width, height).
By default it is (8, 5).
progress : bool, optional
If True, shows tqdm progress bar.
By default False.
Returns
-------
matplotlib.axes._axes.Axes
Axes object.
"""
try:
import tqdm # pylint: disable=import-outside-toplevel
except ImportError as error:
if progress:
raise ImportError(
"'tqdm' package is required to display a progress bar"
) from error
# Get default `thresholds`
if thresholds is None:
thresholds = get_default_thresholds(
ts=ts,
extremes_type=extremes_type,
num=100,
)
# Get default `distributions`
if distributions is None:
distributions = [
"genpareto",
"expon",
]
distribution_names: typing.List[str] = []
for distribution in distributions:
if isinstance(distribution, str):
distribution_names.append(distribution)
else:
distribution_names.append(distribution.name)
def _input_generator() -> typing.Generator[
typing.Tuple[
pd.Series, # ts (time series)
float, # return_period
typing.Union[str, pd.Timedelta], # return_period_size
float, # threshold
typing.Union[str, pd.Timedelta], # r
str, # extremes_type
typing.Union[str, scipy.stats.rv_continuous], # distribution
str, # distribution_name
typing.Optional[float], # alpha
int, # n_samples
],
None,
None,
]:
for distribution, distribution_name in zip(distributions, distribution_names):
for threshold in thresholds:
yield (
ts,
return_period,
return_period_size,
threshold,
r,
extremes_type,
distribution,
distribution_name,
alpha,
n_samples,
)
iterable = (
tqdm.tqdm(
_input_generator(),
desc="calculating return values",
total=len(distributions) * len(thresholds),
smoothing=0,
)
if progress
else _input_generator()
)
cpu_count = os.cpu_count() or 1
if cpu_count > 1:
with multiprocessing.Pool(processes=os.cpu_count()) as pool:
_results = list(pool.imap(_calculate_return_value, iterable))
else:
_results = []
for args in iterable:
_results.append(_calculate_return_value(args))
results = pd.DataFrame(data=_results).sort_values("threshold", ascending=True)
with plt.rc_context(rc=pyextremes_rc):
if ax is None:
_, ax = plt.subplots(figsize=figsize, dpi=96)
ax.grid(False)
for i, (distribution_name, df) in enumerate(
results.groupby("distribution_name")
):
# Plot central estimate of return values
color = pyextremes_rc["axes.prop_cycle"].by_key()["color"][i]
ax.plot(
df.loc[:, "threshold"],
df.loc[:, "rv"],
color=color,
lw=2,
ls="-",
label=distribution_name,
zorder=(i + 3) * 5,
)
# Plot confidence bounds
if alpha is not None:
for column in ["cil", "ciu"]:
ax.plot(
df.loc[:, "threshold"],
df.loc[:, column],
color=color,
lw=1,
ls="--",
zorder=(i + 2) * 5,
)
ax.fill_between(
df.loc[:, "threshold"],
df.loc[:, "cil"],
df.loc[:, "ciu"],
facecolor=color,
edgecolor="None",
alpha=0.25,
zorder=(i + 1) * 5,
)
# Plot legend
ax.legend(frameon=True, framealpha=0.9)
# Label axes
ax.set_xlabel("Threshold")
ax.set_ylabel("Return value")
return ax
def plot_aic_scores(
ts: pd.Series,
thresholds=None,
r: typing.Union[str, pd.Timedelta] = "24H",
extremes_type: str = "high",
distributions: typing.Optional[
typing.List[typing.Union[str, scipy.stats.rv_continuous]]
] = None,
ax: typing.Optional[plt.Axes] = None,
figsize: tuple = (8, 5),
) -> plt.Axes:
"""
Plot AIC scores for each distribution and threshold.
Used to investigate which distribution better explains data variance for each
threshold value. Does NOT indicate which threshold value is better, because
the AIC curve always has the same general shape (a logarithmic curve).
Parameters
----------
ts : pandas.Series
Time series of the signal.
thresholds : array-like, optional
An array of thresholds for which the AIC plot is plotted.
If None (default), plots AIC for 100 equally-spaced thresholds
between 90th (10th if extremes_type='low') percentile
and 10th largest (smallest if extremes_type='low') value in the series.
r : pandas.Timedelta or value convertible to timedelta, optional
Duration of window used to decluster the exceedances.
By default r='24H' (24 hours).
See pandas.to_timedelta for more information.
extremes_type : str, optional
high (default) - extreme high values
low - extreme low values
distributions : list, optional
List of distributions for which the AIC curves are plotted.
By default these are "genpareto" and "expon".
A distribution must be either a name of distribution from scipy.stats
or a subclass of scipy.stats.rv_continuous.
See https://docs.scipy.org/doc/scipy/reference/stats.html
ax : matplotlib.axes._axes.Axes, optional
If provided, then the plot is drawn on this axes.
If None (default), new figure and axes are created
figsize : tuple, optional
Figure size in inches in format (width, height).
By default it is (8, 5).
Returns
-------
plt.Axes
Axes object.
"""
# Get default `thresholds`
if thresholds is None:
thresholds = get_default_thresholds(
ts=ts,
extremes_type=extremes_type,
num=100,
)
# Get default `distributions`
if distributions is None:
distributions = [
"genpareto",
"expon",
]
distribution_names: typing.List[str] = []
for distribution in distributions:
if isinstance(distribution, str):
distribution_names.append(distribution)
else:
distribution_names.append(distribution.name)
# Calculate AIC values
model = EVA(data=ts)
results = []
for distribution, distribution_name in zip(distributions, distribution_names):
for threshold in thresholds:
model.get_extremes(
method="POT",
extremes_type=extremes_type,
threshold=threshold,
r=r,
)
model.fit_model(model="MLE", distribution=distribution)
results.append(
{
"distribution_name": distribution_name,
"threshold": threshold,
"aic": model.AIC,
}
)
results = pd.DataFrame(data=results).sort_values("threshold", ascending=True)
with plt.rc_context(rc=pyextremes_rc):
if ax is None:
_, ax = plt.subplots(figsize=figsize, dpi=96)
ax.grid(False)
for i, (distribution_name, df) in enumerate(
results.groupby("distribution_name")
):
ax.plot(
df.loc[:, "threshold"],
df.loc[:, "aic"],
color=pyextremes_rc["axes.prop_cycle"].by_key()["color"][i],
lw=2,
ls="-",
label=distribution_name,
zorder=(i + 3) * 5,
)
# Plot legend
ax.legend(frameon=True, framealpha=0.9)
# Label axes
ax.set_xlabel("Threshold")
ax.set_ylabel("AIC Score")
return ax
def plot_threshold_stability(
ts: pd.Series,
return_period: float,
return_period_size: typing.Union[str, pd.Timedelta] = "365.2425D",
thresholds=None,
r: typing.Union[str, pd.Timedelta] = "24H",
extremes_type: str = "high",
distributions: typing.Optional[
typing.List[typing.Union[str, scipy.stats.rv_continuous]]
] = None,
alpha: typing.Optional[float] = None,
n_samples: int = 100,
figsize: typing.Tuple[float, float] = (8, 2.5 * 4),
progress: bool = False,
) -> typing.Tuple[plt.Axes, plt.Axes, plt.Axes, plt.Axes]:
"""
Plot threshold influence on GPD parameters, return values, and AIC scores.
Used as a utility function which plots multiple metrics in the same figure.
Parameters
----------
ts : pandas.Series
Time series of the signal.
return_period : float
Return period.
Given as a multiple of `return_period_size`.
return_period_size : str or pandas.Timedelta, optional
Size of return period (default='365.2425D').
If set to '30D', then a return period of 12
would be roughly equivalent to a 1 year return period (360 days).
thresholds : array-like, optional
An array of thresholds for which the metrics are plotted.
If None (default), plots metrics for 100 equally-spaced thresholds
between 90th (10th if extremes_type='low') percentile
and 10th largest (smallest if extremes_type='low') value in the series.
r : pandas.Timedelta or value convertible to timedelta, optional
Duration of window used to decluster the exceedances.
By default r='24H' (24 hours).
See pandas.to_timedelta for more information.
extremes_type : str, optional
high (default) - extreme high values
low - extreme low values
distributions : list, optional
List of distributions for which the metrics are plotted.
By default these are "genpareto" and "expon".
A distribution must be either a name of distribution from scipy.stats
or a subclass of scipy.stats.rv_continuous.
See https://docs.scipy.org/doc/scipy/reference/stats.html
alpha : float, optional
Confidence interval width in the range (0, 1).
If None (default), then confidence interval is not shown.
n_samples : int, optional
Number of bootstrap samples used to estimate
confidence interval bounds (default=100).
Ignored if `alpha` is None.
figsize : tuple, optional
Figure size in inches in format (width, height).
By default it is (8, 10).
progress : bool, optional
If True, shows tqdm progress bar.
By default False.
Returns
-------
ax_shape : matplotlib.axes._axes.Axes
ax_scale : matplotlib.axes._axes.Axes
ax_rv : matplotlib.axes._axes.Axes
ax_aic : matplotlib.axes._axes.Axes
"""
# Get default thresholds
if thresholds is None:
thresholds = get_default_thresholds(
ts=ts,
extremes_type=extremes_type,
num=100,
)
with plt.rc_context(rc=pyextremes_rc):
# Create figure
fig = plt.figure(figsize=figsize, dpi=96)
# Create gridspec
gs = matplotlib.gridspec.GridSpec(
nrows=4,
ncols=1,
wspace=0.1,
hspace=0.1,
width_ratios=[1],
height_ratios=[1, 1, 1, 1],
)
# Create and configure axes
ax_shape = fig.add_subplot(gs[0, 0])
ax_scale = fig.add_subplot(gs[1, 0])
ax_rv = fig.add_subplot(gs[2, 0])
ax_aic = fig.add_subplot(gs[3, 0])
axes = [ax_shape, ax_scale, ax_rv, ax_aic]
# Produce individual plots
plot_parameter_stability(
ts=ts,
thresholds=thresholds,
r=r,
extremes_type=extremes_type,
alpha=alpha,
n_samples=n_samples,
axes=(ax_shape, ax_scale),
progress=progress,
)
plot_return_value_stability(
ts=ts,
return_period=return_period,
return_period_size=return_period_size,
thresholds=thresholds,
r=r,
extremes_type=extremes_type,
distributions=distributions,
alpha=alpha,
n_samples=n_samples,
ax=ax_rv,
progress=progress,
)
plot_aic_scores(
ts=ts,
thresholds=thresholds,
r=r,
extremes_type=extremes_type,
distributions=distributions,
ax=ax_aic,
)
# Format axes
for ax in axes[:-1]:
ax.tick_params(axis="x", which="both", labelbottom=False, length=0)
ax.set_xlim(axes[-1].get_xlim())
ax.set_xlabel("")
axes[-1].set_xlabel("Threshold")
return ax_shape, ax_scale, ax_rv, ax_aic
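# A minimal usage sketch (illustrative only, not part of the documented API):
# it assumes `ts` is a pandas.Series of observations indexed by a DatetimeIndex
# and that a 100-year return period is of interest.
#
#     ax_shape, ax_scale, ax_rv, ax_aic = plot_threshold_stability(
#         ts, return_period=100, alpha=0.95, progress=True
#     )
#     ax_aic.figure.savefig("threshold_stability.png", dpi=96)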
|
11516789
|
import os
from typing import Any, Dict, Mapping, Tuple, Union
from htmap import transfer
KWARGS = Dict[str, Any]
ARGS = Tuple[Any, ...]
ARGS_OR_KWARGS = Union[ARGS, KWARGS]
ARGS_AND_KWARGS = Tuple[ARGS, KWARGS]
ITEMDATUM = Dict[str, str]
TRANSFER_PATH = Union[os.PathLike, transfer.TransferPath]
REMAPS = Mapping[str, transfer.TransferPath]
|
11516797
|
import numpy as np
from sklearn.preprocessing import StandardScaler
# from matplotlib import pyplot as plt
from .trend_filters import HPTrendFilter_trend
from .wavelet_fun import robust_MODWT_var
from .periodogram_acf_fun import general_periodogram, acf_med_from_periodogram
from .fisher_siegel_test import fisher_test
from .util import plot_signal_periodogram_acf
figsize = (9, 2)
def two_value_similar(a, b, margin_percent=10):
return np.abs(a - b) / max(a, b) <= margin_percent / 100.0
# assume at most two similar values at a time (adjacent to each other, since they come from MODWT levels)
def merge_similar_periods(periods_candi, margin_percent=10):
pd_num = len(periods_candi)
if pd_num <= 1:
return periods_candi
output = set()
ii = 0
while ii < pd_num:
if ii == pd_num - 1:
a = periods_candi[ii]
output.add(a)
break
a = periods_candi[ii]
b = periods_candi[ii + 1]
if two_value_similar(a, b, margin_percent):
output.add(int((a + b) / 2))
ii += 2
else:
output.add(a)
ii += 1
return sorted(output)
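# Worked example with the default 10% margin (illustrative): for input [10, 11, 50],
# |10 - 11| / 11 <= 0.1, so the adjacent pair is merged to int((10 + 11) / 2) = 10,
# while 50 is kept as-is, giving merge_similar_periods([10, 11, 50]) == [10, 50].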
def merge_two_periods(per_a, per_b, eng_a, eng_b):
out_eng = eng_a + eng_b
out_per = (per_a * eng_a + per_b * eng_b) / out_eng
return out_per, out_eng
def merge_similar_periods_by_ranking(in_periods_candi,
in_periods_candi_energy,
margin_percent=10):
pd_num = len(in_periods_candi)
if pd_num <= 1:
return in_periods_candi, in_periods_candi_energy
# ranking based on in_periods_candi in order to merge later
sorted_idx = np.argsort(in_periods_candi)
periods_candi = np.array(in_periods_candi)[sorted_idx].tolist()
periods_candi_energy = np.array(
in_periods_candi_energy)[sorted_idx].tolist()
out_periods_candi = []
out_periods_candi_energy = []
ii = 0
while ii < pd_num:
if ii == pd_num - 1: # the last one
per_a, eng_a = periods_candi[ii], periods_candi_energy[ii]
out_periods_candi.append(per_a)
out_periods_candi_energy.append(eng_a)
break
per_a, eng_a = periods_candi[ii], periods_candi_energy[ii]
per_b, eng_b = periods_candi[ii + 1], periods_candi_energy[ii + 1]
if two_value_similar(per_a, per_b, margin_percent):
out_per, out_eng = merge_two_periods(per_a, per_b, eng_a, eng_b)
ii += 2
else:
out_per, out_eng = per_a, eng_a
ii += 1
out_periods_candi.append(out_per)
out_periods_candi_energy.append(out_eng)
return out_periods_candi, out_periods_candi_energy
def tfPeriod(original_data,
trend_remove_toggle=True,
robustTrend_toggle=True,
trend_lambda=1e8,
wavelet_name='db4',
robust_wavelet_toggle=True,
robust_wv_toggle=True,
robust_wavelet_ra_fun='Huber', # Huber, Tukey
periodogram_cost_fun='Huber', # Huber, LS, LAD
             # (Note: for LAD, using Huber with t=.001 gives better stability)
periodogram_huber_t=1.0,
periodogram_admm_maxiters=50,
fisher_alph_th=1e-5,
acf_out_len=None,
acf_peak_th=0.5,
acf_avg='median',
output_ranking=False, # added new feature
output_period_maxnum=None,
debug=False):
output_periods_set = set()
output_periods_list = []
output_periods_energy_list = []
# at least 16 points length
if len(original_data) < 16:
return sorted(output_periods_set)
# normalize data: scale to around ~N(0,1)
original_signal = original_data.reshape(-1, 1)
scaler = StandardScaler()
real_data = scaler.fit_transform(original_signal)
# (1) (robust) coarse trend removing
if trend_remove_toggle:
detrend_filter = HPTrendFilter_trend(lambda_reg=trend_lambda,
quasi_robust=robustTrend_toggle,
upper_percent=0.9,
lower_percent=0.1)
trend_data = detrend_filter.fit_transform(real_data)
input_data_detrend = real_data - trend_data
else:
trend_data = np.zeros_like(real_data)
input_data_detrend = real_data
# (1-1): truncate or pad to 2**n length
ori_len = len(input_data_detrend)
final_len_large = 2**int(np.ceil(np.log2(ori_len)))
final_len_small = 2**int(np.floor(np.log2(ori_len)))
if final_len_small == final_len_large:
input_data_adj = input_data_detrend
elif (ori_len - final_len_small) / final_len_small <= 0.5:
# truncate
input_data_adj = input_data_detrend[:final_len_small]
else:
# padding
final_len = final_len_large
pad_len = final_len - ori_len
pad_len_begin = 0
pad_len_end = pad_len
input_data_adj = np.pad(input_data_detrend.reshape(-1, ),
(pad_len_begin, pad_len_end),
'wrap') # wrap, symmetric is better
input_data_adj = input_data_adj.reshape(-1, 1)
# (2) robust Wavelet and robust WV
input_data_scaled, coeffs, wavelet_level_vars, level_num = \
robust_MODWT_var(input_data=input_data_adj,
wavelet_name=wavelet_name,
robust_wavelet_toggle=robust_wavelet_toggle,
robust_wv_toggle=robust_wv_toggle,
robust_wavelet_ra_fun=robust_wavelet_ra_fun)
if debug:
# Fig 1: ori, peri, acf
from matplotlib import pyplot as plt
plot_signal_periodogram_acf(original_signal)
        # Fig 2: remove trend and robust preprocessing for wavelet
fig, axarr = plt.subplots(nrows=1, ncols=2, figsize=(9, 2))
axarr[0].plot(real_data, 'b', label='real_data (normalized)')
axarr[0].plot(trend_data, 'r', label='trend_data')
axarr[0].legend() # loc='upper right' upper, lower, center
axarr[1].plot(input_data_detrend, 'b', label='detrended_signal')
axarr[1].plot(input_data_scaled,
'r',
                      label='signal after robust preprocessing for wavelet')
axarr[1].legend()
plt.tight_layout()
plt.show()
# Fig 3: before Wavelet: signal, peri, acf
plot_signal_periodogram_acf(input_data_scaled)
# Fig 4: for each level of wavelet
fontsize = 8
fig, axarr = plt.subplots(nrows=level_num, ncols=3, figsize=(9, 12))
# each level processing of wavelet coefficient
for ii in range(level_num):
wavelet_level = ii + 1
# print('wavelet_level=%d' % (wavelet_level))
(data, coeff_d) = coeffs[level_num - ii - 1]
# (3) robust Huber-Periodogram based Fisher-test
# zero padding
data_pad = np.hstack((coeff_d, np.zeros(len(coeff_d))))
periodogram_values, ts_len = general_periodogram(
input_data=data_pad,
cost_fun=periodogram_cost_fun,
Huber_t=periodogram_huber_t)
# fisher test
period_candi, period_range, per_T, pValue, _ = fisher_test(
periodogram_values, ts_len, fisher_alph_th)
# final ACF
passed_check_acf, final_period, acf_period, acf_result, peaks_idx = \
acf_med_from_periodogram(coeff_d,
periodogram_values,
acf_peak_th=acf_peak_th,
acf_avg=acf_avg,
acf_out_len=acf_out_len,
period_candi=period_candi,
period_range=period_range)
if passed_check_acf:
output_periods_set.add(final_period)
            # for ranking (TODO: "for ii in range(level_num)" based on wv)
output_periods_list.append(final_period)
output_periods_energy_list.append(wavelet_level_vars[ii])
if debug:
# plot Detail Coef TS
axarr[ii, 0].plot(coeff_d, 'g')
axarr[ii, 0].set_title("Wavelet Coef: Var=%.3f" %
(wavelet_level_vars[ii]),
fontsize=fontsize)
axarr[ii, 0].set_ylabel("Level {}".format(wavelet_level),
fontsize=fontsize,
rotation=90)
# plot periodogram
axarr[ii, 1].plot(periodogram_values, 'r')
axarr[ii, 1].set_title("PER: p=%.2e; per_T=%d,fin_T=%d" %
(pValue, per_T, period_candi),
fontsize=fontsize)
# ## tmp for paper fig
# axarr[ii, 1].set_title("Periodogram: p=%.2e; per_T=%d" %
# (pValue, period_candi),
# fontsize=fontsize)
# plot ACF
acf_result_pk_th = acf_peak_th * np.ones_like(acf_result)
acf_result_peaks = acf_result[peaks_idx]
axarr[ii, 2].plot(acf_result, 'b')
axarr[ii, 2].plot(acf_result_pk_th, 'g')
if passed_check_acf:
axarr[ii, 2].plot(peaks_idx, acf_result_peaks, 'r*')
axarr[ii,
2].set_title("ACF: acf_T=%d,fin_T=%d; Period=%s" %
(acf_period, final_period, passed_check_acf),
fontsize=fontsize)
if debug:
plt.tight_layout()
plt.show()
# plt.savefig("simEx-WaveFFTACF.pdf")
# print("wavelet_level_vars")
# print(wavelet_level_vars)
# plot WV
plt.figure(figsize=(9, 3.7))
        fontsize = 14
plt.plot(range(1, level_num + 1), (wavelet_level_vars),
'-*',
label='robust wavelet variance')
plt.ylim((0, 0.23))
        plt.legend(loc='upper left', fontsize=fontsize)
plt.xlabel("Wavelet Level")
plt.ylabel("Wavelet Variance")
plt.show()
# plt.savefig("simEx-WaveVariance.pdf")
if output_ranking:
for idx in range(5):
output_periods_list, output_periods_energy_list = \
merge_similar_periods_by_ranking(
output_periods_list, output_periods_energy_list)
# final output: rank and round
sorted_idx = np.argsort(output_periods_energy_list)[::-1]
output_periods_list = np.array(output_periods_list)[sorted_idx].astype(
int).tolist()
# output_periods_energy_list = np.array(
# output_periods_energy_list)[sorted_idx].tolist()
final_out = output_periods_list
if output_periods_list is not None and len(output_periods_list) > 1:
final_out = output_periods_list[:output_period_maxnum]
else:
tmp_out = sorted(output_periods_set)
tmp_out1 = merge_similar_periods(tmp_out)
final_out = merge_similar_periods(tmp_out1)
return final_out
|
11516804
|
import datetime
from mldb import mldb
start = datetime.datetime.now()
# Note that this test requires a *local, uncompressed* copy of the file
# so that it can be memory mapped, and this file is 15GB large and
# contains 150 million records. So this is not a test that can be
# easily replicated, hence the manual designation.
mldb.put("/v1/procedures/airline", {
"type":"import.text",
"params": {
"dataFileUrl": "file://allyears.1987.2013.csv",
"offset" : 0,
"ignoreBadLines" : True,
"outputDataset": {
"id": "airline"
},
"runOnCreation": True
}
})
mldb.log(datetime.datetime.now() - start)
|
11516820
|
import numpy as np
from pyapprox.approximate import adaptive_approximate
def adaptive_approximate_multi_index_sparse_grid(fun, variable, options):
"""
    A lightweight wrapper for building multi-index approximations.
Some checks are made to ensure certain required options have been provided.
See :func:`pyapprox.approximate.adaptive_approximate_sparse_grid` for more
details.
"""
assert 'config_variables_idx' in options
assert 'config_var_trans' in options
sparse_grid = adaptive_approximate(fun, variable, 'sparse_grid', options)
return sparse_grid
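# Illustrative call sketch: the two asserted option keys must be present; every
# other name below is a placeholder that depends on the model/variable setup.
#
#     options = {
#         "config_variables_idx": config_variables_idx,  # required, asserted above
#         "config_var_trans": config_var_trans,          # required, asserted above
#         "max_nsamples": 100,                           # hypothetical extra option
#     }
#     approx = adaptive_approximate_multi_index_sparse_grid(fun, variable, options)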
|
11516821
|
import numpy as np
NUM_UNITS=128
INPUT_SIZE = 28
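# Descriptive note: this script unpacks trained RNN weights (GRU-style, judging by
# the fused reset/update kernel W_RU) from RnnData2.npz, splits them into
# per-gate input/recurrent blocks, and writes them out as C arrays wrapped in
# _ParaFrom(...) macros inside a MNIST_RNN.h header.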
def bi2str(bi):
lb=list(bi)
ss=""
for nn in lb:
ss+=("_ParaFrom(%.8f),"%(nn))
return ss[0:len(ss)-1]
def we2str(we):
we=np.reshape(we,(1,np.size(we)))[0]
lw=list(we)
ss=""
for nn in lw:
ss+=("_ParaFrom(%.8f),"%(nn))
return ss[0:len(ss)-1]
npzfile=np.load('RnnData2.npz')
we1=npzfile['W_RU']
bi1=npzfile['B_RU']
we2=npzfile['W_core']
bi2=npzfile['B_core']
we3=npzfile['W_f']
bi3=npzfile['B_f'][0]
xin=npzfile['xin']
ypre=npzfile['ypre']
yref=npzfile['yref']
WR=we1[:,:NUM_UNITS]
BR=bi1[:NUM_UNITS]
WU=we1[:,NUM_UNITS:]
BU=bi1[NUM_UNITS:]
wrx=WR[:INPUT_SIZE,:]
wrh=WR[INPUT_SIZE:,:]
wux=WU[:INPUT_SIZE,:]
wuh=WU[INPUT_SIZE:,:]
wcx=we2[:INPUT_SIZE,:]
wch=we2[INPUT_SIZE:,:]
we1str=""
we1str+=(we2str(wrh)+",\n")
we1str+=(we2str(wrx)+",\n")
we1str+=(we2str(wuh)+",\n")
we1str+=(we2str(wux)+",\n")
we1str+=(we2str(wch)+",\n")
we1str+=(we2str(wcx))
bi1str=""
bi1str+=(bi2str(BR)+",")
bi1str+=(bi2str(BU)+",")
bi1str+=(bi2str(bi2)+",")
restr="#ifndef MINST_RNN_H_\n#define MINST_RNN_H_\n"
restr+="const ParaType we1[]={"+we1str+"};\n"
restr+="const ParaType bi1[]={"+bi1str+"};\n"
restr+="const ParaType we2[]={"+we2str(we3)+"};\n"
restr+="const ParaType bi2[]={"+bi2str(bi3)+"};\n"
restr+="#endif\n"
file=open("MNIST_RNN.h",'w+')
file.write(restr)
file.close()
print(list(xin[0]))
print(list(yref[0]))
print(list(ypre[0]))
|
11516835
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import unittest
"""
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<idle>-0 [001] ...2 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120
"""
TRACE_LINE_PATTERN = re.compile(
r'^\s*(?P<task>.+)-(?P<pid>\d+)\s+(?:\((?P<tgid>.+)\)\s+)?\[(?P<cpu>\d+)\]\s+(?:(?P<flags>\S{4})\s+)?(?P<timestamp>[0-9.]+):\s+(?P<function>.+)$')
"""
Example lines from custom app traces:
0: B|27295|providerRemove
0: E
tracing_mark_write: S|27311|NNFColdStart<D-7744962>|1112249168
"""
APP_TRACE_LINE_PATTERN = re.compile(
r'^(?P<type>.+?): (?P<args>.+)$')
"""
Example section names:
NNFColdStart
NNFColdStart<0><T7744962>
NNFColdStart<X>
NNFColdStart<T7744962>
"""
DECORATED_SECTION_NAME_PATTERN = re.compile(r'^(?P<section_name>.*?)(?:<0>)?(?:<(?P<command>.)(?P<argument>.*?)>)?$')
SYSTRACE_LINE_TYPES = set(['0', 'tracing_mark_write'])
class TraceLine(object):
def __init__(self, task, pid, tgid, cpu, flags, timestamp, function):
self.task = task
self.pid = pid
self.tgid = tgid
self.cpu = cpu
self.flags = flags
self.timestamp = timestamp
self.function = function
self.canceled = False
@property
def is_app_trace_line(self):
return isinstance(self.function, AppTraceFunction)
def cancel(self):
self.canceled = True
def __str__(self):
if self.canceled:
return ""
elif self.tgid:
return "{task:>16s}-{pid:<5d} ({tgid:5s}) [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n".format(**vars(self))
elif self.flags:
return "{task:>16s}-{pid:<5d} [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n".format(**vars(self))
else:
return "{task:>16s}-{pid:<5d} [{cpu:03d}] {timestamp:12.6f}: {function}\n".format(**vars(self))
class AppTraceFunction(object):
def __init__(self, type, args):
self.type = type
self.args = args
self.operation = args[0]
if len(args) >= 2 and args[1]:
self.pid = int(args[1])
if len(args) >= 3:
self._section_name, self.command, self.argument = _parse_section_name(args[2])
args[2] = self._section_name
else:
self._section_name = None
self.command = None
self.argument = None
self.cookie = None
@property
def section_name(self):
return self._section_name
@section_name.setter
def section_name(self, value):
self._section_name = value
self.args[2] = value
def __str__(self):
return "{type}: {args}".format(type=self.type, args='|'.join(self.args))
class AsyncTraceFunction(AppTraceFunction):
def __init__(self, type, args):
super(AsyncTraceFunction, self).__init__(type, args)
self.cookie = int(args[3])
TRACE_TYPE_MAP = {
'S': AsyncTraceFunction,
'T': AsyncTraceFunction,
'F': AsyncTraceFunction,
}
def parse_line(line):
match = TRACE_LINE_PATTERN.match(line.strip())
if not match:
return None
task = match.group("task")
pid = int(match.group("pid"))
tgid = match.group("tgid")
cpu = int(match.group("cpu"))
flags = match.group("flags")
timestamp = float(match.group("timestamp"))
function = match.group("function")
app_trace = _parse_function(function)
if app_trace:
function = app_trace
return TraceLine(task, pid, tgid, cpu, flags, timestamp, function)
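# Illustrative example, using the sample line from the comment block at the top
# of this module: parse_line() yields a TraceLine with task='<idle>', pid=0,
# cpu=1, flags='...2', timestamp=3269.291072, and the sched_switch payload kept
# as a plain string (it is not a systrace app-trace function).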
def parse_dextr_line(line):
task = line["name"]
pid = line["pid"]
tgid = line["tid"]
cpu = None
flags = None
timestamp = line["ts"]
function = AppTraceFunction("DextrTrace", [line["ph"], line["pid"], line["name"]])
return TraceLine(task, pid, tgid, cpu, flags, timestamp, function)
def _parse_function(function):
line_match = APP_TRACE_LINE_PATTERN.match(function)
if not line_match:
return None
type = line_match.group("type")
    if type not in SYSTRACE_LINE_TYPES:
return None
args = line_match.group("args").split('|')
if len(args) == 1 and len(args[0]) == 0:
args = None
constructor = TRACE_TYPE_MAP.get(args[0], AppTraceFunction)
return constructor(type, args)
def _parse_section_name(section_name):
if section_name is None:
return section_name, None, None
section_name_match = DECORATED_SECTION_NAME_PATTERN.match(section_name)
section_name = section_name_match.group("section_name")
command = section_name_match.group("command")
argument = section_name_match.group("argument")
return section_name, command, argument
def _format_section_name(section_name, command, argument):
if not command:
return section_name
return "{section_name}<{command}{argument}>".format(**vars())
class RoundTripFormattingTests(unittest.TestCase):
def testPlainSectionName(self):
section_name = "SectionName12345-5562342fas"
self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name)))
def testDecoratedSectionName(self):
section_name = "SectionName12345-5562342fas<D-123456>"
self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name)))
def testSimpleFunction(self):
function = "0: E"
self.assertEqual(function, str(_parse_function(function)))
def testFunctionWithoutCookie(self):
function = "0: B|27295|providerRemove"
self.assertEqual(function, str(_parse_function(function)))
def testFunctionWithCookie(self):
function = "0: S|27311|NNFColdStart|1112249168"
self.assertEqual(function, str(_parse_function(function)))
def testFunctionWithCookieAndArgs(self):
function = "0: T|27311|NNFColdStart|1122|Start"
self.assertEqual(function, str(_parse_function(function)))
def testFunctionWithArgsButNoPid(self):
function = "0: E|||foo=bar"
self.assertEqual(function, str(_parse_function(function)))
def testKitKatFunction(self):
function = "tracing_mark_write: B|14127|Looper.dispatchMessage|arg=>>>>> Dispatching to Handler (android.os.Handler) {422ae980} null: 0|Java"
self.assertEqual(function, str(_parse_function(function)))
def testNonSysTraceFunctionIgnored(self):
function = "sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120"
self.assertEqual(None, _parse_function(function))
def testLineWithFlagsAndTGID(self):
line = " <idle>-0 ( 550) [000] d..2 7953.258473: cpu_idle: state=1 cpu_id=0\n"
self.assertEqual(line, str(parse_line(line)))
def testLineWithFlagsAndNoTGID(self):
line = " <idle>-0 (-----) [000] d..2 7953.258473: cpu_idle: state=1 cpu_id=0\n"
self.assertEqual(line, str(parse_line(line)))
def testLineWithFlags(self):
line = " <idle>-0 [001] ...2 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n"
self.assertEqual(line, str(parse_line(line)))
def testLineWithoutFlags(self):
line = " <idle>-0 [001] 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n"
self.assertEqual(line, str(parse_line(line)))
|
11516864
|
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from django.contrib.localflavor.us.us_states import STATE_CHOICES
from django.contrib.localflavor.us.us_states import USPS_CHOICES
class USStateField(CharField):
description = _("U.S. state (two uppercase letters)")
def __init__(self, *args, **kwargs):
kwargs['choices'] = STATE_CHOICES
kwargs['max_length'] = 2
super(USStateField, self).__init__(*args, **kwargs)
class USPostalCodeField(CharField):
description = _("U.S. postal code (two uppercase letters)")
def __init__(self, *args, **kwargs):
kwargs['choices'] = USPS_CHOICES
kwargs['max_length'] = 2
super(USPostalCodeField, self).__init__(*args, **kwargs)
class PhoneNumberField(CharField):
description = _("Phone number")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 20
super(PhoneNumberField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USPhoneNumberField
defaults = {'form_class': USPhoneNumberField}
defaults.update(kwargs)
return super(PhoneNumberField, self).formfield(**defaults)
|
11516868
|
import xml.etree.ElementTree as ET
from os import walk, sep
file_converters = {
"DefaultPredicates": { "seperator": ":", "filename": "defaults.txt"},
"GenderSubstitutions": { "seperator": ":", "filename": "gender.txt"},
"PersonSubstitutions": { "seperator": ":", "filename": "person.txt"},
"Person2Substitutions": { "seperator": ":", "filename": "person2.txt"},
"Splitters": { "filename": "converters.txt"},
"Substitutions": { "seperator": ":", "filename": "denormals.txt"},
"Settings": { "seperator": ":", "filename": "settings.txt", "ignores": [
"aimldirectory",
"configdirectory",
"logdirectory",
"convertersfile",
"person2substitutionsfile",
"personsubstitutionsfile",
"gendersubstitutionsfile",
"defaultpredicates",
"substitutionsfile",
"maxlogbuffersize",
"notacceptinguserinputmessage",
"stripperregex",
"islogging",
"willcallhome",
"timeout",
"timeoutmessage",]}
}
class PandoraBotsFileConverter(object):
@staticmethod
def all_files(path):
files = []
for (_, _, names) in walk(path):
for name in names:
files.append(path + sep + name)
break
return files
@staticmethod
def get_filename(file):
splits1 = file.split(sep)
splits2 = splits1[-1].split(".")
return splits2[0]
@staticmethod
def convert_file(from_file, to_file, converter):
print(from_file)
with open(to_file, "w+") as open_file:
tree = ET.parse(from_file)
xml = tree.getroot()
for item in xml:
name = None
if 'name' in item.attrib:
name = item.attrib['name'].strip()
value = item.attrib['value'].strip()
seperator = ":"
if 'seperator' in converter:
seperator = converter['seperator']
ignores = []
if 'ignores' in converter:
ignores = converter['ignores']
if name:
if name not in ignores:
open_file.write("%s%s%s\n" % (name, seperator, value))
else:
open_file.write("%s\n" % value)
def convert(self, from_files, to_files):
files = self.all_files(from_files)
for from_file in files:
filename = self.get_filename(from_file)
if filename in file_converters:
converter = file_converters[filename]
to_file = to_files + sep + converter["filename"]
self.convert_file(from_file, to_file, converter)
if __name__ == '__main__':
from_files = "./pbfiles"
to_files = "./yfiles"
converter = ":"
converter = PandoraBotsFileConverter()
converter.convert(from_files, to_files)
|
11516893
|
from agent.geometric.util import batch_to_gd
from torch_geometric.nn import RGCNConv
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from typing import List
def parse_code(net_code: str):
"""
:param net_code: format <a>g[m]<b>f
"""
assert net_code[1]=="g"
assert net_code[-1]=="f"
nb_gnn_layers = int(net_code[0])
nb_dense_layers = int(net_code[-2])
    is_max = net_code[2] == "m"
return nb_gnn_layers, nb_dense_layers, is_max
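# Illustrative examples of the "<a>g[m]<b>f" net code (not exhaustive):
#     parse_code("2g0f")  -> (2, 0, False)
#     parse_code("3gm1f") -> (3, 1, True)   # "m" enables max-pooling over objects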
class GNNAgent(nn.Module):
def __init__(self, obj_n: int, action_n: int, input_dims:List[int], type: str, embedding_size=16, net_code="2g0f", mp_rounds=1):
super().__init__()
nb_edge_types = input_dims[2]
nb_layers, nb_dense_layers, self.max_reduce = parse_code(net_code)
self.embedding_linear = nn.Linear(input_dims[1], embedding_size)
gnn_layers = []
for i in range(nb_layers):
gnn_layers.append(RGCNConv(embedding_size, embedding_size, nb_edge_types))
self.gnn_layers = nn.ModuleList(gnn_layers)
dense_layers = []
for i in range(nb_dense_layers):
if i == 0:
if self.max_reduce:
dense_layers.append(nn.Linear(embedding_size, 128))
else:
dense_layers.append(nn.Linear(embedding_size*obj_n, 128))
else:
dense_layers.append(nn.Linear(128, 128))
dense_layers.append(nn.ReLU())
self.dense = nn.Sequential(*dense_layers)
self.num_actions = action_n
if nb_dense_layers == 0:
self.policy_linear = nn.Linear(embedding_size, self.num_actions)
self.baseline_linear = nn.Linear(embedding_size, 1)
else:
self.policy_linear = nn.Linear(128, self.num_actions)
self.baseline_linear = nn.Linear(128, 1)
self.mp_rounds = mp_rounds
self.nb_dense_layers = nb_dense_layers
def forward(self, obs, core_state=()):
T, B, *_ = obs["unary_tensor"].shape
device=next(self.parameters()).device
inputs = [[],
torch.flatten(obs["unary_tensor"], 0, 1).float(),
torch.flatten(obs["binary_tensor"], 0, 1).permute(0,3,1,2).float()]
if "nullary_tensor" in obs:
inputs[0] = torch.flatten(obs["nullary_tensor"], 0, 1).float()
for i in [1,2]:
inputs[i] = inputs[i].to(device=device)
adj_matrices = inputs[2]
gd, slices = batch_to_gd(adj_matrices)
embedds = torch.flatten(inputs[1], 0, 1)
embedds = self.embedding_linear(embedds)
for layer in self.gnn_layers:
for _ in range(self.mp_rounds):
embedds = layer.forward(embedds, gd.edge_index, gd.edge_attr)
embedds = torch.relu(embedds)
chunks = torch.split(embedds, slices, dim=0)
chunks = [p.unsqueeze(0) for p in chunks]
x = torch.cat(chunks, dim=0)
if self.max_reduce:
x, _ = torch.max(x, dim=1)
else:
x = torch.flatten(x, start_dim=1, end_dim=2)
x = self.dense(x)
policy_logits = self.policy_linear(x)
baseline = self.baseline_linear(x)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
        # The training and evaluation outputs are identical; the action above is
        # sampled only when self.training is True.
        return dict(policy_logits=policy_logits, baseline=baseline, action=action)
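# Descriptive summary: GNNAgent embeds per-object unary features, applies RGCNConv
# message passing over the typed-edge graph built from the binary tensor
# (mp_rounds passes per layer), pools over objects (max if the net code contains
# "m", otherwise flatten/concat), and produces policy logits plus a value baseline.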
|
11516901
|
from django.contrib import admin
from .models import User, Lead, Agent, UserProfile, Category, FollowUp
class LeadAdmin(admin.ModelAdmin):
# fields = (
# 'first_name',
# 'last_name',
# )
list_display = ['first_name', 'last_name', 'age', 'email']
list_display_links = ['first_name']
list_editable = ['last_name']
list_filter = ['category']
search_fields = ['first_name', 'last_name', 'email']
admin.site.register(Category)
admin.site.register(User)
admin.site.register(UserProfile)
admin.site.register(Lead, LeadAdmin)
admin.site.register(Agent)
admin.site.register(FollowUp)
|
11516909
|
import subprocess
import pandas as pd
import pybel
import numpy as np
import platform
def tail(f, lines=1):
return [x.strip() for x in subprocess.check_output(['tail', '-n%s' % lines, f]).decode('ascii').split('\n')]
def read_string(path):
'''Reads first line from a file.'''
with open(path, 'r') as f:
return f.readlines()[0].strip()
def write_string(string, path):
'''Writes a string to file.'''
with open(path, 'w') as f:
f.write(string.strip() + '\n')
def read_mass(path):
'''Reads mass from molmass.py output file.'''
with open(path, 'r') as f:
lines = f.readlines()
for x in lines:
if 'Monoisotopic mass' in x:
return float(x.split()[-1])
def read_pka(path):
'''Reads pKa from cxcalc output'''
df = pd.read_csv(path, sep='\t')
# offset indices because cxcalc is 1-based
idx = [int(x) - 1 for x in df['atoms'].values[0].split(',')]
pk = df.values[0][1:5]
label = ['a1', 'a2', 'b1', 'b2']
res = {}
i = 0
for x, p in zip(label, pk):
if not np.isnan(p):
res[x] = idx[i]
i += 1
return res
def read_mol(path, fmt='mol2'):
return Mol(next(pybel.readfile(fmt, path)))
class Mol(pybel.Molecule):
def __init__(self, mol):
super().__init__(mol)
def total_partial_charge(self):
return np.array([a.partialcharge for a in self.atoms]).sum()
def natoms(self):
return len(self.atoms)
def pop_atom(path, output, atom='Na'):
to_remove = []
to_save = []
with open(path, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if i == 2:
info = [int(x) for x in line.split()]
elif atom.upper() in line:
to_remove.append(i)
to_save.append(line)
change = len(to_remove)
with open(output, 'w') as f:
for i, line in enumerate(lines):
if i == 2:
info[0] -= change
f.write(' %s %s %s %s %s\n' % tuple(info))
elif i in to_remove:
pass
else:
f.write(line)
return to_remove, to_save
def push_atom(path, output, idx, content):
with open(path, 'r') as f:
lines = f.readlines()
info = [int(x) for x in lines[2].split()]
for i, line in zip(idx, content):
if 'NA' in line:
parts = line.split()
parts[1] = 'NA'
parts[5] = 'Na+'
parts[6] = '2'
parts[7] = 'Na+'
parts[8] = '1.0000'
line = ' '.join(parts) + '\n'
lines.insert(i + 1, line)
change = len(idx)
with open(output, 'w') as f:
for i, line in enumerate(lines):
if i == 2:
info[0] += change
f.write(' %s %s %s %s %s\n' % tuple(info))
else:
f.write(line)
def getOS():
system = platform.system().lower()
if system == 'darwin':
return 'osx'
return system
def cycles(n):
return ['%03d' % x for x in range(1, n + 1)]
def frames(n):
return ['%03d' % x for x in range(n)]
def smi2key(smi):
try:
res = subprocess.check_output('obabel -:"%s" -oinchikey' % smi,
stderr=subprocess.STDOUT, shell=True).decode('ascii')
except:
return None
    res = [x.strip() for x in res.split('\n') if x != '']
if 'molecule converted' in res[-1]:
return res[-2]
return None
def inchi2key(inchi):
try:
res = subprocess.check_output('echo "%s" | obabel -iinchi -oinchikey' % inchi,
stderr=subprocess.STDOUT, shell=True).decode('ascii')
except:
return None
    res = [x.strip() for x in res.split('\n') if x != '']
if 'molecule converted' in res[-1]:
return res[-2]
return None
|
11516924
|
import json
import logging
import os
from collections import Counter
from typing import Any, Dict, List, Tuple
import posthoganalytics
from django.db import connection
from psycopg2 import sql
from posthog.models import Event, Organization, Person, Team, User
from posthog.models.dashboard import Dashboard
from posthog.models.feature_flag import FeatureFlag
from posthog.models.plugin import PluginConfig
from posthog.models.utils import namedtuplefetchall
from posthog.settings import EE_AVAILABLE
from posthog.utils import (
get_helm_info_env,
get_instance_realm,
get_machine_id,
get_previous_week,
is_clickhouse_enabled,
)
from posthog.version import VERSION
logger = logging.getLogger(__name__)
def status_report(*, dry_run: bool = False) -> Dict[str, Any]:
period_start, period_end = get_previous_week()
report: Dict[str, Any] = {
"posthog_version": VERSION,
"deployment": os.getenv("DEPLOYMENT", "unknown"),
"realm": get_instance_realm(),
"period": {"start_inclusive": period_start.isoformat(), "end_inclusive": period_end.isoformat()},
"site_url": os.getenv("SITE_URL", "unknown"),
"license_keys": get_instance_licenses(),
}
report["helm"] = get_helm_info_env()
report["users_who_logged_in"] = [
{"id": user.id, "distinct_id": user.distinct_id}
if user.anonymize_data
else {"id": user.id, "distinct_id": user.distinct_id, "first_name": user.first_name, "email": user.email}
for user in User.objects.filter(last_login__gte=period_start)
]
report["teams"] = {}
report["table_sizes"] = {
"posthog_event": fetch_table_size("posthog_event"),
"posthog_sessionrecordingevent": fetch_table_size("posthog_sessionrecordingevent"),
}
plugin_configs = PluginConfig.objects.select_related("plugin").all()
report["plugins_installed"] = Counter((plugin_config.plugin.name for plugin_config in plugin_configs))
report["plugins_enabled"] = Counter(
(plugin_config.plugin.name for plugin_config in plugin_configs if plugin_config.enabled)
)
instance_usage_summary: Dict[str, int] = {
"events_count_new_in_period": 0,
"persons_count_new_in_period": 0,
"persons_count_total": 0,
"events_count_total": 0,
"dashboards_count": 0,
"ff_count": 0,
}
for team in Team.objects.exclude(organization__for_internal_metrics=True):
try:
params = (team.id, report["period"]["start_inclusive"], report["period"]["end_inclusive"])
team_report: Dict[str, Any] = {}
if is_clickhouse_enabled():
# pull events stats from clickhouse
from ee.clickhouse.models.event import (
get_event_count_for_team,
get_event_count_for_team_and_period,
get_events_count_for_team_by_client_lib,
get_events_count_for_team_by_event_type,
)
from ee.clickhouse.models.person import (
count_duplicate_distinct_ids_for_team,
count_total_persons_with_multiple_ids,
)
team_event_count = get_event_count_for_team(team.id)
instance_usage_summary["events_count_total"] += team_event_count
team_report["events_count_total"] = team_event_count
team_events_in_period_count = get_event_count_for_team_and_period(team.id, period_start, period_end)
team_report["events_count_new_in_period"] = team_events_in_period_count
instance_usage_summary["events_count_new_in_period"] += team_report["events_count_new_in_period"]
team_report["events_count_by_lib"] = get_events_count_for_team_by_client_lib(
team.id, period_start, period_end
)
team_report["events_count_by_name"] = get_events_count_for_team_by_event_type(
team.id, period_start, period_end
)
team_report["duplicate_distinct_ids"] = count_duplicate_distinct_ids_for_team(team.id)
team_report["multiple_ids_per_person"] = count_total_persons_with_multiple_ids(team.id)
else:
# pull events stats from postgres
events_considered_total = Event.objects.filter(team_id=team.id)
instance_usage_summary["events_count_total"] += events_considered_total.count()
events_considered_new_in_period = events_considered_total.filter(
timestamp__gte=period_start, timestamp__lte=period_end,
)
team_report["events_count_total"] = events_considered_total.count()
team_report["events_count_new_in_period"] = events_considered_new_in_period.count()
instance_usage_summary["events_count_new_in_period"] += team_report["events_count_new_in_period"]
team_report["events_count_by_lib"] = fetch_event_counts_by_lib(params)
team_report["events_count_by_name"] = fetch_events_count_by_name(params)
# pull person stats and the rest here from Postgres always
persons_considered_total = Person.objects.filter(team_id=team.id)
persons_considered_total_new_in_period = persons_considered_total.filter(
created_at__gte=period_start, created_at__lte=period_end,
)
team_report["persons_count_total"] = persons_considered_total.count()
instance_usage_summary["persons_count_total"] += team_report["persons_count_total"]
team_report["persons_count_new_in_period"] = persons_considered_total_new_in_period.count()
instance_usage_summary["persons_count_new_in_period"] += team_report["persons_count_new_in_period"]
# Dashboards
team_dashboards = Dashboard.objects.filter(team=team).exclude(deleted=True)
team_report["dashboards_count"] = team_dashboards.count()
instance_usage_summary["dashboards_count"] += team_report["dashboards_count"]
team_report["dashboards_template_count"] = team_dashboards.filter(creation_mode="template").count()
team_report["dashboards_shared_count"] = team_dashboards.filter(is_shared=True).count()
team_report["dashboards_tagged_count"] = team_dashboards.exclude(tags=[]).count()
# Feature Flags
feature_flags = FeatureFlag.objects.filter(team=team).exclude(deleted=True)
team_report["ff_count"] = feature_flags.count()
instance_usage_summary["ff_count"] += team_report["ff_count"]
team_report["ff_active_count"] = feature_flags.filter(active=True).count()
report["teams"][team.id] = team_report
except Exception as err:
capture_event("instance status report failure", {"error": str(err)}, dry_run=dry_run)
report["instance_usage_summary"] = instance_usage_summary
capture_event("instance status report", report, dry_run=dry_run)
return report
def capture_event(name: str, report: Dict[str, Any], dry_run: bool) -> None:
if not dry_run:
posthoganalytics.api_key = "<KEY>"
posthoganalytics.capture(get_machine_id(), name, {**report, "scope": "machine"})
for user in User.objects.all():
posthoganalytics.capture(user.distinct_id, f"user {name}", {**report, "scope": "user"})
else:
print(name, json.dumps(report)) # noqa: T001
def fetch_event_counts_by_lib(params: Tuple[Any, ...]) -> dict:
results = fetch_sql(
"""
SELECT properties->>'$lib' as lib, COUNT(1) as count
FROM posthog_event WHERE team_id = %s AND timestamp >= %s AND timestamp <= %s
GROUP BY lib
""",
params,
)
return {result.lib: result.count for result in results}
def fetch_events_count_by_name(params: Tuple[Any, ...]) -> dict:
results = fetch_sql(
"""
SELECT event as name, COUNT(1) as count
FROM posthog_event WHERE team_id = %s AND timestamp >= %s AND timestamp <= %s
GROUP BY name
""",
params,
)
return {result.name: result.count for result in results}
def fetch_table_size(table_name: str) -> int:
return fetch_sql("SELECT pg_total_relation_size(%s) as size", (table_name,))[0].size
def fetch_sql(sql_: str, params: Tuple[Any, ...]) -> List[Any]:
with connection.cursor() as cursor:
cursor.execute(sql.SQL(sql_), params)
return namedtuplefetchall(cursor)
def get_instance_licenses() -> List[str]:
if EE_AVAILABLE:
from ee.models import License
return [license.key for license in License.objects.all()]
else:
return []
|
11516926
|
import numpy as np
import torch
from COMBO.acquisition.acquisition_optimizers.graph_utils import neighbors
from COMBO.acquisition.acquisition_marginalization import acquisition_expectation
from COMBO.acquisition.acquisition_functions import expected_improvement
N_RANDOM_VERTICES = 20000
N_GREEDY_ASCENT_INIT = 20
N_SPRAY = 10
def optim_inits(x_opt, inference_samples, partition_samples, edge_mat_samples, n_vertices,
acquisition_func=expected_improvement, reference=None):
"""
:param x_opt: 1D Tensor
:param inference_samples:
:param partition_samples:
:param edge_mat_samples:
:param n_vertices:
:param acquisition_func:
:param reference:
:return:
"""
rnd_nbd = torch.cat(tuple([torch.randint(low=0, high=int(n_v), size=(N_RANDOM_VERTICES, 1)) for n_v in n_vertices]), dim=1).long()
min_nbd = neighbors(x_opt, partition_samples, edge_mat_samples, n_vertices, uniquely=False)
shuffled_ind = list(range(min_nbd.size(0)))
np.random.shuffle(shuffled_ind)
x_init_candidates = torch.cat(tuple([min_nbd[shuffled_ind[:N_SPRAY]], rnd_nbd]), dim=0)
acquisition_values = acquisition_expectation(x_init_candidates, inference_samples, partition_samples, n_vertices,
acquisition_func, reference)
nonnan_ind = ~torch.isnan(acquisition_values).squeeze(1)
x_init_candidates = x_init_candidates[nonnan_ind]
acquisition_values = acquisition_values[nonnan_ind]
acquisition_sorted, acquisition_sort_ind = torch.sort(acquisition_values.squeeze(1), descending=True)
x_init_candidates = x_init_candidates[acquisition_sort_ind]
return x_init_candidates[:N_GREEDY_ASCENT_INIT], acquisition_sorted[:N_GREEDY_ASCENT_INIT]
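# Descriptive summary: the candidate set is the union of N_SPRAY shuffled
# neighbours of the incumbent x_opt and N_RANDOM_VERTICES uniformly sampled
# random vertices; candidates are scored via the sampled acquisition
# expectation, NaNs dropped, and the top N_GREEDY_ASCENT_INIT are returned as
# starting points for greedy ascent.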
|
11516930
|
from fontTools.misc.textTools import Tag, bytesjoin, strjoin
try:
import xattr
except ImportError:
xattr = None
def _reverseString(s):
s = list(s)
s.reverse()
return strjoin(s)
def getMacCreatorAndType(path):
"""Returns file creator and file type codes for a path.
Args:
path (str): A file path.
Returns:
A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
representing the file creator and the second representing the
file type.
"""
if xattr is not None:
try:
finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo')
except (KeyError, IOError):
pass
else:
fileType = Tag(finderInfo[:4])
fileCreator = Tag(finderInfo[4:8])
return fileCreator, fileType
return None, None
def setMacCreatorAndType(path, fileCreator, fileType):
"""Set file creator and file type codes for a path.
Note that if the ``xattr`` module is not installed, no action is
taken but no error is raised.
Args:
path (str): A file path.
fileCreator: A four-character file creator tag.
fileType: A four-character file type tag.
"""
if xattr is not None:
from fontTools.misc.textTools import pad
if not all(len(s) == 4 for s in (fileCreator, fileType)):
raise TypeError('arg must be string of 4 chars')
finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo)
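# Minimal usage sketch (macOS with the optional `xattr` package installed;
# the creator/type codes below are illustrative placeholders):
#
#     setMacCreatorAndType("MyFont.dfont", b"ABCD", b"FFIL")
#     creator, fileType = getMacCreatorAndType("MyFont.dfont")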
|
11516950
|
from abc import ABCMeta, abstractmethod
# support for both python 2 and 3
from future.utils import with_metaclass
class LieGroupBase(with_metaclass(ABCMeta)):
""" Common abstract base class defining basic interface for Lie groups.
Does not depend on any specific linear algebra library.
"""
def __init__(self):
pass
@property
@classmethod
@abstractmethod
def dim(cls):
"""Dimension of the underlying representation."""
pass
@property
@classmethod
@abstractmethod
def dof(cls):
"""Underlying degrees of freedom (i.e., dimension of the tangent space)."""
pass
@abstractmethod
def dot(self, other):
"""Multiply another group element or one or more vectors on the left.
"""
pass
@classmethod
@abstractmethod
def exp(cls, vec):
"""Exponential map for the group.
Computes a transformation from a tangent vector.
This is the inverse operation to log.
"""
pass
@classmethod
@abstractmethod
def identity(cls):
"""Return the identity transformation."""
pass
@abstractmethod
def inv(self):
"""Return the inverse transformation."""
pass
@abstractmethod
def log(self):
"""Logarithmic map for the group.
Computes a tangent vector from a transformation.
This is the inverse operation to exp.
"""
pass
@abstractmethod
def normalize(self):
"""Normalize the group element to ensure it is valid and
negate the effect of rounding errors.
"""
pass
@abstractmethod
def perturb(self, vec):
"""Perturb the group element on the left by a vector in its local tangent space.
"""
pass
class MatrixLieGroupBase(LieGroupBase):
"""Common abstract base class defining basic interface for Matrix Lie Groups.
Does not depend on any specific linear algebra library.
"""
def __repr__(self):
"""Return a string representation of the transformation."""
return "<{}.{}>\n{}".format(self.__class__.__module__, self.__class__.__name__, self.as_matrix()).replace("\n", "\n| ")
@abstractmethod
def adjoint(self):
"""Return the adjoint matrix of the transformation."""
pass
@abstractmethod
def as_matrix(self):
"""Return the matrix representation of the transformation."""
pass
@classmethod
@abstractmethod
def from_matrix(cls, mat, normalize=False):
"""Create a transformation from a matrix (safe, but slower)."""
pass
@classmethod
@abstractmethod
def inv_left_jacobian(cls, vec):
"""Inverse of the left Jacobian for the group."""
pass
@classmethod
@abstractmethod
def is_valid_matrix(cls, mat):
"""Check if a matrix is a valid transformation matrix."""
pass
@classmethod
@abstractmethod
def left_jacobian(cls, vec):
"""Left Jacobian for the group."""
pass
@classmethod
@abstractmethod
def vee(cls, mat):
"""vee operator as defined by Barfoot.
This is the inverse operation to wedge.
"""
pass
@classmethod
@abstractmethod
def wedge(cls, vec):
"""wedge operator as defined by Barfoot.
This is the inverse operation to vee.
"""
pass
class SOMatrixBase(MatrixLieGroupBase):
"""Common abstract base class for Special Orthogonal Matrix Lie Groups SO(N).
Does not depend on any specific linear algebra library.
"""
def __init__(self, mat):
"""Create a transformation from a rotation matrix (unsafe, but faster)."""
self.mat = mat
"""Storage for the rotation matrix."""
def as_matrix(self):
"""Return the matrix representation of the rotation."""
return self.mat
def perturb(self, phi):
"""Perturb the rotation in-place on the left by a vector in its local tangent space.
.. math::
\\mathbf{C} \\gets \\exp(\\boldsymbol{\\phi}^\\wedge) \\mathbf{C}
"""
self.mat = self.__class__.exp(phi).dot(self).mat
class SEMatrixBase(MatrixLieGroupBase):
"""Common abstract base class for Special Euclidean Matrix Lie Groups SE(N).
Does not depend on any specific linear algebra library.
"""
def __init__(self, rot, trans):
"""Create a transformation from a translation and a rotation (unsafe, but faster)"""
self.rot = rot
"""Storage for the rotation matrix."""
self.trans = trans
"""Storage for the translation vector."""
@classmethod
@abstractmethod
def odot(cls, p, directional=False):
"""odot operator as defined by Barfoot."""
pass
def perturb(self, xi):
"""Perturb the transformation in-place on the left by a vector in its local tangent space.
.. math::
\\mathbf{T} \\gets \\exp(\\boldsymbol{\\xi}^\\wedge) \\mathbf{T}
"""
perturbed = self.__class__.exp(xi).dot(self)
self.rot = perturbed.rot
self.trans = perturbed.trans
@property
@classmethod
@abstractmethod
def RotationType(cls):
"""Rotation type."""
pass
class VectorLieGroupBase(LieGroupBase):
"""Common abstract base class for Lie Groups with vector parametrizations
(complex, quaternions, dual quaternions). Does not depend on any
specific linear algebra library.
"""
def __init__(self, data):
self.data = data
def __repr__(self):
"""Return a string representation of the transformation."""
return "<{}.{}>\n{}".format(self.__class__.__module__, self.__class__.__name__, self.data).replace("\n", "\n| ")
@abstractmethod
def conjugate(self):
"""Return the conjugate of the vector"""
pass
|
11516982
|
import numpy as np
import tensorflow as tf
import classify_common
batch_size = 350 # %%% 128
# learning_rate = 0.0001 # for opt and optens
learning_rate = 0.001
train_steps = 4000
dist_dims = 1000
purity_count = 3
# usage: python classify.py <modelname> <adv_set>
num_classes = 1000
import sys
modelname, adv_set = sys.argv[1:]
# Assemble input
# %%% TODO: add opt dist too
p_dist_benign = tf.placeholder(shape=[None, dist_dims], dtype=tf.float32)
p_purity_benign = tf.placeholder(shape=[None, purity_count], dtype=tf.float32)
p_dist_adv = tf.placeholder(shape=[None, dist_dims], dtype=tf.float32)
p_purity_adv = tf.placeholder(shape=[None, purity_count], dtype=tf.float32)
p_correct_benign = tf.placeholder(shape=[None], dtype=tf.bool)
p_correct_adv = tf.placeholder(shape=[None], dtype=tf.bool)
dat = tf.contrib.data.Dataset.from_tensor_slices([p_dist_benign, p_purity_benign, p_correct_benign, p_dist_adv, p_purity_adv, p_correct_adv])
dat = dat.shuffle(350)
dat = dat.repeat()
dat = dat.batch(batch_size)
di = dat.make_initializable_iterator()
# -
b_dist_benign, b_purity_benign, b_correct_benign, b_dist_adv, b_purity_adv, b_correct_adv = di.get_next()
x_dist = tf.concat([b_dist_benign, b_dist_adv], axis=0)
x_purity = tf.concat([b_purity_benign, b_purity_adv], axis=0)
logits = classify_common.m(x_dist, x_purity, training=True)
b_adv_success = tf.logical_and(b_correct_benign, tf.logical_not(b_correct_adv))
# use_mask = tf.cast(tf.concat([b_correct_benign, b_adv_success], axis=0), tf.float32)
# %%%
f_correct_benign = tf.cast(b_correct_benign, tf.float32)
f_correct_benign /= tf.reduce_mean(f_correct_benign)
f_adv_success = tf.cast(b_adv_success, tf.float32)
f_adv_success /= tf.reduce_mean(f_adv_success)
use_mask = tf.concat([f_correct_benign, f_adv_success], axis=0)
saver = tf.train.Saver()
# Batches are half benign and half adversarial
labels = tf.concat([tf.zeros(batch_size, dtype=tf.int64), tf.ones(batch_size, dtype=tf.int64)], axis=0)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
loss = loss * use_mask # only learn from correct benign and successful adv.
opt = tf.train.AdamOptimizer(learning_rate)
train_step = opt.minimize(loss)
pred = tf.argmax(logits, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, labels), tf.float32))
sess = tf.Session()
# Load data
print 'load, preprocess, init' # %%%
def load_seq(template, count):
return np.asarray([np.load(template % j) for j in range(count)])
def load_dist(setname):
dist = load_seq('gxr3_%s/%s%%d_dist.npy' % (modelname, setname), 450)
# dist = np.load('gxr3big_mnist/%s/%s_dist.npy' % (modelname, setname), 'r')
dist = np.sort(dist, axis=1)
return dist
dist_benign = load_dist('none')
dist_adv = load_dist(adv_set)
def compute_purity(b):
b = b[b < num_classes]
c = np.bincount(b, minlength=num_classes)
cs = np.sort(c)[::-1]
cscs = np.cumsum(cs)
prop = cscs / float(max(1, len(b)))
return prop
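# Worked example (illustrative): compute_purity(np.array([3, 3, 7, 9]))[:3]
# -> array([0.5, 0.75, 1.0]), i.e. the cumulative fraction of boundary labels
# covered by the most frequent classes.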
def load_purity(setname):
boundary = load_seq('gxr3_%s/%s%%d_boundary.npy' % (modelname, setname), 450)
# boundary = np.load('gxr3big_mnist/%s/%s_boundary.npy' % (modelname, setname), 'r')
purity = np.asarray([compute_purity(b) for b in boundary])
return purity
purity_benign = load_purity('none')
purity_adv = load_purity(adv_set)
correctness_benign = np.load('correctness_%s/%s.npy' % (modelname, 'none'), 'r')
correctness_adv = np.load('correctness_%s/%s.npy' % (modelname, adv_set), 'r')
train_split = 350
sess.run(di.initializer, feed_dict={
p_dist_benign: dist_benign[:train_split],
p_purity_benign: purity_benign[:train_split, :purity_count],
p_dist_adv: dist_adv[:train_split],
p_purity_adv: purity_adv[:train_split, :purity_count],
p_correct_benign: correctness_benign[:train_split],
p_correct_adv: correctness_adv[:train_split],
})
sess.run(tf.global_variables_initializer())
print 'done' # %%%
# Run training
for i in range(train_steps):
train_acc, _ = sess.run([accuracy, train_step])
if i % 50 == 0: # ceil(train_split / batch_size)
print 'step', i, 'train accuracy', train_acc
# Save result
saver.save(sess, 'classifier_models/%s_%s' % (modelname, adv_set))
# # below is invalid due to dropout \:
# test_acc = sess.run(accuracy, feed_dict={
# b_dist_benign: dist_benign[train_split:],
# b_purity_benign: purity_benign[train_split:, :purity_count],
# b_dist_adv: dist_adv[train_split:],
# b_purity_adv: purity_adv[train_split:, :purity_count],
# })
# print 'test accuracy', test_acc
|
11517040
|
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
Motor1R = 20
Motor1L = 21
GPIO.setup(Motor1R, GPIO.OUT)
GPIO.setup(Motor1L, GPIO.OUT)
pwm = GPIO.PWM(Motor1R, 100)
pwm.start(0)
try:
while True:
GPIO.output(Motor1L, GPIO.LOW)
for i in range(0, 101):
pwm.ChangeDutyCycle(i)
sleep(0.1)
except KeyboardInterrupt:
pwm.stop()
GPIO.cleanup()
|
11517058
|
import tkinter as tk
from tkinter import filedialog
from PIL import Image
root = tk.Tk()
canvas1 = tk.Canvas(root, width=300, height=250, bg='azure3', relief='raised')
canvas1.pack()
label1 = tk.Label(root, text="Image Converter", bg='azure3')
label1.config(font=('helvetica', 20))
canvas1.create_window(150, 60, window=label1)
def getPNG():
global im1
import_file_path = filedialog.askopenfilename()
im1 = Image.open(import_file_path)
browse_png = tk.Button(text="Select PNG file", command=getPNG, bg="royalblue", fg='white', font=('helvetica', 12, 'bold'))
canvas1.create_window(150, 130, window=browse_png)
def convert():
global im1
export_file_path = filedialog.asksaveasfilename(defaultextension='.jpg')
im1.save(export_file_path)
saveasbutton = tk.Button(text="Convert PNG to JPG", command=convert, bg='royalblue', fg='white', font=('helvetica', 12, 'bold'))
canvas1.create_window(150, 180, window=saveasbutton)
root.mainloop()
|
11517063
|
from dataclasses import dataclass
@dataclass
class SampledSequencesDTO:
input: str
output: str
nll: float
|
11517071
|
import os, sys, imp
from mirage.core.app import App
from mirage.libs.utils import getHomeDir,generateScenariosDictionary
if App.Instance is not None:
# Scenarios Directory
SCENARIOS_DIR = os.path.abspath(os.path.dirname(__file__))
SCENARIOS_USER_DIR = getHomeDir() + "/scenarios"
__scenarios__ = generateScenariosDictionary(SCENARIOS_DIR, SCENARIOS_USER_DIR)
'''
# Insertion of the root directory in the PYTHON PATH
#sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)+"/.."))
# Creation of the list of scenarios
__scenarios__ = {}
for scenario in os.listdir(SCENARIOS_DIR):
if os.path.isfile(SCENARIOS_DIR+"/"+scenario) and scenario[-3:] == ".py" and scenario != "__init__.py":
__scenarios__[scenario[:-3]]=imp.load_source(scenario[:-3],SCENARIOS_DIR + "/"+scenario)
for scenario in os.listdir(SCENARIOS_USER_DIR):
if os.path.isfile(SCENARIOS_USER_DIR+"/"+scenario) and scenario[-3:] == ".py" and scenario != "__init__.py":
__scenarios__[scenario[:-3]]=imp.load_source(scenario[:-3],SCENARIOS_USER_DIR + "/"+scenario)
'''
|
11517110
|
import json
import requests # http requests
BASE_URL = "http://127.0.0.1:8000/"
ENDPOINT = "api/updates/"
def get_list(id=None):  # --> lists all updates, or a single one if id is given
data = json.dumps({})
if id is not None:
data = json.dumps({"id": id})
r = requests.get(BASE_URL + ENDPOINT, data=data)
print(r.status_code)
status_code = r.status_code
    if status_code != 200:  # request did not succeed
        print('warning: received non-200 status code')
data = r.json()
return data
def create_update():
new_data = {
'user': 1,
"content": "Another more cool content"
}
r = requests.post(BASE_URL + ENDPOINT, data=json.dumps(new_data))
print(r.headers)
print(r.status_code)
if r.status_code == requests.codes.ok:
#print(r.json())
return r.json()
return r.text
print(get_list())
# print(create_update())
def do_obj_update():
new_data = {
"id": 3,
"content": "awesomer"
}
r = requests.put(BASE_URL + ENDPOINT, data=json.dumps(new_data))
# new_data = {
# 'id': 1
# "content": "Another more cool content"
# }
# r = requests.put(BASE_URL + ENDPOINT, data=new_data)
#print(r.headers)
print(r.status_code)
if r.status_code == requests.codes.ok:
#print(r.json())
return r.json()
return r.text
def do_obj_delete():
new_data = {
"id": 3
}
r = requests.delete(BASE_URL + ENDPOINT, data=json.dumps(new_data))
# new_data = {
# 'id': 1
# "content": "Another more cool content"
# }
# r = requests.put(BASE_URL + ENDPOINT, data=new_data)
#print(r.headers)
print(r.status_code)
if r.status_code == requests.codes.ok:
#print(r.json())
return r.json()
return r.text
# print(do_obj_update())
|
11517159
|
from adapters.osram.smart_mini_switch import SmartMiniSwitch
osram_adapters = {
'AC0251100NJ': SmartMiniSwitch, # OSRAM SMART+ Switch Mini
'AC0251100NJ/AC0251700NJ': SmartMiniSwitch, # OSRAM SMART+ Switch Mini
}
|
11517191
|
import gzip
from abc import ABC
from pathlib import Path
from urllib import request
import numpy as np
from slick_dnn.data import Dataset
class MNISTDataSet(Dataset, ABC):
@staticmethod
def _load_mnist(images_path: str, labels_path: str, flatten_input, one_hot_output):
with open(images_path, 'rb') as f:
new_shape = (-1, 28 * 28) if flatten_input else (-1, 1, 28, 28)
data = np.frombuffer(f.read(), np.uint8, offset=16).reshape(new_shape)
with open(labels_path, 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
if one_hot_output:
b = np.zeros((labels.size, 10))
b[np.arange(labels.size), labels] = 1
labels = b
return data, labels
@staticmethod
def download_if_not_exists(url, path):
my_file = Path(path)
if not my_file.exists():
response = request.urlopen(url)
with open(path, "wb") as f:
f.write(response.read())
with gzip.open(path, 'rb') as zipped:
out = zipped.read()
with open(path, 'wb') as f_out:
f_out.write(out)
def __init__(self,
images_path: str = None,
labels_path=None,
flatten_input=True,
one_hot_output=True):
self.flatten_input = flatten_input
self.one_hot_output = one_hot_output
if images_path is None:
self.images_path = './unknown-images.idx3-ubyte'
else:
self.images_path = images_path
if labels_path is None:
self.labels_path = './unknown-labels.idx3-ubyte'
else:
self.labels_path = labels_path
class MNISTTrainDataSet(MNISTDataSet):
train_images_url = "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"
train_labels_url = "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"
def __init__(self,
training_images_path: str = None,
training_labels_path=None,
flatten_input=True,
one_hot_output=True,
input_normalization=None):
if training_images_path is None:
training_images_path = './training-images.idx3-ubyte'
if training_labels_path is None:
training_labels_path = './training-labels.idx3-ubyte'
super().__init__(training_images_path, training_labels_path, flatten_input, one_hot_output)
self.download_if_not_exists(self.train_images_url, self.images_path)
self.download_if_not_exists(self.train_labels_url, self.labels_path)
self.loaded_data, self.loaded_labels = self._load_mnist(self.images_path,
self.labels_path,
self.flatten_input,
self.one_hot_output)
self.input_normalization = input_normalization
def __getitem__(self, idx):
data_in = self.loaded_data[idx]/255
data_out = self.loaded_labels[idx]
if self.input_normalization is not None:
data_in = np.where(data_in != 0, (data_in - self.input_normalization[0]) / self.input_normalization[1], 0)
return data_in, data_out
def __len__(self):
return len(self.loaded_data)
class MNISTTestDataSet(MNISTDataSet):
test_images_url = "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz"
test_labels_url = "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"
def __init__(self,
testing_images_path: str = None,
testing_labels_path=None,
flatten_input=True,
one_hot_output=True,
input_normalization=None):
if testing_images_path is None:
testing_images_path = './testing-images.idx3-ubyte'
if testing_labels_path is None:
testing_labels_path = './testing-labels.idx3-ubyte'
super().__init__(testing_images_path, testing_labels_path, flatten_input, one_hot_output)
self.download_if_not_exists(self.test_images_url, self.images_path)
self.download_if_not_exists(self.test_labels_url, self.labels_path)
self.loaded_data, self.loaded_labels = self._load_mnist(self.images_path,
self.labels_path,
self.flatten_input,
self.one_hot_output)
self.input_normalization = input_normalization
def __getitem__(self, idx):
data_in = self.loaded_data[idx]/255
data_out = self.loaded_labels[idx]
if self.input_normalization is not None:
data_in = np.where(data_in != 0, (data_in - self.input_normalization[0]) / self.input_normalization[1], 0)
return data_in, data_out
def __len__(self):
return len(self.loaded_data)
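# --- Illustrative sketch (not part of the original module) ---
# The one-hot encoding used in _load_mnist relies on NumPy fancy indexing.
# Assuming the module's imports resolve, running this file directly shows the
# same trick on a tiny label array.
if __name__ == '__main__':
    example_labels = np.array([3, 0, 9])
    one_hot = np.zeros((example_labels.size, 10))
    # Place a single 1 in each row at the column given by the label.
    one_hot[np.arange(example_labels.size), example_labels] = 1
    print(one_hot)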
|
11517229
|
from __future__ import print_function
import numpy as np
from numba import cuda, vectorize, guvectorize
from numba import unittest_support as unittest
from numba.numpy_support import from_dtype
from numba.tests.support import TestCase
from numba.cuda.testing import SerialMixin, skip_on_cudasim
class TestCudaDateTime(SerialMixin, TestCase):
def test_basic_datetime_kernel(self):
@cuda.jit
def foo(start, end, delta):
for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):
delta[i] = end[i] - start[i]
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = np.zeros_like(arr1, dtype='timedelta64[D]')
foo[1, 32](arr1, arr2, delta)
self.assertPreciseEqual(delta, arr2 - arr1)
def test_scalar_datetime_kernel(self):
@cuda.jit
def foo(dates, target, delta, matches, outdelta):
for i in range(cuda.grid(1), matches.size, cuda.gridsize(1)):
matches[i] = dates[i] == target
outdelta[i] = dates[i] - delta
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
target = arr1[5] # datetime
delta = arr1[6] - arr1[5] # timedelta
matches = np.zeros_like(arr1, dtype=np.bool_)
outdelta = np.zeros_like(arr1, dtype='datetime64[D]')
foo[1, 32](arr1, target, delta, matches, outdelta)
where = matches.nonzero()
self.assertEqual(list(where), [5])
self.assertPreciseEqual(outdelta, arr1 - delta)
@skip_on_cudasim('ufunc API unsupported in the simulator')
def test_ufunc(self):
datetime_t = from_dtype(np.dtype('datetime64[D]'))
@vectorize([(datetime_t, datetime_t)], target='cuda')
def timediff(start, end):
return end - start
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = timediff(arr1, arr2)
self.assertPreciseEqual(delta, arr2 - arr1)
@skip_on_cudasim('ufunc API unsupported in the simulator')
def test_gufunc(self):
datetime_t = from_dtype(np.dtype('datetime64[D]'))
timedelta_t = from_dtype(np.dtype('timedelta64[D]'))
@guvectorize([(datetime_t, datetime_t, timedelta_t[:])], '(),()->()',
target='cuda')
def timediff(start, end, out):
out[0] = end - start
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = timediff(arr1, arr2)
self.assertPreciseEqual(delta, arr2 - arr1)
@skip_on_cudasim('no .copy_to_host() in the simulator')
def test_datetime_view_as_int64(self):
arr = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
darr = cuda.to_device(arr)
viewed = darr.view(np.int64)
self.assertPreciseEqual(arr.view(np.int64), viewed.copy_to_host())
self.assertEqual(viewed.gpu_data, darr.gpu_data)
@skip_on_cudasim('no .copy_to_host() in the simulator')
def test_timedelta_view_as_int64(self):
arr = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr = arr - (arr - 1)
self.assertEqual(arr.dtype, np.dtype('timedelta64[D]'))
darr = cuda.to_device(arr)
viewed = darr.view(np.int64)
self.assertPreciseEqual(arr.view(np.int64), viewed.copy_to_host())
self.assertEqual(viewed.gpu_data, darr.gpu_data)
if __name__ == '__main__':
unittest.main()
|
11517258
|
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
# Neutron
directions = {
'N': (0, 1),
'NE': (1, 1),
'E': (1, 0),
'SE': (1, -1),
'S': (0, -1),
'SW': (-1, -1),
'W': (-1, 0),
'NW': (-1, 1)
}
S_START = 4
F_START = 0
class Piece:
def __init__(self, x, y, colour):
self.x = x
self.y = y
self.colour = colour
class Neutron:
def __init__(self, x, y):
self.x = x
self.y = y
class Board:
def __init__(self, f_order, s_order):
        self.grid = [[None for _ in range(5)] for _ in range(5)]
        self.neutron = Neutron(2, 2)
        self.grid[2][2] = self.neutron
        for i in range(5):
            self.grid[F_START][i] = Piece(i, F_START, 'F')
        for i in range(5):
            self.grid[S_START][i] = Piece(i, S_START, 'S')
self.f_order = tuple(f_order)
self.s_order = tuple(s_order)
def neutron_can_move_to(self, x, y):
dx = x - self.neutron.x
dy = y - self.neutron.y
if abs(dx) == abs(dy):
# we can move diagonally
return True
elif dx == 0 or dy == 0:
return True
else:
return False
    def move_neutron(self, x, y):
        if self.grid[y][x] is None:
            self.grid[self.neutron.y][self.neutron.x] = None
            self.grid[y][x] = self.neutron
            self.neutron.x = x
            self.neutron.y = y
        else:
            raise ValueError("Invalid Neutron Location")
def play_first_move(self):
# move the neutron first
# see if there is an instant winning move
for x in range(5):
if self.neutron_can_move_to(x, 4):
# win!
return True
#
def __repr__(self):
output = ''
for y in range(5):
for x in range(5):
if self.grid[y][x] is None:
output += '.'
elif isinstance(self.grid[y][x], Neutron):
output += '*'
else:
output += self.grid[y][x].colour
output += '\n'
return output
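# --- Minimal usage sketch (not part of the original script) ---
# The piece orders below are placeholders; the sketch just builds a board,
# prints it, and moves the neutron to an adjacent empty square.
if __name__ == '__main__':
    board = Board(f_order=[0, 1, 2, 3, 4], s_order=[0, 1, 2, 3, 4])
    print(board)
    if board.neutron_can_move_to(2, 1):
        board.move_neutron(2, 1)
    print(board)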
|
11517281
|
from rest_framework.renderers import TemplateHTMLRenderer, JSONRenderer
from django.conf import settings
from wq.db.default_settings import SRID as DEFAULT_SRID
import re
APP_TEMPLATES = {}
def load_app_template(template_name):
with open(template_name) as f:
template = f.read()
template = re.sub(
'<title>(.+)</title>',
'<title>{{title}}</title>',
template
)
if '{{' in template:
return template, True
else:
return template, False
def get_title(data, request):
title = None
if isinstance(data, dict):
title = data.get('label')
return title or settings.PROJECT_NAME
def render_app(template_name, data, request):
if template_name not in APP_TEMPLATES:
APP_TEMPLATES[template_name] = load_app_template(template_name)
template, has_title = APP_TEMPLATES[template_name]
if has_title:
from wq.db.rest import router
return (template
.replace('{{title}}', get_title(data, request))
.replace('{{base_url}}', router.get_base_url()))
else:
return template
class HTMLRenderer(TemplateHTMLRenderer):
def render(self, data, accepted_media_type=None, renderer_context=None):
if getattr(settings, 'WQ_APP_TEMPLATE', None):
return render_app(
settings.WQ_APP_TEMPLATE,
data,
(renderer_context or {}).get('request'),
)
return super(HTMLRenderer, self).render(
data, accepted_media_type, renderer_context
)
class JSONRenderer(JSONRenderer):
def render(self, data, accepted_media_type=None, renderer_context=None):
if renderer_context and 'request' in renderer_context:
if not renderer_context['request'].is_ajax():
renderer_context['indent'] = 4
return super(JSONRenderer, self).render(
data, accepted_media_type, renderer_context
)
class ESMRenderer(JSONRenderer):
media_type = 'application/javascript'
format = 'js'
def render(self, data, accepted_media_type=None, renderer_context=None):
data = super(JSONRenderer, self).render(
data, accepted_media_type, renderer_context
)
return b"const config = " + data + b";\nexport default config;"
class GeoJSONRenderer(JSONRenderer):
media_type = 'application/geo+json'
format = 'geojson'
def render(self, data, *args, **kwargs):
if isinstance(data, list):
features, simple = self.render_features(data)
data = {
'type': 'FeatureCollection',
'features': features
}
elif "list" in data and isinstance(data['list'], list):
features, simple = self.render_features(data['list'])
data['type'] = 'FeatureCollection'
data['features'] = features
del data['list']
else:
data, simple = self.render_feature(data)
if not simple and getattr(settings, 'SRID', None) != DEFAULT_SRID:
data['crs'] = {
'type': 'name',
'properties': {
'name': 'urn:ogc:def:crs:EPSG::%s' % settings.SRID
}
}
return super(GeoJSONRenderer, self).render(data, *args, **kwargs)
def render_feature(self, obj):
feature = {
'type': 'Feature',
'properties': obj
}
simple = False
if 'id' in obj:
feature['id'] = obj['id']
del obj['id']
if 'latitude' in obj and 'longitude' in obj:
feature['geometry'] = {
'type': 'Point',
'coordinates': [obj['longitude'], obj['latitude']]
}
del obj['latitude']
del obj['longitude']
simple = True
else:
for key, val in list(obj.items()):
if isinstance(val, dict) and 'type' in val and (
'coordinates' in val or 'geometries' in val
):
feature['geometry'] = val
del obj[key]
if 'features' in obj:
feature['features'] = obj['features']
feature['type'] = 'FeatureCollection'
del obj['features']
return feature, simple
def render_features(self, objs):
features = []
has_simple = False
for obj in objs:
feature, simple = self.render_feature(obj)
if simple:
has_simple = True
if feature['geometry']['coordinates'][0] is not None:
features.append(feature)
else:
features.append(feature)
return features, has_simple
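# Worked example of render_feature (illustration only): an input record such as
#   {'id': 7, 'label': 'Site A', 'latitude': 44.97, 'longitude': -93.26}
# is turned into the GeoJSON Feature
#   {'type': 'Feature', 'id': 7,
#    'properties': {'label': 'Site A'},
#    'geometry': {'type': 'Point', 'coordinates': [-93.26, 44.97]}}
# with simple=True, so render_features() keeps it only when its first
# coordinate is not None.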
|
11517290
|
import logging
from django.http import Http404
from django.shortcuts import render
from django.urls import reverse
from azbankgateways import bankfactories, models as bank_models, default_settings as settings
from azbankgateways.apps import AZIranianBankGatewaysConfig
from azbankgateways.exceptions import AZBankGatewaysException
from ..forms import PaymentSampleForm
def sample_payment_view(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = PaymentSampleForm(request.POST)
# check whether it's valid:
if form.is_valid():
amount = form.cleaned_data['amount']
mobile_number = form.cleaned_data['mobile_number']
factory = bankfactories.BankFactory()
try:
bank = factory.auto_create()
bank.set_request(request)
bank.set_amount(amount)
                # Callback URL for returning to the application to continue the flow
                bank.set_client_callback_url(reverse(f'{AZIranianBankGatewaysConfig.name}:sample-result'))
                bank.set_mobile_number(mobile_number)  # optional
                # If desired, link this record to an invoice record or anything else so that
                # you can later relate the product or service to this payment.
                bank_record = bank.ready()
                # Redirect the user to the bank gateway
return bank.redirect_gateway()
except AZBankGatewaysException as e:
logging.critical(e)
# TODO: redirect to failed result.
raise e
# if a GET (or any other method) we'll create a blank form
else:
form = PaymentSampleForm()
return render(request, 'azbankgateways/samples/gateway.html', {'form': form})
def sample_result_view(request):
tracking_code = request.GET.get(settings.TRACKING_CODE_QUERY_PARAM, None)
if not tracking_code:
logging.debug("این لینک معتبر نیست.")
raise Http404
try:
bank_record = bank_models.Bank.objects.get(tracking_code=tracking_code)
except bank_models.Bank.DoesNotExist:
logging.debug("این لینک معتبر نیست.")
raise Http404
return render(request, 'azbankgateways/samples/result.html', {'bank_record': bank_record})
|
11517332
|
from argparse import ArgumentTypeError
from nose.tools import assert_equals
from nose.tools import assert_not_equals
from nose.tools import assert_raises
from vcsi.vcsi import Grid, mxn_type
def test_grid_default():
test_grid = mxn_type('4x4')
assert_equals(test_grid.x, 4)
assert_equals(test_grid.y, 4)
def test_grid_equality():
g1 = Grid(4, 4)
g2 = Grid(4, 4)
assert_equals(g1, g2)
def test_grid_inequality():
g1 = Grid(4, 4)
g2 = Grid(3, 4)
assert_not_equals(g1, g2)
def test_grid_columns_integer():
assert_raises(ArgumentTypeError, mxn_type, 'ax4')
assert_raises(ArgumentTypeError, mxn_type, '4.1x4')
def test_grid_columns_positive():
assert_raises(ArgumentTypeError, mxn_type, '-1x4')
def test_grid_rows_integer():
assert_raises(ArgumentTypeError, mxn_type, '4xa')
assert_raises(ArgumentTypeError, mxn_type, '4x4.1')
def test_grid_rows_positive():
assert_raises(ArgumentTypeError, mxn_type, '4x-1')
def test_grid_format():
assert_raises(ArgumentTypeError, mxn_type, '')
assert_raises(ArgumentTypeError, mxn_type, '4xx4')
assert_raises(ArgumentTypeError, mxn_type, '4x1x4')
assert_raises(ArgumentTypeError, mxn_type, '4')
|
11517336
|
from numpy import array, ndarray
from numpy.linalg import norm
from math import acos, cos, sin, pi, sqrt
import pygame
from typing import Callable, Union
from electripy.physics.charges import Proton, Electron
from electripy.physics.charge_distribution import ChargeDistribution
from electripy.visualization import colors, settings, numbers
from collections import deque
import pkg_resources
SOUND_PATH = pkg_resources.resource_filename(
"electripy.visualization", "sounds/add_charge.wav"
)
class Screen:
def __init__(
self,
title: str,
height: int,
width: int,
resizable: bool,
background_color: str,
):
# Window setup
pygame.display.set_caption(title)
if resizable:
self._window = pygame.display.set_mode((height, width), pygame.RESIZABLE)
else:
self._window = pygame.display.set_mode((height, width))
self.background_color = background_color
self.clean()
# Screen attributes
self._electric_field_copy = None
self._last_cursor_position = (0, 0)
self._last_screen_size = self._window.get_size()
# Charge distribution and Vector setup
self.charge_distribution = ChargeDistribution()
self.force_vector = Vector(
self._window,
settings.DEFAULT_FORCE_VECTOR_SCALE_FACTOR,
settings.MINIMUM_FORCE_VECTOR_NORM,
)
self.charges_removed = deque()
# Electric Field
self.ef_vector = Vector(
self._window,
settings.DEFAULT_EF_VECTOR_SCALE_FACTOR,
settings.MINIMUM_ELECTRIC_FIELD_VECTOR_NORM,
)
self.electric_field = Field(
self._window,
settings.DEFAULT_EF_BRIGHTNESS,
self.charge_distribution.get_electric_field,
settings.DEFAULT_SPACE_BETWEEN_EF_VECTORS,
)
# State attributes
self.showing_vectors_components = False
self.showing_electric_forces_vectors = False
self.showing_electric_field_at_mouse_position = False
self.showing_electric_field = True
# Sounds setup
self.add_charge_sound = pygame.mixer.Sound(SOUND_PATH)
# Text settings
pygame.font.init()
self.vector_components_font = pygame.font.SysFont(
settings.VECTOR_COMPONENTS_FONT, settings.VECTOR_COMPONENTS_FONT_SIZE
)
self.vector_components_font_color = colors.WHITE
self.proton_text_surface = pygame.font.SysFont(
settings.CHARGES_SIGN_FONT, settings.PROTON_SIGN_FONT_SIZE, bold=True
).render("+", False, colors.BLACK)
self.electron_text_surface = pygame.font.SysFont(
settings.CHARGES_SIGN_FONT, settings.ELECTRON_SIGN_FONT_SIZE, bold=False
).render("-", False, colors.BLACK)
def clean(self) -> None:
"""Fills the screen with it's background color."""
self._window.fill(self.background_color)
def clear(self) -> None:
"""Restarts charge distribution."""
self.clear_electric_field_copy()
self.charge_distribution = ChargeDistribution()
self.clean()
def add_charge(
self, charge: Union[Proton, Electron], clean_charges_removed: bool
) -> None:
"""Adds a charge to the screen and to the charge distribution."""
self.add_charge_sound.play()
self.charge_distribution.add_charge(charge)
self.electric_field.field_function = self.charge_distribution.get_electric_field
self.clear_electric_field_copy()
self.refresh_screen()
if clean_charges_removed:
self.charges_removed = deque()
def add_last_charge_removed(self) -> None:
if not self.charges_removed:
return
charge = self.charges_removed.pop()
self.add_charge(charge, False)
def remove_last_charge_added(self) -> None:
if not len(self.charge_distribution):
return
charge = self.charge_distribution[-1]
self.charge_distribution.remove_charge(charge)
self.electric_field.field_function = self.charge_distribution.get_electric_field
self.charges_removed.append(charge)
self.clear_electric_field_copy()
self.refresh_screen()
def show_electric_field_vector(self, x: int, y: int) -> None:
"""Shows the electric field vector at the given position."""
position = array([x, y])
ef = self.charge_distribution.get_electric_field(position)
self._draw_vector(
self.ef_vector,
position,
ef,
AnimatedPoint.RADIUS,
colors.GREEN,
self.showing_vectors_components,
)
def increment_scale_factor(self) -> None:
"""
        Increments the scale factors of both the force vector and the
        electric field vector.
"""
self.force_vector.scale_factor *= Vector.DELTA_SCALE_FACTOR
self.ef_vector.scale_factor *= Vector.DELTA_SCALE_FACTOR
self.refresh_screen()
def decrement_scale_factor(self) -> None:
"""
        Decrements the scale factors of both the force vector and the
        electric field vector.
"""
self.force_vector.scale_factor /= Vector.DELTA_SCALE_FACTOR
self.ef_vector.scale_factor /= Vector.DELTA_SCALE_FACTOR
self.refresh_screen()
def increment_electric_field_brightness(self) -> None:
if self.electric_field.brightness < Field.MAX_BRIGHTNESS:
self.electric_field.brightness += Field.BRIGHTNESS_VARIATION
self.clear_electric_field_copy()
def decrement_electric_field_brightness(self) -> None:
if self.electric_field.brightness > Field.MIN_BRIGHTNESS:
self.electric_field.brightness -= Field.BRIGHTNESS_VARIATION
self.clear_electric_field_copy()
def _draw_vector(
self,
vector,
position: ndarray,
array: ndarray,
radius: int,
color: tuple,
show_components: bool,
) -> None:
vector.draw(position, array, radius, color)
if show_components:
self._display_arrays_components(list(vector.last_end_point), array)
def _display_arrays_components(self, position: list, array: ndarray):
"""Displays the arrays components next to the vector drawn."""
x, y = numbers.array_to_string(array)
x_text = self.vector_components_font.render(
x, True, self.vector_components_font_color
)
y_text = self.vector_components_font.render(
y, True, self.vector_components_font_color
)
self._window.blit(x_text, position)
position[1] += 15
self._window.blit(y_text, position)
def refresh_screen(self, mx: int = None, my: int = None) -> None:
"""
        Cleans the screen, gets the electric forces and calls _draw_charge
        for each charge on screen.
"""
self.clean()
if self.showing_electric_field:
if not self._electric_field_copy:
self.show_electric_field()
else:
self._window.blit(self._electric_field_copy, (0, 0))
electric_forces = self.charge_distribution.get_electric_forces()
for ef in electric_forces:
charge = ef[0]
force = ef[1]
self._draw_charge(charge, force)
if self.showing_electric_field_at_mouse_position:
if mx is None or my is None:
mx, my = self._last_cursor_position
self.show_electric_field_vector(mx, my)
if mx is not None or my is not None:
self._last_cursor_position = (mx, my)
def _draw_charge(self, charge: Union[Proton, Electron], force: ndarray) -> None:
"""Draws a charge and its force vector."""
if isinstance(charge, Proton):
color = AnimatedProton.COLOR
radius = AnimatedProton.RADIUS
charge_text_surface = self.proton_text_surface
y_sign_displacement = 1
if isinstance(charge, Electron):
color = AnimatedElectron.COLOR
radius = AnimatedElectron.RADIUS
charge_text_surface = self.electron_text_surface
y_sign_displacement = 2
pygame.draw.circle(self._window, color, charge.position, radius)
# Draw charge sign:
sign_position = (
charge.position[0] - charge_text_surface.get_width() // 2,
charge.position[1] - charge_text_surface.get_width() * y_sign_displacement,
)
self._window.blit(charge_text_surface, sign_position)
if len(self.charge_distribution) > 1 and self.showing_electric_forces_vectors:
self._draw_vector(
self.force_vector,
charge.position,
force,
radius,
colors.YELLOW,
self.showing_vectors_components,
)
def show_electric_field(self) -> None:
restricted_points = [
charge.position for charge in self.charge_distribution.charges_set.charges
]
self.electric_field.draw(restricted_points)
self._electric_field_copy = self._window.copy()
def clear_electric_field_copy(self):
self._electric_field_copy = None
def has_been_resized(self) -> bool:
if self._window.get_size() != self._last_screen_size:
self._last_screen_size = self._window.get_size()
return True
return False
class Field:
BRIGHTNESS_VARIATION = 25
MAX_BRIGHTNESS = 205
MIN_BRIGHTNESS = 50
def __init__(
self,
window: pygame.Surface,
brightness: int,
field_function: Callable,
space_between_vectors: int,
) -> None:
"""
field_function must be a function that given a 2-dimensional vector
returns another 2-dimensional vector.
space_between_vectors is the amount of pixels between each vector. The
shorter space_between_vectors is the more accurate the field will be.
"""
self._window = window
self.brightness = brightness
self.vector_painter = ColoredVector(window)
self.field_function = field_function
self.space_between_vectors = space_between_vectors
def _is_in_restricted_point(
self, position: ndarray, restricted_points: list[ndarray]
) -> bool:
for point in restricted_points:
if norm(position - point) <= AnimatedProton.RADIUS * 2:
return True
return False
def _get_field(self, restricted_points: list[ndarray]) -> list[dict]:
field = []
w, h = self._window.get_size()
for x in range(0, w, self.space_between_vectors):
for y in range(0, h, self.space_between_vectors):
if self._is_in_restricted_point(array((x, y)), restricted_points):
continue
field.append(
{"position": (x, y), "vector": (self.field_function((x, y)))}
)
return field
@staticmethod
def get_sorted_field_vectors(field: list[dict]) -> list[dict]:
field_vectors = [d["vector"] for d in field]
return sorted(field_vectors, reverse=True, key=norm)
@staticmethod
def get_greatest_norm(field: list[dict]) -> float:
return norm(Field.get_sorted_field_vectors(field)[0])
def draw(self, restricted_points: list[ndarray]) -> None:
field = self._get_field(restricted_points)
greatest_norm = Field.get_greatest_norm(field)
red_blue_color_generator = colors.RedBlueColorGenerator(greatest_norm)
for d in field:
self.vector_painter.draw(
d["position"],
d["vector"],
red_blue_color_generator.get_color(norm(d["vector"]), self.brightness),
)
class Vector:
DELTA_SCALE_FACTOR = 2
DEFAULT_VECTOR_HEAD_LENGTH = 8
DEFAULT_VECTOR_WIDTH = 2
def __init__(
self, window: pygame.Surface, scale_factor: int, minimum_vector_norm: int = None
) -> None:
self._window = window
self.scale_factor = scale_factor
self.minimum_vector_norm = minimum_vector_norm
self.last_end_point = [0, 0]
def draw(
self,
position: tuple,
vector: tuple,
radius: int,
color: tuple,
) -> None:
"""Draws a vector at the given position."""
vector_norm = (vector[0] ** 2 + vector[1] ** 2) ** (1 / 2)
unit_vector = [vector[0] / vector_norm, vector[1] / vector_norm]
if self.minimum_vector_norm:
if vector_norm * self.scale_factor < self.minimum_vector_norm:
vector = (
unit_vector[0] * self.minimum_vector_norm / self.scale_factor,
unit_vector[1] * self.minimum_vector_norm / self.scale_factor,
)
start_point = (
position[0] + unit_vector[0] * radius,
position[1] + unit_vector[1] * radius,
)
end_point = (
start_point[0] + vector[0] * self.scale_factor,
start_point[1] + vector[1] * self.scale_factor,
)
pygame.draw.line(
self._window, color, start_point, end_point, Vector.DEFAULT_VECTOR_WIDTH
)
self._draw_vector_head(
vector,
end_point,
color,
Vector.DEFAULT_VECTOR_HEAD_LENGTH,
Vector.DEFAULT_VECTOR_WIDTH,
)
self.last_end_point = end_point
def _draw_vector_head(
self, vector, vector_end_point, color, head_length, head_width
):
vector_angle = Vector.get_angle(vector)
if vector[1] < 0:
vector_angle *= -1
left_head_vector = (
head_length * cos(vector_angle + pi * 5 / 4),
head_length * sin(vector_angle + pi * 5 / 4),
)
left_head_endpoint = (
vector_end_point[0] + left_head_vector[0],
vector_end_point[1] + left_head_vector[1],
)
pygame.draw.line(
self._window,
color,
vector_end_point,
left_head_endpoint,
head_width,
)
right_head_vector = (
head_length * cos(vector_angle - pi * 5 / 4),
head_length * sin(vector_angle - pi * 5 / 4),
)
right_head_endpoint = (
vector_end_point[0] + right_head_vector[0],
vector_end_point[1] + right_head_vector[1],
)
pygame.draw.line(
self._window,
color,
vector_end_point,
right_head_endpoint,
head_width,
)
@staticmethod
def get_norm(vector):
return sqrt(vector[0] ** 2 + vector[1] ** 2)
@staticmethod
def get_angle(vector):
return acos(vector[0] / Vector.get_norm(vector))
class ColoredVector(Vector):
"""
    Vector whose norm is indicated by its color rather than its length.
    The redder it is, the greater its norm.
"""
DEFAULT_VECTOR_WIDTH = 2
DEFAULT_HEAD_LENGTH = 4
def __init__(
self,
window: pygame.Surface,
scale_factor: int = 10,
) -> None:
self._window = window
self.scale_factor = scale_factor
def draw(self, position: tuple, vector: tuple, color: tuple) -> None:
"""Draws a unit vector scaled by scale_factor at the given position."""
vector_norm = (vector[0] ** 2 + vector[1] ** 2) ** (1 / 2)
unit_vector = [vector[0] / vector_norm, vector[1] / vector_norm]
end_point = [
position[0] + unit_vector[0] * self.scale_factor,
position[1] + unit_vector[1] * self.scale_factor,
]
pygame.draw.line(
self._window, color, position, end_point, ColoredVector.DEFAULT_VECTOR_WIDTH
)
self._draw_vector_head(
vector,
end_point,
color,
ColoredVector.DEFAULT_HEAD_LENGTH,
ColoredVector.DEFAULT_VECTOR_WIDTH,
)
class AnimatedProton:
COLOR = colors.RED
RADIUS = 20
class AnimatedElectron:
COLOR = colors.BLUE
RADIUS = 20
class AnimatedPoint:
COLOR = colors.ORANGE
RADIUS = 10
|
11517337
|
import os
import unittest
from programy.utils.files.filewriter import ConversationFileWriter
from programy.utils.files.filewriter import FileWriterConfiguration
from programytest.utils.files.utils import get_os_specific_path
class ConversationFileWriterTests(unittest.TestCase):
def test_init(self):
filename = get_os_specific_path() + os.sep + "conversation.txt"
config = FileWriterConfiguration(filename=filename, fileformat="txt", mode="a", encoding="utf-8",
delete_on_start=False)
writer = ConversationFileWriter(config)
self.assertIsNotNone(writer)
writer.log_question_and_answer("client1", "question", "answer")
self.assertTrue(os.path.exists(filename))
if os.path.exists(filename):
os.remove(filename)
self.assertFalse(os.path.exists(filename))
|
11517347
|
import torch
from torch.nn.functional import mse_loss as mse
def psnr(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
r"""Create a function that calculates the PSNR between 2 images.
PSNR is Peek Signal to Noise Ratio, which is similar to mean squared error.
Given an m x n image, the PSNR is:
.. math::
\text{PSNR} = 10 \log_{10} \bigg(\frac{\text{MAX}_I^2}{MSE(I,T)}\bigg)
where
.. math::
\text{MSE}(I,T) = \frac{1}{mn}\sum_{i=0}^{m-1}\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
and :math:`\text{MAX}_I` is the maximum possible input value
(e.g for floating point images :math:`\text{MAX}_I=1`).
Args:
input: the input image with arbitrary shape :math:`(*)`.
        target: the target image with arbitrary shape :math:`(*)`.
max_val: The maximum value in the input tensor.
Return:
the computed loss as a scalar.
Examples:
>>> ones = torch.ones(1)
>>> psnr(ones, 1.2 * ones, 2.) # 10 * log(4/((1.2-1)**2)) / log(10)
tensor(20.0000)
Reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Expected torch.Tensor but got {type(input)}.")
    if not isinstance(target, torch.Tensor):
        raise TypeError(f"Expected torch.Tensor but got {type(target)}.")
if input.shape != target.shape:
raise TypeError(f"Expected tensors of equal shapes, but got {input.shape} and {target.shape}")
return 10.0 * torch.log10(max_val ** 2 / mse(input, target, reduction='mean'))
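# --- Brief self-check (not part of the original module) ---
# Compares psnr() against the docstring formula on a constant pair of tensors;
# both expressions should print 20.
if __name__ == '__main__':
    ones = torch.ones(4)
    noisy = 1.2 * ones
    expected = 10.0 * torch.log10(torch.tensor(2.0 ** 2 / 0.2 ** 2))
    print(psnr(ones, noisy, max_val=2.0), expected)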
|
11517364
|
import logging
import os
import struct
import sys
import time
import importlib_resources
import kibra
import kibra.database as db
import kibra.network as NETWORK
from kibra.ktask import Ktask
from kibra.shell import bash
from kibra.thread import DEFS, TLV
from kibra.tlv import ThreadTLV
from kitools import kidfu, kifwu, kiserial
NCP_FW_FOLDER = 'kibra.ncp_fw'
SERIAL_DEV = None
def send_cmd(cmd, debug_level=None):
    logging.info(cmd)
    try:
        resp = SERIAL_DEV.ksh_cmd(cmd, debug_level)
    except Exception:
        logging.error('Device %s is not responding.', db.get('serial_device'))
        resp = []
    logging.info('\n'.join(resp))
    return resp
def _find_device(snum):
'''Find the serial device with the required serial number'''
if snum:
logging.info('Trying to find device with serial %s.', snum)
kirale_devs = kiserial.find_devices(has_snum=snum)
if kirale_devs:
return kirale_devs[0].port
# Attempt to find any attached KiNOS device
logging.info('Trying to find a KiNOS device...')
kirale_devs = kiserial.find_devices(has_br=True)
if kirale_devs:
logging.info('KiNOS device was found on %s!', kirale_devs[0].port)
return kirale_devs[0].port
logging.error('No KiNOS devices found.')
sys.exit()
def ncp_fw_update():
'''
    Compare the NCP firmware with the one bundled in the 'kibra.ncp_fw' package
    and update if needed.
'''
# Find the DFU file that matches the required fw version
dfu_file = None
ver_num = kibra.__kinosver__.split(' v')[-1]
for file_name in importlib_resources.contents(NCP_FW_FOLDER):
if ver_num in file_name:
# TODO: This relies on the file name, we could also check the file
# contents to make sure
dfu_file = file_name
break
if not dfu_file:
logging.error('Required NCP firmware not present.')
sys.exit()
# Flash the NCP and re-enable it
with importlib_resources.path(NCP_FW_FOLDER, dfu_file) as dfu_path:
        logging.warning('NCP will be updated with firmware v%s' % ver_num)
try:
dfu_file = kidfu.DfuFile(str(dfu_path))
kifwu.dfu_find_and_flash(dfu_file, unattended=True)
except Exception as exc:
logging.error('Problem updating NCP firmware: %s' % exc)
sys.exit()
logging.info('NCP updated successfully.')
def enable_ncp():
'''Find the device and initialize the port'''
global SERIAL_DEV
# Find device and initialize port
port = _find_device(db.get('ncp_serial'))
if not port:
return
logging.info('Serial device is %s.', port)
db.set('serial_device', port)
SERIAL_DEV = kiserial.KiSerial(port, debug=kiserial.KiDebug(kiserial.KiDebug.NONE))
send_cmd('debug level none', debug_level=kiserial.KiDebug.NONE)
# Save serial number
serial = send_cmd('show snum')[0]
db.set('ncp_serial', serial)
# Update the NCP firmware if needed
if kibra.__kinosver__ not in send_cmd('show swver')[-1]:
logging.info('NCP needs a firmware update.')
ncp_fw_update()
enable_ncp()
# No need to continue if NCP fw version is up to date
else:
logging.info('NCP firmware is up to date.')
# Make sure we are running Thread v3 (1.2.0)
if not kibra.__harness__ and 'Thread v3' not in send_cmd('show thver')[0]:
send_cmd('clear')
SERIAL_DEV.wait_for('status', 'none')
send_cmd('config thver 3')
# Enable ECM if not enabled
if 'off' in send_cmd('show hwconfig')[3]:
        logging.info('Enabling CDC Ethernet and resetting device.')
send_cmd('config hwmode 4')
send_cmd('reset')
time.sleep(3)
del SERIAL_DEV
enable_ncp()
def _ncp_apply_config():
# Config network parameters
if 'ncp_emac' in db.CFG:
send_cmd('config emac %s' % db.get('ncp_emac'))
if db.get('ncp_outband'):
# TODO: make sure that the required settings exist
send_cmd('config outband')
if 'ncp_xpanid' in db.CFG:
send_cmd('config xpanid %s' % db.get('ncp_xpanid'))
if 'ncp_netkey' in db.CFG:
send_cmd('config mkey %s' % db.get('ncp_netkey'))
if 'ncp_prefix' in db.CFG:
send_cmd('config mlprefix %s' % db.get('ncp_prefix').split('/')[0])
if 'ncp_channel' in db.CFG:
logging.info('Configure NCP channel %s.', db.get('ncp_channel'))
send_cmd('config channel %s' % db.get('ncp_channel'))
if 'ncp_panid' in db.CFG:
logging.info('Configure NCP panid %s.', db.get('ncp_panid'))
send_cmd('config panid %s' % db.get('ncp_panid'))
if 'ncp_netname' in db.CFG:
logging.info('Configure NCP network name %s.', db.get('ncp_netname'))
send_cmd('config netname "%s"' % db.get('ncp_netname'))
if 'ncp_commcred' in db.CFG:
logging.info(
'Configure NCP comissioner credential %s.', db.get('ncp_commcred')
)
send_cmd('config commcred "%s"' % db.get('ncp_commcred'))
# Set role
role = db.get('ncp_role')
logging.info('Set NCP as %s.', role)
send_cmd('config role %s' % role)
def _configure():
global SERIAL_DEV
# Wait for the NCP to reach a steady status
logging.info('Waiting until NCP is steady...')
ncp_status = 'disconnected'
while not ('none' in ncp_status or 'joined' in ncp_status):
ncp_status = send_cmd('show status')[0]
time.sleep(1)
db.set('ncp_status', ncp_status)
# Different actions according to NCP status
if ncp_status == 'none':
if not kibra.__harness__:
_ncp_apply_config()
_enable_br()
send_cmd('ifup')
elif ncp_status == 'none - saved configuration':
_enable_br()
send_cmd('ifup')
elif ncp_status == 'joined':
send_cmd('ifdown')
_configure()
else: # Other 'none' statuses
logging.warning('Dongle status was "%s".' % ncp_status)
send_cmd('clear')
SERIAL_DEV.wait_for('status', 'none')
_configure()
def _enable_br():
'''Enable CDC ETH traffic'''
send_cmd('config brouter on')
logging.info('CDC ETH traffic has been enabled.')
def bbr_dataset_update():
'''
Update Thread BBR Service Data
Automatically increases the sequence number
'''
# Increase sequence number
bbr_sequence_number = (db.get('bbr_seq') + 1) % 0xFF
# Build s_server_data
reregistration_delay = db.get('rereg_delay')
mlr_timeout = db.get('mlr_timeout')
s_server_data = struct.pack(
DEFS.THREAD_SERVICE_DATA_FMT,
bbr_sequence_number,
reregistration_delay,
mlr_timeout,
)
# Store used values
db.set('bbr_seq', bbr_sequence_number)
# Enable BBR
send_cmd(
'config service add %u %s %s'
% (
DEFS.THREAD_ENTERPRISE_NUMBER,
DEFS.THREAD_SERVICE_DATA_BBR,
bytes(s_server_data).hex(),
)
)
logging.info(
'BBR update: Seq. = %d MLR Timeout = %d, Rereg. Delay = %d'
% (bbr_sequence_number, mlr_timeout, reregistration_delay)
)
def prefix_handle(
type_: str, # 'prefix' or 'route'
action: str, # 'add' or 'remove'
prefix: str, # 'prefix/length'
stable=False,
on_mesh=False,
preferred=False,
slaac=False,
dhcp=False,
configure=False,
default=False,
preference='medium',
nd_dns=False,
dp=False,
):
'''
5.18.3 Border Router TLV, 16 bits of flags:
0 - Reserved --> Used by Kirale command to indicate Stable
1 - Reserved
2 - Reserved
3 - Reserved
4 - Reserved
5 - Reserved
6 - DP
7 - ND DNS
8 - On Mesh
9 - Default
10 - Configure
11 - DHCP
12 - SLAAC
13 - Preferred
14 - Preference
15 - Preference
'''
flags = 0x0000
if stable:
flags |= 1 << 0
if on_mesh:
flags |= 1 << 8
if preferred:
flags |= 1 << 13
if slaac:
flags |= 1 << 12
if dhcp:
flags |= 1 << 11
if configure:
flags |= 1 << 10
if default:
flags |= 1 << 9
if nd_dns:
flags |= 1 << 7
if dp:
flags |= 1 << 6
if preference == 'high':
flags |= 1 << 14
elif preference == 'low':
flags |= 3 << 14
flags = '0x' + str(hex(flags).replace('0x', '').zfill(4))
pool, length = prefix.split('/')
send_cmd('config %s %s %s %s %s' % (type_, action, pool, length, flags))
logging.info('Config %s %s %s/%s', type_, action, pool, length)
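# Worked example of the flag layout above (illustration only, prefix is a placeholder):
# stable + on_mesh + default + slaac maps to
#   (1 << 0) | (1 << 8) | (1 << 9) | (1 << 12) = 0x1301,
# so prefix_handle('prefix', 'add', 'fd00:db8::/64', stable=True, on_mesh=True,
# default=True, slaac=True) issues "config prefix add fd00:db8:: 64 0x1301".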
def _bagent_on():
send_cmd('config bagent on')
logging.info('Border agent has been enabled.')
def _bagent_off():
send_cmd('config bagent off')
logging.info('Border agent has been disabled.')
def _get_prefix_flags():
slaac = True if db.get('prefix_slaac') else False
dhcp = True if db.get('prefix_dhcp') else False
dp = True if db.get('prefix_dua') else False
# Force SLAAC if no other flags are set
if not dp and not dhcp:
slaac = True
# DHCP overrides SLAAC
if dhcp:
slaac = False
return slaac, dhcp, dp
class SERIAL(Ktask):
def __init__(self):
Ktask.__init__(
self,
name='serial',
start_keys=['ncp_serial'],
start_tasks=['network', 'syslog'],
stop_tasks=['diags', 'coapserver'],
period=2,
)
def kstart(self):
db.set('prefix_active', 0)
db.set('ncp_heui64', send_cmd('show heui64')[0])
_configure()
# From now on the syslog daemon will detect changes
def kstop(self):
if db.get('prefix_active'):
# Remove prefix from the network
slaac, dhcp, dp = _get_prefix_flags()
prefix_handle(
'prefix',
'remove',
db.get('prefix'),
stable=True,
on_mesh=True,
default=True,
slaac=slaac,
dhcp=dhcp,
dp=dp,
)
# Mark prefix as inactive
db.set('prefix_active', 0)
_bagent_off()
send_cmd('ifdown')
async def periodic(self):
# Detect if serial was disconnected
try:
SERIAL_DEV.is_active()
except IOError:
logging.error('Device %s has been disconnected.', db.get('serial_device'))
self.kstop()
self.kill()
except Exception:
logging.error('Device %s is not responding.', db.get('serial_device'))
return
# Don't continue if device is not joined
if db.get('ncp_status') != 'joined' or db.get('status_serial') != 'running':
return
if not db.get('prefix_active'):
slaac, dhcp, dp = _get_prefix_flags()
# Don't continue if servers are not running
if dhcp and db.get('status_dhcp') not in 'running':
return
if dp and db.get('status_coapserver') not in 'running':
return
# Enable border agent
_bagent_on()
# Add route
NETWORK.ncp_route_enable(db.get('prefix'))
# Announce prefix to the network
prefix_handle(
'prefix',
'add',
db.get('prefix'),
stable=True,
on_mesh=True,
default=True,
slaac=slaac,
dhcp=dhcp,
dp=dp,
)
# Start as Secondary (KiNOS will notify the change to Primary)
db.set('bbr_status', 'secondary')
logging.info('This BBR is now Secondary.')
# Announce service
bbr_dataset_update()
# Mark prefix as active
db.set('prefix_active', 1)
|
11517403
|
import argparse
import pandas as pd
import numpy as np
import re
parser = argparse.ArgumentParser(description='Encode CpG labels from Wig file into compact format')
parser.add_argument('dataFile', type=str, metavar='<.txt file>',
help='data file')
parser.add_argument('EncodedGenome', type=str, metavar='<.npz file>',
help='Encoded genome from EncodeGenome.py.')
parser.add_argument('y_outFile', type=str, metavar='<.npz file>',
help='output file to save encoded labels in.')
parser.add_argument('pos_outFile', type=str, metavar='<.npz file>',
help='output file to save encoded positions of labels in.')
parser.add_argument('--chroms', nargs="+", type=str, required=True,
help='ordering of chromosomes in the fasta file')
parser.add_argument('--prepend_chr', action='store_true',
help='whether to prepend the str "chr" to the names of chromosomes given in --chroms.')
args = parser.parse_args()
chroms = args.chroms
if args.prepend_chr:
chroms = ["chr" + c for c in chroms]
print('Reading data ...')
dat = pd.read_csv(args.dataFile, sep='\t', header=None,
dtype={0:'string'})
dat_plus = dat[dat[2]=='+']
dat_minus = dat[dat[2]=='-']
assert ((dat_minus[1]-1).values != dat_plus[1].values).sum()==0, 'CpG sites on the + and - strands do not overlap 100% in the data'
count_meth = dat_plus[3].values+dat_minus[3].values
count_unmeth = dat_plus[4].values+dat_minus[4].values
to_fill_df = dat_plus[[0,1]]
filter_ = count_meth+count_unmeth != 0
count_meth = count_meth[filter_]
count_unmeth = count_unmeth[filter_]
to_fill_df = to_fill_df[filter_]
to_fill_df.loc[:,2] = (count_meth/(count_meth+count_unmeth)>=0.5).astype('int8')
to_fill_df.loc[:,3] = count_meth.astype('uint16')
to_fill_df.loc[:,4] = count_unmeth.astype('uint16')
X_encoded = np.load(args.EncodedGenome)
y_encoded = {}
pos_encoded = {}
for chrom_name in chroms:
print('Encoding',chrom_name,'...')
X_chrom = X_encoded[chrom_name]
indices = np.where(X_chrom==2)[0]
dat_chrom = to_fill_df[to_fill_df[0] == chrom_name[3:]]
dat_subset = dat_chrom[np.in1d(dat_chrom[1].values-1, indices)]
y_encoded[chrom_name] = dat_subset[2].values.astype('int8')
pos_encoded[chrom_name] = (dat_subset[1].values-1).astype('int32')
np.savez_compressed(args.y_outFile, **y_encoded)
np.savez_compressed(args.pos_outFile, **pos_encoded)
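# Example invocation (script and file names are placeholders for illustration):
#   python encode_cpg_labels.py methylation_counts.txt genome_encoded.npz \
#       y_labels.npz y_positions.npz --chroms 1 2 X --prepend_chr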
|
11517454
|
import functools
import hashlib
import json
import logging
import os
import re
import shutil
import sys
import tempfile
import packaging.version
import six
from six.moves import shlex_quote
from galaxy.tools.deps.commands import CommandLineException
from galaxy.util import (
smart_str,
unicodify
)
from . import (
commands,
installable
)
log = logging.getLogger(__name__)
# Not sure there are security concerns, let's just fail fast if we are going to
# break shell commands we are building.
SHELL_UNSAFE_PATTERN = re.compile(r"[\s\"']")
IS_OS_X = sys.platform == "darwin"
# BSD 3-clause
CONDA_LICENSE = "http://docs.continuum.io/anaconda/eula"
VERSIONED_ENV_DIR_NAME = re.compile(r"__(.*)@(.*)")
UNVERSIONED_ENV_DIR_NAME = re.compile(r"__(.*)@_uv_")
USE_PATH_EXEC_DEFAULT = False
CONDA_VERSION = "4.3.33"
CONDA_BUILD_VERSION = "2.1.18"
USE_LOCAL_DEFAULT = False
def conda_link():
if IS_OS_X:
url = "https://repo.continuum.io/miniconda/Miniconda3-4.6.14-MacOSX-x86_64.sh"
else:
url = "https://repo.continuum.io/miniconda/Miniconda3-4.6.14-Linux-x86_64.sh"
return url
def find_conda_prefix(conda_prefix=None):
""" If supplied conda_prefix is not set, default to the default location
for Miniconda installs.
"""
if conda_prefix is None:
home = os.path.expanduser("~")
miniconda_2_dest = os.path.join(home, "miniconda2")
miniconda_3_dest = os.path.join(home, "miniconda3")
# Prefer miniconda3 install if both available
if os.path.exists(miniconda_3_dest):
return miniconda_3_dest
elif os.path.exists(miniconda_2_dest):
return miniconda_2_dest
else:
return miniconda_3_dest
return conda_prefix
class CondaContext(installable.InstallableContext):
installable_description = "Conda"
def __init__(self, conda_prefix=None, conda_exec=None,
shell_exec=None, debug=False, ensure_channels='',
condarc_override=None, use_path_exec=USE_PATH_EXEC_DEFAULT,
copy_dependencies=False, use_local=USE_LOCAL_DEFAULT):
self.condarc_override = condarc_override
if not conda_exec and use_path_exec:
conda_exec = commands.which("conda")
if conda_exec:
conda_exec = os.path.normpath(conda_exec)
self.conda_exec = conda_exec
self.debug = debug
self.shell_exec = shell_exec or commands.shell
self.copy_dependencies = copy_dependencies
if conda_prefix is None:
info = self.conda_info()
if info and "default_prefix" in info:
conda_prefix = info["default_prefix"]
if conda_prefix is None:
conda_prefix = find_conda_prefix(conda_prefix)
self.conda_prefix = conda_prefix
if conda_exec is None:
self.conda_exec = self._bin("conda")
if ensure_channels:
if not isinstance(ensure_channels, list):
ensure_channels = [c for c in ensure_channels.split(",") if c]
else:
ensure_channels = None
self.ensure_channels = ensure_channels
self._conda_version = None
self._miniconda_version = None
self._conda_build_available = None
self.use_local = use_local
@property
def conda_version(self):
if self._conda_version is None:
self._guess_conda_properties()
return self._conda_version
@property
def conda_build_available(self):
if self._conda_build_available is None:
self._guess_conda_properties()
return self._conda_build_available
def _guess_conda_properties(self):
conda_meta_path = self._conda_meta_path
# Perhaps we should call "conda info --json" and parse it but for now we are going
# to assume the default.
conda_version = packaging.version.parse(CONDA_VERSION)
conda_build_available = False
miniconda_version = "3"
if os.path.exists(conda_meta_path):
for package in os.listdir(conda_meta_path):
package_parts = package.split("-")
if len(package_parts) < 3:
continue
package = '-'.join(package_parts[:-2])
version = package_parts[-2]
# build = package_parts[-1]
if package == "conda":
conda_version = packaging.version.parse(version)
if package == "python" and version.startswith("2"):
miniconda_version = "2"
if package == "conda-build":
conda_build_available = True
self._conda_version = conda_version
self._miniconda_version = miniconda_version
self._conda_build_available = conda_build_available
@property
def _conda_meta_path(self):
return os.path.join(self.conda_prefix, "conda-meta")
@property
def _override_channels_args(self):
override_channels_args = []
if self.ensure_channels:
override_channels_args.append("--override-channels")
for channel in self.ensure_channels:
override_channels_args.extend(["--channel", channel])
return override_channels_args
def ensure_conda_build_installed_if_needed(self):
if self.use_local and not self.conda_build_available:
conda_targets = [CondaTarget("conda-build", version=CONDA_BUILD_VERSION)]
# Cannot use --use-local during installation of conda-build.
return install_conda_targets(conda_targets, conda_context=self, env_name=None, allow_local=False)
else:
return 0
def conda_info(self):
if self.conda_exec is not None:
info_out = commands.execute([self.conda_exec, "info", "--json"])
info_out = unicodify(info_out)
info = json.loads(info_out)
return info
else:
return None
def is_conda_installed(self):
"""
Check if conda_exec exists
"""
if os.path.exists(self.conda_exec):
return True
else:
return False
def can_install_conda(self):
"""
If conda_exec is set to a path outside of conda_prefix,
there is no use installing conda into conda_prefix, since it can't be used by galaxy.
If conda_exec equals conda_prefix/bin/conda, we can install conda if either conda_prefix
does not exist or is empty.
"""
conda_exec = os.path.abspath(self.conda_exec)
conda_prefix_plus_exec = os.path.abspath(os.path.join(self.conda_prefix, 'bin/conda'))
if conda_exec == conda_prefix_plus_exec:
if not os.path.exists(self.conda_prefix):
return True
elif os.listdir(self.conda_prefix) == []:
os.rmdir(self.conda_prefix) # Conda's install script fails if path exists (even if empty).
return True
else:
log.warning("Cannot install Conda because conda_prefix '%s' exists and is not empty.",
self.conda_prefix)
return False
else:
log.warning("Skipping installation of Conda into conda_prefix '%s', "
"since conda_exec '%s' is set to a path outside of conda_prefix.",
self.conda_prefix, self.conda_exec)
return False
def exec_command(self, operation, args, stdout_path=None):
"""
Execute the requested command.
Return the process exit code (i.e. 0 in case of success).
"""
cmd = [self.conda_exec]
if self.debug:
cmd.append("--debug")
cmd.append(operation)
cmd.extend(args)
env = {}
if self.condarc_override:
env["CONDARC"] = self.condarc_override
cmd_string = ' '.join(map(shlex_quote, cmd))
        kwds = dict()
        conda_exec_home = None
        try:
if stdout_path:
kwds['stdout'] = open(stdout_path, 'w')
cmd_string += " > '%s'" % stdout_path
conda_exec_home = env['HOME'] = tempfile.mkdtemp(prefix='conda_exec_home_') # We don't want to pollute ~/.conda, which may not even be writable
log.debug("Executing command: %s", cmd_string)
return self.shell_exec(cmd, env=env, **kwds)
except Exception:
log.exception("Failed to execute command: %s", cmd_string)
return 1
finally:
if kwds.get('stdout'):
kwds['stdout'].close()
if conda_exec_home:
shutil.rmtree(conda_exec_home, ignore_errors=True)
def exec_create(self, args, allow_local=True, stdout_path=None):
"""
Return the process exit code (i.e. 0 in case of success).
"""
create_base_args = [
"-y",
"--quiet"
]
if allow_local and self.use_local:
create_base_args.extend(["--use-local"])
create_base_args.extend(self._override_channels_args)
create_base_args.extend(args)
return self.exec_command("create", create_base_args, stdout_path=stdout_path)
def exec_remove(self, args):
"""
Remove a conda environment using conda env remove -y --name `args`.
Return the process exit code (i.e. 0 in case of success).
"""
remove_base_args = [
"remove",
"-y",
"--name"
]
remove_base_args.extend(args)
return self.exec_command("env", remove_base_args)
def exec_install(self, args, allow_local=True, stdout_path=None):
"""
Return the process exit code (i.e. 0 in case of success).
"""
install_base_args = [
"-y"
]
if allow_local and self.use_local:
install_base_args.append("--use-local")
install_base_args.extend(self._override_channels_args)
install_base_args.extend(args)
return self.exec_command("install", install_base_args, stdout_path=stdout_path)
def exec_clean(self, args=[], quiet=False):
"""
Clean up after conda installation.
Return the process exit code (i.e. 0 in case of success).
"""
clean_base_args = [
"--tarballs",
"-y"
]
clean_args = clean_base_args + args
stdout_path = None
if quiet:
stdout_path = "/dev/null"
return self.exec_command("clean", clean_args, stdout_path=stdout_path)
def export_list(self, name, path):
"""
Return the process exit code (i.e. 0 in case of success).
"""
return self.exec_command("list", [
"--name", name,
"--export"
], stdout_path=path)
def env_path(self, env_name):
return os.path.join(self.envs_path, env_name)
@property
def envs_path(self):
return os.path.join(self.conda_prefix, "envs")
def has_env(self, env_name):
env_path = self.env_path(env_name)
return os.path.isdir(env_path)
@property
def deactivate(self):
return self._bin("deactivate")
@property
def activate(self):
return self._bin("activate")
def is_installed(self):
return self.is_conda_installed()
def can_install(self):
return self.can_install_conda()
@property
def parent_path(self):
return os.path.dirname(os.path.abspath(self.conda_prefix))
def _bin(self, name):
return os.path.join(self.conda_prefix, "bin", name)
def installed_conda_targets(conda_context):
envs_path = conda_context.envs_path
dir_contents = os.listdir(envs_path) if os.path.exists(envs_path) else []
for name in dir_contents:
versioned_match = VERSIONED_ENV_DIR_NAME.match(name)
if versioned_match:
yield CondaTarget(versioned_match.group(1), versioned_match.group(2))
unversioned_match = UNVERSIONED_ENV_DIR_NAME.match(name)
if unversioned_match:
yield CondaTarget(unversioned_match.group(1))
@six.python_2_unicode_compatible
class CondaTarget(object):
def __init__(self, package, version=None, channel=None):
if SHELL_UNSAFE_PATTERN.search(package) is not None:
raise ValueError("Invalid package [%s] encountered." % package)
self.package = package
if version and SHELL_UNSAFE_PATTERN.search(version) is not None:
raise ValueError("Invalid version [%s] encountered." % version)
self.version = version
if channel and SHELL_UNSAFE_PATTERN.search(channel) is not None:
raise ValueError("Invalid version [%s] encountered." % channel)
self.channel = channel
    def __str__(self):
        attributes = "package=%s" % self.package
        if self.version is not None:
            attributes = "%s,version=%s" % (attributes, self.version)
        else:
            attributes = "%s,unversioned" % attributes
        if self.channel:
            attributes = "%s,channel=%s" % (attributes, self.channel)
        return "CondaTarget[%s]" % attributes
__repr__ = __str__
@property
def package_specifier(self):
""" Return a package specifier as consumed by conda install/create.
"""
if self.version:
return "%s=%s" % (self.package, self.version)
else:
return self.package
@property
def install_environment(self):
""" The dependency resolution and installation frameworks will
        expect each target to be installed in its own environment with
a fixed and predictable name given package and version.
"""
if self.version:
return "__%s@%s" % (self.package, self.version)
else:
return "__%s@_uv_" % (self.package)
def __hash__(self):
return hash((self.package, self.version, self.channel))
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.package, self.version, self.channel) == (other.package, other.version, other.channel)
return False
def __ne__(self, other):
return not(self == other)
def hash_conda_packages(conda_packages, conda_target=None):
""" Produce a unique hash on supplied packages.
TODO: Ideally we would do this in such a way that preserved environments.
"""
h = hashlib.new('sha256')
for conda_package in conda_packages:
h.update(smart_str(conda_package.install_environment))
return h.hexdigest()
# shell makes sense for planemo, in Galaxy this should just execute
# these commands as Python
def install_conda(conda_context, force_conda_build=False):
f, script_path = tempfile.mkstemp(suffix=".sh", prefix="conda_install")
os.close(f)
download_cmd = commands.download_command(conda_link(), to=script_path, quote_url=False)
install_cmd = ['bash', script_path, '-b', '-p', conda_context.conda_prefix]
package_targets = [
"conda=%s" % CONDA_VERSION,
]
if force_conda_build or conda_context.use_local:
package_targets.append("conda-build=%s" % CONDA_BUILD_VERSION)
log.info("Installing conda, this may take several minutes.")
try:
exit_code = conda_context.shell_exec(download_cmd)
if exit_code:
return exit_code
exit_code = conda_context.shell_exec(install_cmd)
except Exception:
log.exception('Failed to install conda')
return 1
finally:
if os.path.exists(script_path):
os.remove(script_path)
if exit_code:
return exit_code
return conda_context.exec_install(package_targets, allow_local=False)
def install_conda_targets(conda_targets, conda_context, env_name=None, allow_local=True):
"""
Return the process exit code (i.e. 0 in case of success).
"""
if env_name is not None:
create_args = [
"--name", env_name, # environment for package
]
for conda_target in conda_targets:
create_args.append(conda_target.package_specifier)
return conda_context.exec_create(create_args, allow_local=allow_local)
else:
return conda_context.exec_install([t.package_specifier for t in conda_targets], allow_local=allow_local)
def install_conda_target(conda_target, conda_context, skip_environment=False):
"""
    Install specified target into its own environment.
Return the process exit code (i.e. 0 in case of success).
"""
if not skip_environment:
create_args = [
"--name", conda_target.install_environment, # environment for package
conda_target.package_specifier,
]
return conda_context.exec_create(create_args)
else:
return conda_context.exec_install([conda_target.package_specifier])
def cleanup_failed_install_of_environment(env, conda_context):
if conda_context.has_env(env):
conda_context.exec_remove([env])
def cleanup_failed_install(conda_target, conda_context=None):
cleanup_failed_install_of_environment(conda_target.install_environment, conda_context=conda_context)
def best_search_result(conda_target, conda_context, channels_override=None, offline=False):
"""Find best "conda search" result for specified target.
Return ``None`` if no results match.
"""
search_cmd = [conda_context.conda_exec, "search", "--full-name", "--json"]
if offline:
search_cmd.append("--offline")
if channels_override:
search_cmd.append("--override-channels")
for channel in channels_override:
search_cmd.extend(["--channel", channel])
else:
search_cmd.extend(conda_context._override_channels_args)
search_cmd.append(conda_target.package)
try:
res = commands.execute(search_cmd)
res = unicodify(res)
hits = json.loads(res).get(conda_target.package, [])
hits = sorted(hits, key=lambda hit: packaging.version.parse(hit['version']), reverse=True)
except CommandLineException:
log.error("Could not execute: '%s'", search_cmd)
hits = []
if len(hits) == 0:
return (None, None)
best_result = (hits[0], False)
for hit in hits:
if is_search_hit_exact(conda_target, hit):
best_result = (hit, True)
break
return best_result
def is_search_hit_exact(conda_target, search_hit):
target_version = conda_target.version
    # It'd be nice to make a requested version of 1.0 match an available
# version of 1.0.3 or something like that.
return not target_version or search_hit['version'] == target_version
def is_conda_target_installed(conda_target, conda_context):
# fail by default
if conda_context.has_env(conda_target.install_environment):
return True
else:
return False
def filter_installed_targets(conda_targets, conda_context):
installed = functools.partial(is_conda_target_installed,
conda_context=conda_context)
return list(filter(installed, conda_targets))
def build_isolated_environment(
conda_packages,
conda_context,
path=None,
copy=False,
quiet=False,
):
""" Build a new environment (or reuse an existing one from hashes)
for specified conda packages.
"""
if not isinstance(conda_packages, list):
conda_packages = [conda_packages]
# Lots we could do in here, hashing, checking revisions, etc...
tempdir = None
try:
hash = hash_conda_packages(conda_packages)
tempdir = tempfile.mkdtemp(prefix="jobdeps", suffix=hash)
tempdir_name = os.path.basename(tempdir)
export_paths = []
for conda_package in conda_packages:
name = conda_package.install_environment
export_path = os.path.join(tempdir, name)
conda_context.export_list(
name,
export_path
)
export_paths.append(export_path)
create_args = ["--unknown"]
# Works in 3.19, 4.0 - 4.2 - not in 4.3.
# Adjust fix if they fix Conda - xref
# - https://github.com/galaxyproject/galaxy/issues/3635
# - https://github.com/conda/conda/issues/2035
offline_works = (conda_context.conda_version < packaging.version.parse("4.3")) or \
(conda_context.conda_version >= packaging.version.parse("4.4"))
if offline_works:
create_args.extend(["--offline"])
else:
create_args.extend(["--use-index-cache"])
if path is None:
create_args.extend(["--name", tempdir_name])
else:
create_args.extend(["--prefix", path])
if copy:
create_args.append("--copy")
for export_path in export_paths:
create_args.extend([
"--file", export_path
])
stdout_path = None
if quiet:
stdout_path = "/dev/null"
if path is not None and os.path.exists(path):
exit_code = conda_context.exec_install(create_args, stdout_path=stdout_path)
else:
exit_code = conda_context.exec_create(create_args, stdout_path=stdout_path)
return (path or tempdir_name, exit_code)
finally:
conda_context.exec_clean(quiet=quiet)
if tempdir is not None:
shutil.rmtree(tempdir)
def requirement_to_conda_targets(requirement):
conda_target = None
if requirement.type == "package":
conda_target = CondaTarget(requirement.name,
version=requirement.version)
return conda_target
def requirements_to_conda_targets(requirements):
conda_targets = (requirement_to_conda_targets(_) for _ in requirements)
return [c for c in conda_targets if c is not None]
__all__ = (
'CondaContext',
'CondaTarget',
'install_conda',
'install_conda_target',
'requirements_to_conda_targets',
)
|
11517472
|
import numpy as np
import pandas as pd
import os
def retrieve_and_save(countries, fns, out_dir, names, keys, sample=True):
for idx, country in enumerate(countries):
df = pd.read_csv(fns[idx], sep=' ')
if sample:
df = df[df["sample"]==1]
df = df[(df.lat!=0) & (df.lon!=0)]
for name, key in zip(names, keys):
if not os.path.exists(os.path.join(out_dir, country)):
os.makedirs(os.path.join(out_dir, country))
np.save(os.path.join(out_dir, country, name), df[key])
if idx == 0:
pooled = df.copy()
else:
            pooled = pd.concat([pooled, df])
for name, key in zip(names, keys):
if not os.path.exists(os.path.join(out_dir, 'pooled')):
os.makedirs(os.path.join(out_dir, 'pooled'))
np.save(os.path.join(out_dir, 'pooled', name), pooled[key])
if __name__ == '__main__':
'''
    The set of samples used in the paper was not quite the full set due to missing Landscan data (one fewer cluster in Uganda LSMS, eight fewer clusters in Tanzania LSMS). Set this variable to True to use the same set of household clusters, or to False to use the full set.
'''
sample = True
############################
############ DHS ###########
############################
countries = ['nigeria', 'tanzania', 'uganda', 'malawi', 'rwanda']
fns = ['../data/output/DHS/Nigeria 2013 DHS (Cluster).txt',
'../data/output/DHS/Tanzania 2010 DHS (Cluster).txt',
'../data/output/DHS/Uganda 2011 DHS (Cluster).txt',
'../data/output/DHS/Malawi 2010 DHS (Cluster).txt',
'../data/output/DHS/Rwanda 2010 DHS (Cluster).txt']
out_dir = '../data/output/DHS/'
names = ['lats', 'lons', 'assets', 'nightlights', 'households']
keys = ['lat', 'lon', 'wealthscore', 'nl', 'n']
retrieve_and_save(countries, fns, out_dir, names, keys, sample=sample)
############################
############ LSMS ##########
############################
countries = ['nigeria', 'tanzania', 'uganda', 'malawi']
fns = ['../data/output/LSMS/Nigeria 2013 LSMS (Cluster).txt',
'../data/output/LSMS/Tanzania 2013 LSMS (Cluster).txt',
'../data/output/LSMS/Uganda 2012 LSMS (Cluster).txt',
'../data/output/LSMS/Malawi 2013 LSMS (Cluster).txt']
out_dir = '../data/output/LSMS/'
names = ['lats', 'lons', 'consumptions', 'nightlights', 'households']
keys = ['lat', 'lon', 'cons', 'nl', 'n']
retrieve_and_save(countries, fns, out_dir, names, keys, sample=sample)
|
11517511
|
import functools
import logging
def log_exception(logger=logging.getLogger(), rethrow=True):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logger.exception(e)
if rethrow:
raise
return wrapper
return deco
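# Minimal usage sketch (illustrative only): with rethrow=False the wrapper logs
# the full traceback via logger.exception and swallows the error, so the call
# evaluates to None instead of raising.
#
#   @log_exception(logging.getLogger(__name__), rethrow=False)
#   def read_config(path):
#       with open(path) as fh:
#           return fh.read()
#
#   read_config("missing.cfg")  # logs FileNotFoundError, returns None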
|
11517516
|
import argparse
import datetime
import getpass
import os
import subprocess
import sys
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) # noqa
sys.path.insert(0, pkg_root) # noqa
from scripts.redshift.ec2_instance_manager import EC2InstanceManager
def _init_env_vars():
os.chdir("../../config")
subprocess.call("source environment", shell=True)
os.chdir("../scripts/redshift")
def launch_loader(args):
_init_env_vars()
# spin up a new EC2 instance if necessary
if not args.instance_name:
instance_name = f"{getpass.getuser()}-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}"
print(f"No existing instance provided. Spinning up new EC2 {args.instance_type} instance: {instance_name}.")
ec2_instance = EC2InstanceManager(name=instance_name)
ec2_instance.create(instance_type=args.instance_type)
else:
print(f"Skipping instance creation. Using instance {args.instance_name}.")
ec2_instance = EC2InstanceManager(name=args.instance_name)
if args.state == 0:
print("Clearing all downloaded and generated files.")
ec2_instance.clear_dir("/mnt/*")
elif args.state == 1:
print("Clearing all generated files.")
ec2_instance.clear_dir("/mnt/output/*")
ec2_instance.provision()
ec2_instance.run(max_workers=args.max_workers,
state=args.state,
s3_upload_id=args.s3_upload_id,
project_uuids=args.project_uuids,
bundle_fqids=args.bundle_fqids)
if __name__ == '__main__': # pragma: no cover
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--instance-type",
help="Amazon EC2 instance type to create and execute ETL on.\n"
"One of --instance-type or --instance-name must be supplied.",
type=str,
default="c5d.4xlarge")
parser.add_argument("--instance-name",
help="Existing EC2 instance to execute ETL on.\n"
"One of --instance-type or --instance-name must be supplied.",
type=str,
default="")
parser.add_argument("--max-workers",
help="Maximum number of concurrent threads to use during extraction (bundle download).",
type=int,
default=512)
parser.add_argument("--state",
help="Current ETL machine state.\n"
"0 = Pre-ETL: executes full ETL\n"
"1 = Post-E: executes transform and load only\n"
"2 = Post-ET: executes load (includes upload to S3)\n"
"3 = Post-ET: executes load (from S3)",
type=int,
default=0)
parser.add_argument("--s3-upload-id",
help="REQUIRED for state==3.\n"
"S3 prefix (UUID) in dcp-matrix-service-preload-* S3 bucket to load Redshift from.",
type=str)
parser.add_argument("--project-uuids",
help="List of DCP Project UUIDs to load into Redshift.\n"
"If both project-uuids and bundle-fqids are not supplied, a full ETL will be performed.",
type=str,
nargs="*",
default="")
parser.add_argument("--bundle-fqids",
help="List of DCP Bundle FQIDs to load into Redshift.\n"
"If both project-uuids and bundle-fqids are not supplied, a full ETL will be performed.",
type=str,
nargs="*",
default="")
_args = parser.parse_args()
launch_loader(_args)
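# Example invocations (the script filename is hypothetical; the flags mirror the
# argparse definitions above):
#
#   python launch_loader.py --instance-type c5d.4xlarge --state 0 --max-workers 256
#   python launch_loader.py --instance-name my-etl-box --state 3 --s3-upload-id <upload-uuid>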
|
11517528
|
from cliff import command
class OpenStackCommand(command.Command):
"""Base class for OpenStack commands."""
api = None
def run(self, parsed_args):
if not self.api:
return
else:
return super(OpenStackCommand, self).run(parsed_args)
def get_data(self, parsed_args):
pass
def take_action(self, parsed_args):
return self.get_data(parsed_args)
|
11517621
|
class CustomSet:
    def __init__(self, elements=None):
        # Avoid a shared mutable default argument; copy the input into a real set
        self.elements = set(elements) if elements else set()
    def isempty(self):
        return not self.elements
    def __contains__(self, element):
        return element in self.elements
    def issubset(self, other):
        return self.elements <= other.elements
    def isdisjoint(self, other):
        return self.elements.isdisjoint(other.elements)
    def __eq__(self, other):
        if not isinstance(other, CustomSet):
            return NotImplemented
        return self.elements == other.elements
    def add(self, element):
        self.elements.add(element)
    def intersection(self, other):
        return CustomSet(self.elements & other.elements)
    def __sub__(self, other):
        # Difference: elements in self but not in other
        return CustomSet(self.elements - other.elements)
    def __add__(self, other):
        # Union: elements in either set
        return CustomSet(self.elements | other.elements)
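# Quick sanity sketch of the intended set API (illustrative values):
#
#   a = CustomSet([1, 2, 3])
#   b = CustomSet([2, 3, 4])
#   assert 2 in a and a.intersection(b) == CustomSet([2, 3])
#   assert (a - b) == CustomSet([1]) and (a + b) == CustomSet([1, 2, 3, 4])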
|
11517634
|
from jink.utils.names import *
from jink.utils.classes import *
from jink.utils.future_iter import FutureIter
from jink.utils.func import pickle
import json
PRNTLINE = "\n--------------------------------\n"
class Parser:
def __init__(self):
self.tokens = None
def consume(self, item, soft=False):
"""Removes expected token, given a type or a tuple of types."""
current = self.tokens.current
if not item:
return self.tokens._next()
# Doesn't error out if the token isn't found
# But removes it if found
if soft:
if isinstance(item, tuple):
if current.type in item:
return self.tokens._next()
elif current.type == item:
return self.tokens._next()
else:
self.tokens._next()
if isinstance(item, tuple):
if current.type not in item:
raise Exception(f"Expected {' or '.join(item)}, got '{current.type}' on line {current.line}.")
else:
return current
else:
# Strings have text that could be used to spoof this check so I account for them here
if current.value == item and current.type != TokenType.STRING:
return current
elif current.type == item:
return current
raise Exception(f"Expected '{item}', got '{current.type}' on line {current.line}.")
def parse(self, tokens, verbose=False):
self.tokens = FutureIter(tokens)
ast = []
while self.tokens.current is not None:
if self.tokens.current.type != TokenType.NEWLINE:
ast.append(self.parse_top())
else:
self.tokens._next()
if verbose:
print("AST:", PRNTLINE, json.dumps(pickle(ast), indent=2), PRNTLINE)
return ast
def parse_literal(self, tokens):
return self.parse(tokens)
def parse_to_console(self, tokens):
program = self.parse(tokens)
for expr in program:
print(expr)
def skip_newlines(self, count=-1):
while self.tokens.current != None and self.tokens.current.type == TokenType.NEWLINE and count != 0:
count -= 1
self.tokens._next()
return self.tokens.current
def parse_top(self):
init = self.tokens.current
if init == None:
return
elif init.type != TokenType.KEYWORD:
return self.parse_expr()
elif init.value == 'import':
# Skip 'import'
self.tokens._next()
return self.parse_module()
elif init.value in ('let', 'const'):
self.tokens._next()
ident = self.consume(TokenType.IDENTIFIER)
cur = self.tokens.current
# Assignments
if (cur.type == TokenType.OPERATOR and cur.value == '=') or cur.type in (TokenType.NEWLINE, TokenType.SEMICOLON):
self.tokens._next()
return self.parse_assignment(init.value, ident.value)
elif init.value == 'fun':
self.tokens._next()
return self.parse_function()
# Return statements
elif init.value == 'return':
return self.parse_return()
# Conditionals
elif init.value == 'if':
return self.parse_conditional()
# Null
elif init.value == 'null':
self.tokens._next()
return Null()
else:
raise Exception(f"Expected keyword, got '{init.value}' on line {init.line}.")
def parse_expr(self, precedence=0):
left = self.parse_primary()
current = self.tokens.current
while current and current.type == TokenType.OPERATOR and self.get_precedence(current) >= precedence:
operator = self.tokens._next()
if operator.value in ('++', '--'):
return UnaryOperator(operator.value + ':post', left)
next_precedence = self.get_precedence(operator)
if self.is_left_associative(operator):
next_precedence += 1
right = self.parse_expr(next_precedence)
left = BinaryOperator(operator.value, left, right)
current = self.tokens.current
if current and current.type == TokenType.SEMICOLON:
self.consume(TokenType.SEMICOLON)
return left
def parse_primary(self):
self.skip_newlines()
current = self.tokens.current
if current == None: return
if self.is_unary_operator(current):
operator = self.tokens._next()
if operator.value in ('-', '+', '!'):
value = self.parse_primary()
return UnaryOperator(operator.value, value)
value = self.parse_expr(self.get_precedence(operator))
return UnaryOperator(operator.value, value)
elif current.value == '(':
self.consume(TokenType.LPAREN)
value = self.parse_expr(0)
self.consume(TokenType.RPAREN)
return value
elif current.value == '{':
self.consume(TokenType.LBRACE)
obj = self.parse_object()
self.consume(TokenType.RBRACE)
return obj
elif current.type == TokenType.NUMBER:
current = self.tokens._next()
if current.value.count('.') > 0:
return FloatingPointLiteral(float(current.value))
return IntegerLiteral(int(current.value))
elif current.type == TokenType.STRING:
return StringLiteral(self.tokens._next().value)
elif current.type == TokenType.IDENTIFIER:
ident = self.tokens._next().value
if self.tokens.current.value == '.':
self.tokens._next()
index = { 'type': 'prop', 'index': self.parse_top() }
return IdentLiteral(ident, index)
elif self.tokens.current.value == '(':
return self.parse_call(ident)
elif self.tokens.current.value == '=':
self.tokens._next()
return self.parse_assignment(None, ident)
else:
return IdentLiteral(ident)
elif current.type == TokenType.KEYWORD:
keyword = self.tokens._next().value
if keyword in ('true', 'false'):
return BooleanLiteral(keyword)
elif self.tokens.current.value == '(':
return self.parse_call(keyword)
elif keyword == 'null':
return Null()
raise Exception(f"Expected primary expression, got '{current.value}' on line {current.line}.")
def is_unary_operator(self, token):
if hasattr(token, 'type'):
if token.type == TokenType.STRING:
return False
return token.value in ('-', '+', '++', '--', '!')
def is_left_associative(self, token):
if hasattr(token, 'type'):
if token.type == TokenType.STRING:
return False
return token.value not in ('++', '--', '+=', '-=', '=')
def get_precedence(self, token):
if token.value in ('+', '-'):
return 1
elif token.value in ('*', '/', '%'):
return 2
    elif token.value == '^':
return 3
else:
return 0
# TODO Reverse nesting behaviour
def parse_module(self, name=None, index=None):
if self.tokens.current.type == None:
return Module(name, index)
if self.tokens.current.type in (TokenType.NEWLINE, TokenType.SEMICOLON):
self.consume((TokenType.NEWLINE, TokenType.SEMICOLON))
return Module(name, index)
# Expect package name
if self.tokens.current.type not in (TokenType.IDENTIFIER, TokenType.OPERATOR):
if self.tokens.current.type == TokenType.OPERATOR and self.tokens.current.value != '.':
raise Exception(f"Expected '.' got '{self.tokens.current.value}' on line {self.tokens.current.line}.")
raise Exception(f"Expected package index, got '{self.tokens.current.value}' on line {self.tokens.current.line}.")
# Store current index and move on
write_index = self.tokens._next()
module = Module(write_index.value, index)
if self.tokens.current.type in (TokenType.IDENTIFIER, TokenType.OPERATOR):
return self.parse_module(self.tokens.current.value, Module(write_index.value, index))
if self.tokens.current.type in (TokenType.NEWLINE, TokenType.SEMICOLON):
self.consume((TokenType.NEWLINE, TokenType.SEMICOLON))
return module
def parse_assignment(self, var_type, name):
if self.tokens.current.type in (TokenType.NEWLINE, TokenType.SEMICOLON, TokenType.COMMA):
assignment = Assignment(var_type, IdentLiteral(name), None)
elif self.tokens.current.type == TokenType.RPAREN:
assignment = Assignment(var_type, IdentLiteral(name), None)
return assignment
else:
assignment = Assignment(var_type, IdentLiteral(name), self.parse_expr())
if self.tokens.current != None and self.tokens.current.type != TokenType.COMMA:
self.consume((TokenType.NEWLINE, TokenType.SEMICOLON))
return assignment
def parse_call(self, func_name):
args = self.parse_args_params('args')
return CallExpression(IdentLiteral(func_name), args)
def parse_function(self):
ident = self.consume(TokenType.IDENTIFIER)
params = self.parse_args_params('params')
body = self.parse_block()
return Function(ident.value, params, body)
# Parse function parameters and call arguments
def parse_args_params(self, location):
self.consume(TokenType.LPAREN)
l = []
# Function parameters
if location == 'params':
while True and self.tokens.current != None:
if self.tokens.current.value == ')':
self.consume(TokenType.RPAREN)
break
elif self.tokens.current.value == '{':
break
cur = self.tokens._next()
if cur.type == TokenType.KEYWORD and cur.value in ('let', 'const'):
ident = self.consume(TokenType.IDENTIFIER)
_next = self.tokens.current
# Close out function params
if _next.type == TokenType.RPAREN:
l.append(FunctionParameter(ident.value, cur.value, None))
# Expect comma or colon
# fun test(let a<,> let b<:> 10) {}
elif _next.type in (TokenType.COMMA, TokenType.OPERATOR) and _next.value in (',', ':'):
if _next.value == ':':
self.tokens._next()
default = self.parse_expr()
l.append(FunctionParameter(ident.value, cur.value, default))
self.tokens._next()
elif _next.value == ',':
l.append(FunctionParameter(ident.value, cur.value, None))
self.tokens._next()
else:
raise Exception(f"Expected comma or colon, got '{cur.value}' on line {cur.line}.")
else:
raise Exception(f"Expected let or const, got '{cur.value}' on line {cur.line}.")
# Call arguments
else:
while True and self.tokens.current != None:
if self.tokens.current.value == ')':
self.consume(TokenType.RPAREN)
break
l.append(self.parse_top())
if self.tokens.current.value in (',', 'newline'):
self.consume((TokenType.COMMA, TokenType.NEWLINE), soft=True)
else:
self.consume(TokenType.RPAREN)
break
return l
# Return parsing
def parse_return(self):
self.tokens._next()
if self.tokens.current.type == TokenType.SEMICOLON:
self.tokens._next()
return Return(None)
if self.tokens.current.type == TokenType.NEWLINE:
return Return(None)
expr = self.parse_expr()
return Return(expr)
# Conditional parsing
def parse_conditional(self):
init = self.tokens._next()
# Parse else first because it is unlike if/elseif
if init.value == 'else':
return Conditional(init.value, None, self.parse_block(), None)
body = []
else_body = []
self.consume(TokenType.LPAREN)
expr = self.parse_expr()
self.consume(TokenType.RPAREN)
body = self.parse_block()
# If an else case is next
self.skip_newlines()
_next = self.tokens.current
if _next and _next.type == TokenType.KEYWORD and _next.value in ('elseif', 'else'):
else_body.append(self.parse_conditional())
return Conditional(init.value, expr, body, else_body)
# Parse blocks for functions and conditionals
def parse_block(self):
body = []
if self.tokens.current.value == '{':
self.consume(TokenType.LBRACE)
self.skip_newlines()
while self.tokens.current != None and self.tokens.current.value != '}':
body.append(self.parse_top())
self.skip_newlines()
if self.tokens._next() == None:
raise Exception(f"Expected '}}', got '{self.tokens.current.value}' on line {self.tokens.current.line}.")
    # One- or two-line function body
# ex: fun say_hi() return print("Hi")
else:
init = self.tokens.current
# Skip only one line
# If there is more space before an expression, you're doing it wrong kiddo
self.skip_newlines(1)
if self.tokens.current.type == TokenType.NEWLINE:
raise Exception(f"Empty function body on line {init.line}.")
body.append(self.parse_top())
return body
def parse_kv_pair(self):
self.skip_newlines()
k = self.consume((TokenType.IDENTIFIER, TokenType.STRING)).value
self.consume(':')
if self.tokens.current.type == TokenType.LBRACE:
self.consume(TokenType.LBRACE)
v = self.parse_object()
self.consume(TokenType.RBRACE)
else:
v = self.consume((TokenType.IDENTIFIER, TokenType.STRING)).value
return k, v
def parse_object(self):
obj = {}
while self.tokens.current is not None and self.tokens.current.type is not TokenType.RBRACE:
k, v = self.parse_kv_pair()
obj[k] = v
self.skip_newlines()
if self.tokens.current.type == TokenType.RBRACE:
break
self.consume(TokenType.COMMA)
if self.tokens.current.value != '}':
raise Exception(f"Expected '}}', got '{self.tokens.current.value}' on line {self.tokens.current.line}.")
return obj
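# Rough usage sketch (hedged: assumes a companion tokenizer elsewhere in jink that
# yields TokenType-tagged tokens; the Lexer name and its scan() method are assumptions):
#
#   tokens = Lexer().scan("let x = 1 + 2")
#   ast = Parser().parse(tokens, verbose=True)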
|
11517654
|
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
class MapQueueTypeView(QWidget):
def __init__(self, parent):
super().__init__(parent)
self._init_ui()
def _init_ui(self):
self.layout = QVBoxLayout()
self.queue_type_box: QComboBox = QComboBox(self)
self.layout.addWidget(self.queue_type_box)
self.setLayout(self.layout)
|
11517696
|
import numpy as np
import os
from os import listdir
from os.path import isdir, isfile, join
import math,sys
# requires scipy installation
import scipy.cluster.hierarchy as sch
import scipy.stats as stats
from scipy.spatial.distance import *
from scipy.cluster.hierarchy import *
SCRIPTPATH = os.environ['SCRIPTPATH']
sys.path.insert(0,SCRIPTPATH)
import utils
def seriation(Z,N,cur_index):
if cur_index < N:
return [cur_index]
else:
left = int(Z[cur_index-N,0])
right = int(Z[cur_index-N,1])
return (seriation(Z,N,left) + seriation(Z,N,right))
def get_clusters(accvals, image=False, f=0.25, method="average"):
# Create linkage matrix
size = N = accvals.shape[0]
distance_between_decoys = np.zeros((size, size))
for i in range(size):
for j in range(size):
distance_between_decoys[i,j] = np.mean(np.sqrt(np.square((accvals[i])-(accvals[j]))))
condensed = squareform(distance_between_decoys)
z = sch.linkage(condensed, method=method)
order = seriation(z, N, N + N-2)
cs = sch.fcluster(z, f*condensed.max(), criterion="distance")
return cs #which cluster each member belongs to
def cb_lddt(nat, pdb):
def d2(crd1,crd2):
val = 0.0
for k in range(3):
val += (crd1[k]-crd2[k])*(crd1[k]-crd2[k])
return val
natcrds = utils.pdb2crd(nat,'CB')
deccrds = utils.pdb2crd(pdb,'CB')
reslist = natcrds.keys()
contacts = []
for res1 in reslist:
for res2 in reslist:
if res1 >= res2: continue
dis2 = d2(natcrds[res1],natcrds[res2])
if dis2 < 225.0:
contacts.append((res1,res2,math.sqrt(dis2)))
lddts = {}
for res in reslist:
lddts[res] = []
for res1,res2,dnat in contacts:
crd1 = deccrds[res1]
crd2 = deccrds[res2]
d = math.sqrt(d2(crd1,crd2))
count = 0.0
diff = abs(dnat-d)
for crit in [0.5,1.0,2.0,4.0]:
if diff < crit: count += 0.25
lddts[res1].append(count)
lddts[res2].append(count)
inds = [i-1 for i in reslist]
vals = [sum(lddts[res])/len(lddts[res]) for res in reslist]
return inds, vals
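# Usage sketch (hypothetical PDB paths): per-residue CB-based lDDT of a model
# against the native structure, counting CB-CB distances under 15 A that are
# preserved within 0.5/1/2/4 A tolerances.
#
#   inds, vals = cb_lddt("native.pdb", "model.pdb")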
def sliding_improvement(best, base, window=10):
output = []
for i in range(0, len(base)-5, 5):
t1 = best[i:i+window]
t2 = base[i:i+window]
output.append(np.mean(t1-t2))
return np.max(output)
# Given a folder full of predictions,
# 1. performs hierarchical clustering
# 2. computes centroids
# 3. computes compatibility among centroids
def cluster(infolder, testmode=False, slide=False,
verbose=False,
ntrial=20, nmin=10, nmax=25):
files = [f[:-4] for f in listdir(infolder) if f.endswith('.npz')]
order = [int(f.split('.')[1]) for f in files]
files = [files[i] for i in np.argsort(order)] #reorder by index
dec_acc = []
for f in files:
filename = join(infolder, f+".npz")
dec_acc.append(np.load(filename)["lddt"])
dec_acc = np.array(dec_acc)
f = 0.25
for i in range(ntrial):
assignment = get_clusters(dec_acc, f=f)
c_num = np.max(assignment)
if c_num < nmin: f -= 0.01
elif c_num > nmax: f += 0.01
else: break
if verbose:
print("threshold:", f)
print("# samples:", len(files))
print("# clusters:", c_num)
    # Calculate centroids by taking the average
centroids = {}
for i in range(1, c_num+1):
centroids[i] = np.mean(dec_acc[assignment==i], axis=0)
compat_matrix = np.zeros((c_num, c_num))
for i in range(c_num):
for j in range(c_num):
# Take best possible recombination at each position
temp_best = np.max(np.stack([centroids[i+1], centroids[j+1]]), axis=0)
# Quantify improvement as mean lddt improvement
if slide:
improvement = sliding_improvement(temp_best, centroids[i+1])
else:
improvement = np.mean(temp_best-centroids[i+1])
assert(improvement>=0)
compat_matrix[i,j] = improvement
return np.array(files), assignment, compat_matrix, dec_acc
def get_region_complementary(i,js,d,logout=sys.stdout):
for j in js:
super = (d[j]-d[i]>0.03)[0]
for k in range(1,len(super)-1):
if super[k-1] and super[k+1]: super[k] = True
regs = []
for k in range(len(super)):
if not super[k]: continue
if regs == [] or k-regs[-1][-1] > 1:
regs.append([k])
else:
regs[-1].append(k)
# Given the output of the above function and a sample name of interest,
# chooses 4 samples (the number is configurable via `num`) that likely amend the weaknesses of the sample of interest.
def choose_mates(name, names, assignment, compat_matrix,
counts, maxcounts=99,
num=4, image=False, infolder="",
logout=sys.stdout):
# Get index of sample of interest
index = np.arange(len(names))[names==name]
assert(len(index) == 1)
index = index[0]
# Get cluster of sample of interest
cluster = assignment[index]
# Get compatibility vector and get ordering of clusters from most compatible to least compatible
compat_vector = compat_matrix[cluster-1, :]
temp = [(compat_vector[i], i+1) for i in range(len(compat_vector))]
temp.sort(reverse=True)
compatible_clusters = []
npick = min(np.max(assignment),num)
while len(compatible_clusters) < npick:
compatible_clusters = [c for i,c in temp if counts[c-1] < maxcounts]
maxcounts += 1 #relieve criteria if fails
compatible_clusters = compatible_clusters[:npick]
# Choose samples based on clusters
output = []
#logout.write("%s: compatible clusters (self %d),"%(name,cluster)+" ".join(compatible_clusters)+"\n")
for c in compatible_clusters:
n = np.random.choice(names[assignment==c])
counts[c-1] += 1
output.append(n)
return output
def main(infolder,out=None,verbose=False,seeds=[],logout=sys.stdout):
if verbose:
print("Reading", infolder)
print("Writing to", outfile)
output = "#base, pair1, pair2, pair3, pair4\n"
n, a, c, d = cluster(infolder, testmode=False, slide=True, verbose=verbose)
if seeds != []: n_seed = [n[i] for i in seeds]
else: n_seed = n
counts = np.zeros(len(c))
nmax_choose = int(0.4*len(n_seed)) # not more than 40%
combs = {}
for s in n_seed: # filename
partners = choose_mates(s, n, a, c,
counts, maxcounts=nmax_choose,
image=False, infolder=infolder,
logout=logout)
i_s = np.arange(len(n))[n==s]
i_ps = [np.arange(len(n))[n==p] for p in partners]
get_region_complementary(i_s,i_ps,d,logout=logout)
output += ",".join([s]+partners)+"\n"
combs[s] = partners
if out != None:
out.write(output)
return combs #indices are modelno (e.g. 0 for iter0.0, 1 for iter0.1,...)
if __name__ == "__main__":
infolder = sys.argv[1]
outfile = sys.argv[2]
out = open(outfile,'w')
seeds = []
if '-seed' in sys.argv:
seeds = [int(word) for word in sys.argv[sys.argv.index('-seed')+1].split(',')]
main(infolder,out,True,seeds)
out.close()
|
11517721
|
from functools import partial, update_wrapper
import Metrics
import ContentCentricMeasurements
import UserCentricMeasurements
#from load_data import load_data
from BaselineMeasurements import *
import pprint
def named_partial(func, *args, **kwargs):
partial_func = partial(func, *args, **kwargs)
update_wrapper(partial_func, func)
partial_func.varnames = func.__code__.co_varnames
return partial_func
reddit_events = ["post","comment"]
user_measurement_params = {
### User Centric Measurements
"user_unique_content": {
'question': '17',
"scale": "population",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getUserUniqueContent",
"measurement_args":{"eventTypes":reddit_events,"content_field":"root"},
"metrics": {
"js_divergence": named_partial(Metrics.js_divergence, discrete=False),
"rmse": Metrics.rmse,
"nrmse": named_partial(Metrics.rmse,relative=True),
"r2": Metrics.r2}
},
"user_activity_timeline": {
"question": '19',
"scale": "node",
"node_type":"user",
"scenario1":False,
"scenario2":True,
"scenario3":False,
"measurement": "getUserActivityTimeline",
"measurement_args":{"eventTypes":reddit_events},
"metrics": {"rmse": Metrics.rmse,
"nrmse": named_partial(Metrics.rmse,relative=True),
"ks_test": Metrics.ks_test,
"dtw": Metrics.dtw}
},
"user_activity_distribution": {
"question": '24a',
"scale": "population",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getUserActivityDistribution",
"measurement_args":{"eventTypes":reddit_events},
"metrics": {"rmse": Metrics.rmse,
"nrmse": named_partial(Metrics.rmse,relative=True),
"r2": Metrics.r2,
"js_divergence": named_partial(Metrics.js_divergence, discrete=True)}
},
"most_active_users": {
"question": '24b',
"scale": "population",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getMostActiveUsers",
"measurement_args":{"k":1500,"eventTypes":reddit_events},
"metrics": {"rbo": named_partial(Metrics.rbo_score, p=0.9965)}
},
"user_popularity": {
"question": '25',
"scale": "population",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getUserPopularity",
"measurement_args":{"k":1500,"eventTypes":reddit_events,"content_field":"root"},
"metrics": {"rbo": named_partial(Metrics.rbo_score, p=0.9965)}
},
"user_gini_coef": {
"question": '26a',
"scale": "population",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getGiniCoef",
"measurement_args":{"nodeType":"user","eventTypes":reddit_events},
"metrics": {"absolute_difference": Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
"user_palma_coef": {
"question": '26b',
"scale": "population",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getPalmaCoef",
"measurement_args":{"nodeType":"user","eventTypes":reddit_events},
"metrics": {"absolute_difference": Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
#"user_diffusion_delay": {
# "question": '27',
# "scale": "population",
# "node_type":"user",
# "scenario1":True,
# "scenario2":True,
# "scenario3":True,
# "measurement": "getUserDiffusionDelay",
# "measurement_args":{"eventTypes":reddit_events},
# "metrics": {"ks_test": Metrics.ks_test}
#}
}
content_measurement_params = {
##Content-centric measurements
# "content_diffusion_delay": {
# "question": 1,
# "scale": "node",
# "node_type":"content",
# "scenario1":False,
# "scenario2":True,
# "scenario3":False,
# "measurement": "getContentDiffusionDelay",
# "measurement_args":{"eventTypes":["comment"],"time_bin":"h"},
# "metrics": {"ks_test": Metrics.ks_test,
# "js_divergence": named_partial(Metrics.js_divergence, discrete=False)},
# },
# "content_growth": {
# "question": 2,
# "scale": "node",
# "node_type":"content",
# "scenario1":False,
# "scenario2":True,
# "scenario3":False,
# "measurement": "getContentGrowth",
# "measurement_args":{"eventTypes":reddit_events,"time_bin":"h"},
# "metrics": {"rmse": named_partial(Metrics.rmse, join="outer"),
# "dtw": Metrics.dtw}
# },
# "content_contributors": {
# "question": 4,
# "scale": "node",
# "node_type":"content",
# "scenario1":False,
# "scenario2":True,
# "scenario3":False,
# "measurement": "getContributions",
# "measurement_args":{"eventTypes":reddit_events},
# "metrics": {"rmse": named_partial(Metrics.rmse, join="outer"),
# "dtw": Metrics.dtw}
# },
# "content_event_distribution_dayofweek": {
# "question": 5,
# "scale": "node",
# "node_type":"content",
# "scenario1":False,
# "scenario2":True,
# "scenario3":False,
# "measurement": "getDistributionOfEvents",
# "measurement_args":{"weekday":True},
# "metrics": {"js_divergence": named_partial(Metrics.js_divergence, discrete=True)}
# },
"content_liveliness_distribution": {
"question": 13,
"scale": "population",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getDistributionOfEventsByContent",
"measurement_args":{"eventTypes":["comment"],"content_field":"root"},
"metrics": {"js_divergence": named_partial(Metrics.js_divergence, discrete=False)}
},
# "content_liveliness_topk": {
# "question": 13,
# "scale": "population",
# "node_type":"content",
# "scenario1":True,
# "scenario2":True,
# "scenario3":True,
# "measurement": "getTopKContent",
# "measurement_args":{"k":1500,"eventTypes":["comment"],"content_field":"root"},
# "metrics": {"rbo": named_partial(Metrics.rbo_score, p=0.9965)}
# },
"content_activity_disparity_gini_comment": {
"question": 14,
"scale": "population",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"filters": {"event": ["comment"]},
"measurement": "getGiniCoef",
"measurement_args":{"eventTypes":["comment"]},
"metrics": {"absolute_difference": Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
"content_activity_disparity_palma_comment": {
"question": 14,
"scale": "population",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement": "getPalmaCoef",
"measurement_args":{"eventTypes":["comment"]},
"metrics": {"absolute_difference": Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
"subreddit_user_continue_prop":{
"question":"30",
"scale":"node",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"propUserContinue",
"measurement_args":{"eventTypes":["comment","post"],"content_field":"subreddit"},
"metrics":{"rmse":Metrics.rmse,
"nrmse": named_partial(Metrics.rmse,relative=True),
}
},
"subreddit_post_to_comment":{
"question":'31',
"scale":"node",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement_args":{"eventTypes":reddit_events,"event1":"post","event2":"comment","content_field":"subreddit"},
"measurement":"getEventTypeRatioTimeline",
"metrics":{"rmse":Metrics.rmse,
"nrmse": named_partial(Metrics.rmse,relative=True),
}
}
}
community_measurement_params = {
#Community-level measurements
"community_gini":{
"question":'6',
"scale":"community",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"getCommunityGini",
"measurement_args":{"eventTypes":reddit_events,"community_field":"subreddit","content_field":"root"},
"metrics":{"absolute_difference": Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
"community_palma":{
"question":'6',
"scale":"community",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"getCommunityPalma",
"measurement_args":{"eventTypes":reddit_events,"community_field":"subreddit","content_field":"root"},
"metrics":{"absolute_difference": Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
"community_event_proportions":{
"question":'7',
"scale":"community",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"getProportion",
"measurement_args":{"eventTypes":reddit_events,"community_field":"subreddit"},
"metrics":{"js_divergence": named_partial(Metrics.js_divergence,discrete=True)}
},
"community_contributing_users":{
"question":"20",
"scale":"community",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"contributingUsers",
"measurement_args":{"eventTypes":reddit_events,"community_field":"subreddit"},
"metrics":{"absolute_difference":Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
"community_num_user_actions":{
"question":"23",
"scale":"community",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"getNumUserActions",
"measurement_args":{"eventTypes":reddit_events,"unit":'D',"community_field":"subreddit"},
"metrics":{"rmse": named_partial(Metrics.rmse, join="outer"),
"nrmse": named_partial(Metrics.rmse,relative=True),
"dtw": Metrics.dtw,
"js_divergence": named_partial(Metrics.js_divergence,discrete=False)
}
},
"community_burstiness":{
"question":"9",
"scale":"community",
"node_type":"content",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"burstsInCommunityEvents",
"measurement_args":{"eventTypes":reddit_events,"community_field":"subreddit"},
"metrics":{"absolute_difference":Metrics.absolute_difference,
"absolute_percentage_error": Metrics.absolute_percentage_error}
},
"community_user_burstiness":{
"question":"",
"scale":"community",
"node_type":"user",
"scenario1":True,
"scenario2":True,
"scenario3":True,
"measurement":"getUserBurstByCommunity",
"metrics":{'ks_test':Metrics.ks_test}
}
}
reddit_measurement_params = {}
reddit_measurement_params.update(user_measurement_params)
reddit_measurement_params.update(content_measurement_params)
reddit_measurement_params.update(community_measurement_params)
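# Illustrative lookup sketch (hedged: the ground-truth/simulation series are
# hypothetical, and each metric callable is assumed to take those two objects):
#
#   metric_fn = reddit_measurement_params["user_unique_content"]["metrics"]["nrmse"]
#   score = metric_fn(ground_truth_series, simulation_series)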
|
11517734
|
from scrapy import Spider
from covers.items import Team
from covers.loaders import TeamLoader
class TeamSpider(Spider):
name = 'teams'
allowed_domains = ['covers.com']
start_urls = ['https://www.covers.com/sport/basketball/nba/teams']
def parse(self, response):
for row in response.xpath('//td/a'):
loader = TeamLoader(item=Team(), selector=row)
loader.add_xpath('city', 'text()')
loader.add_xpath('url', '@href')
yield loader.load_item()
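# Typical run from the covers project root (output path is illustrative):
#
#   scrapy crawl teams -o teams.json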
|
11517797
|
from plusseg.utils.registry import Registry
BACKBONES = Registry()
DECODERS = Registry()
POSTPROCESSORS = Registry()
|
11517817
|
TEST_PATH_GOOD = 'path/path'
TEST_PATH_WITH_ENDING_SLASH = 'path/'
TEST_PATH_WITH_INITIAL_SLASH = '/path'
|
11517818
|
import os
import sys
import numpy as np
from .layers import Layer
if sys.version_info[0] < 3:
FileNotFoundError = IOError
else:
long = int
def to_numpy(data):
if isinstance(data, np.ndarray):
return data
if hasattr(data, 'asnumpy'):
return data.asnumpy()
if hasattr(data, 'numpy'):
return data.numpy()
if isinstance(data, (list, tuple)):
return np.array(data)
raise TypeError('Unsupported Type: {}'.format(type(data)))
def to_tuple(data):
if isinstance(data, tuple):
return data
if isinstance(data, list):
return tuple(data)
return (data, )
def assert_almost_equal(a, b, rtol=1e-5, atol=1e-8):
def check_value(data, other):
if isinstance(data, (int, long, float)):
if hasattr(other, 'shape'):
return np.full(other.shape, fill_value=data)
else:
                return np.array(data)
return data
a = check_value(a, b)
b = check_value(b, a)
a = to_numpy(a)
b = to_numpy(b)
# Check Shape
# If the shapes don't match, raise AssertionError and print the shapes
assert a.shape == b.shape,\
AssertionError('Unmatched Shape: {} vs {}'.format(a.shape, b.shape))
# Compute Absolute Error |a - b|
error = a - b
abs_error = np.abs(error)
max_abs_error = abs_error.max()
def raise_error(abs_error, info):
        # Report where the maximum absolute error occurs and its value
loc = np.argmax(abs_error)
idx = np.unravel_index(loc, abs_error.shape)
out = ''
def get_array_R(data, name, idx, R):
axes = [-1] if data.ndim == 1 else [-1, -2]
shape = data.shape
slice_list = list(idx)
sidx = list(idx[-2:])
for i in axes:
axis_len = shape[i]
axis_i = slice_list[i]
start = max(0, axis_i - R + 1)
stop = min(axis_len, axis_i + R)
slice_list[i] = slice(start, stop)
sidx[i] -= start
def str_slice_list(slice_list):
return ', '.join([str(s) if not isinstance(s, slice) else
'{}:{}'.format(s.start, s.stop) for s in slice_list])
sdata = data.round(5)
return '{name}[{slice_list}]:\n{data}\n'.format(name=name, slice_list=str_slice_list(slice_list),
data=sdata)
R = 5
out += 'Location of maximum error: {}\n'.format(idx)
out += '{}\n{}\n{}'.format(info,
get_array_R(
a, 'a', idx, R),
get_array_R(
b, 'b', idx, R),
)
raise AssertionError(out)
# Check Absolute Error
if atol is not None:
if max_abs_error > atol:
            # If the maximum absolute error exceeds atol, raise AssertionError
idx = abs_error.argmax()
raise_error(abs_error, 'Maximum Absolute Error({}) > atol({}): {} vs {}'.
format(max_abs_error, atol, a.ravel()[idx], b.ravel()[idx]))
# Compute Relative Error |(a-b)/b|
try:
eps = np.finfo(b.dtype).eps
except ValueError:
eps = np.finfo(np.float32).eps
relative_error = abs_error / (np.abs(b) + eps)
max_relative_error = relative_error.max()
# Check Relative Error
if max_relative_error > rtol:
        # If the maximum relative error exceeds rtol, raise AssertionError
idx = relative_error.argmax()
raise_error(relative_error, 'Maximum Relative Error({}) > rtol({}): {} vs {}'.
format(max_relative_error, rtol, a.ravel()[idx], b.ravel()[idx]))
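# Behavioral sketch with the default tolerances (rtol=1e-5, atol=1e-8):
#
#   assert_almost_equal(np.ones(3), np.ones(3) + 1e-9)   # passes
#   assert_almost_equal(np.ones(3), np.ones(3) + 1e-3)   # raises AssertionError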
def assert_file_exists(fname):
assert os.path.exists(fname), IOError("{} not found".format(fname))
def gradcheck(layer, inputs, eps=1e-6, rtol=1e-2, atol=None, sampling=None):
assert isinstance(layer, Layer)
if not isinstance(inputs, (tuple, list)):
inputs = (inputs, )
# To NumPy Tensor
inputs = [to_numpy(x) for x in inputs]
# Set Input
layer.X = inputs if len(inputs) > 1 else inputs[0]
layer.forward_all()
ori_out = to_tuple(layer.Y)
assert isinstance(ori_out, (tuple, list)), type(ori_out)
dys = [np.random.normal(0, 0.01, size=out_i.shape) +
0.1 for out_i in ori_out]
assert len(dys) == len(ori_out), '{} vs {}'.format(len(dys), len(ori_out))
layer.dY = dys if len(dys) > 1 else dys[0]
layer.backward()
grad = to_tuple(layer.dX)
for i, x in enumerate(inputs):
size = inputs[i].size
sample_grad = np.empty_like(inputs[i])
sample_grad_ravel = sample_grad.ravel()
samples = np.arange(size)
if sampling is not None:
if isinstance(sampling, int):
num_samples = sampling
else:
num_samples = int(sampling * size)
samples = np.random.choice(samples, min(size, num_samples))
for k in samples:
x_ravel = x.ravel()
old_elem_value = x_ravel[k]
x_ravel[k] = old_elem_value + eps / 2
layer.X = inputs if len(inputs) > 1 else inputs[0]
layer.forward_all()
pos_out = to_tuple(layer.Y)
x_ravel[k] = old_elem_value - eps / 2
layer.X = inputs if len(inputs) > 1 else inputs[0]
layer.forward_all()
neg_out = to_tuple(layer.Y)
assert len(pos_out) == len(neg_out)
assert len(pos_out) == len(ori_out)
numerical_grad_k = np.sum(
[dy * (pos - neg) / eps for pos, neg, dy in zip(pos_out, neg_out, dys)])
sample_grad_ravel[k] = numerical_grad_k
numerical_grad = grad[i].copy()
numerical_grad.ravel()[samples] = sample_grad.ravel()[samples]
assert_almost_equal(numerical_grad, grad[i], rtol=rtol, atol=atol)
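# Hedged usage sketch: gradcheck drives any Layer exposing X/Y/dY/dX plus
# forward_all()/backward(); the concrete layer class and input shape below are hypothetical.
#
#   layer = SigmoidLayer()
#   gradcheck(layer, np.random.randn(4, 8), sampling=0.25)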
|